// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "async_objs.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "enumerated_ref.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
	bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn));
	prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn));
	prt_str(out, "min: ");
	bch2_bpos_to_text(out, bn->min_key);
	prt_newline(out);
	prt_str(out, "max: ");
	bch2_bpos_to_text(out, bn->max_key);
}

void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_p_next(start);
	     k != end;
	     p = k, k = bkey_p_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
	}
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
		k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		kvfree(p);
}

static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > c->opts.btree_node_size);

	*used_mempool = false;
	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
	}
	memalloc_nofs_restore(flags);
	return p;
}

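/*
 * In-place heap sort of an array of pointers to packed bkeys, comparing keys
 * with bch2_bkey_cmp_packed() in @bt's format; used by bch2_sort_whiteouts()
 * below.
 */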
static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
						 ptrs[c],
						 ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
					    ptrs[a],
					    ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}

static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(b);
	     k != unwritten_whiteouts_end(b);
	     k = bkey_p_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_p_copy(k, *ptrs);
		k = bkey_p_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}

static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start = btree_bkey_first(b, t);
		end = btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_p_next(k);

			if (!bkey_deleted(k)) {
				bkey_p_copy(out, k);
				out = bkey_p_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}

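/*
 * Sort the bsets in the range [start_idx, end_idx) down into a single bset,
 * writing the result back into the node's buffer (swapping buffers outright
 * when the entire node is being sorted).
 */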
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx)
{
	struct btree_node *out;
	struct sort_iter_stack sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_stack_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_buf_bytes(b)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_buf_bytes(b));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
		b->set[i] = b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	nr = bch2_sort_repack(btree_bset_first(dst),
			      src, &src_iter,
			      &dst->format,
			      true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

/*
 * We're about to add another bset to the btree node, so if there's currently
 * too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx, b->nsets);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx);
		ret = true;
	}

	return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
	unsigned mid_u64s_bits =
		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new
 * bset if @b doesn't already have one.
 *
 * Returns true if we sorted (i.e. invalidated iterators)
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));
	BUG_ON(btree_node_just_written(b));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b) &&
	    should_compact_all(c, b)) {
		bch2_btree_node_write_trans(trans, b, SIX_LOCK_write,
					    BTREE_WRITE_init_next_bset);
		reinit_iter = true;
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  bool print_pos,
			  struct btree *b, struct bset *i, struct bkey_packed *k,
			  unsigned offset, int rw)
{
	if (print_pos) {
		prt_str(out, rw == READ
			? "error validating btree node "
			: "corrupt btree node before write ");
		prt_printf(out, "at btree ");
		bch2_btree_pos_to_text(out, c, b);
		prt_newline(out);
	}

	if (ca)
		prt_printf(out, "%s ", ca->name);

	prt_printf(out, "node offset %u/%u",
		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
	if (i)
		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
	if (k)
		prt_printf(out, " bset byte offset %lu",
			   (unsigned long)(void *)k -
			   ((unsigned long)(void *)i & ~511UL));
	prt_str(out, ": ");
}

__printf(11, 12)
static int __btree_err(int ret,
		       struct bch_fs *c,
		       struct bch_dev *ca,
		       struct btree *b,
		       struct bset *i,
		       struct bkey_packed *k,
		       int rw,
		       enum bch_sb_error_id err_type,
		       struct bch_io_failures *failed,
		       struct printbuf *err_msg,
		       const char *fmt, ...)
{
	if (c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
		return bch_err_throw(c, fsck_fix);

	bool have_retry = false;
	int ret2;

	if (ca) {
		bch2_mark_btree_validate_failure(failed, ca->dev_idx);

		struct extent_ptr_decoded pick;
		have_retry = !bch2_bkey_pick_read_device(c,
					bkey_i_to_s_c(&b->key),
					failed, &pick, -1);
	}

	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
		ret = bch_err_throw(c, btree_node_read_err_fixable);
	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
		ret = bch_err_throw(c, btree_node_read_err_bad_node);

	bch2_sb_error_count(c, err_type);

	bool print_deferred = err_msg &&
		rw == READ &&
		!(test_bit(BCH_FS_in_fsck, &c->flags) &&
		  c->opts.fix_errors == FSCK_FIX_ask);

	struct printbuf out = PRINTBUF;
	bch2_log_msg_start(c, &out);

	if (!print_deferred)
		err_msg = &out;

	btree_err_msg(err_msg, c, ca, !print_deferred, b, i, k, b->written, rw);

	va_list args;
	va_start(args, fmt);
	prt_vprintf(err_msg, fmt, args);
	va_end(args);

	if (print_deferred) {
		prt_newline(err_msg);

		switch (ret) {
		case -BCH_ERR_btree_node_read_err_fixable:
			ret2 = bch2_fsck_err_opt(c, FSCK_CAN_FIX, err_type);
			if (!bch2_err_matches(ret2, BCH_ERR_fsck_fix) &&
			    !bch2_err_matches(ret2, BCH_ERR_fsck_ignore)) {
				ret = ret2;
				goto fsck_err;
			}

			if (!have_retry)
				ret = bch_err_throw(c, fsck_fix);
			goto out;
		case -BCH_ERR_btree_node_read_err_bad_node:
			prt_str(&out, ", ");
			ret = __bch2_topology_error(c, &out);
			break;
		}

		goto out;
	}

	if (rw == WRITE) {
		prt_str(&out, ", ");
		ret = __bch2_inconsistent_error(c, &out)
			? -BCH_ERR_fsck_errors_not_fixed
			: 0;
		goto print;
	}

	switch (ret) {
	case -BCH_ERR_btree_node_read_err_fixable:
		ret2 = __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf);
		if (!bch2_err_matches(ret2, BCH_ERR_fsck_fix) &&
		    !bch2_err_matches(ret2, BCH_ERR_fsck_ignore)) {
			ret = ret2;
			goto fsck_err;
		}

		if (!have_retry)
			ret = bch_err_throw(c, fsck_fix);
		goto out;
	case -BCH_ERR_btree_node_read_err_bad_node:
		prt_str(&out, ", ");
		ret = __bch2_topology_error(c, &out);
		break;
	}
print:
	bch2_print_str(c, KERN_ERR, out.buf);
out:
fsck_err:
	printbuf_exit(&out);
	return ret;
}

#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
({									\
	int _ret = __btree_err(type, c, ca, b, i, k, write,		\
			       BCH_FSCK_ERR_##_err_type,		\
			       failed, err_msg,				\
			       msg, ##__VA_ARGS__);			\
									\
	if (!bch2_err_matches(_ret, BCH_ERR_fsck_fix)) {		\
		ret = _ret;						\
		goto fsck_err;						\
	}								\
									\
	true;								\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
		}
	}

	/*
	 * Always rebuild search trees: eytzinger search tree nodes directly
	 * depend on the values of min/max key:
	 */
	bch2_bset_set_no_aux_tree(b, b->set);
	bch2_btree_build_aux_trees(b);
	b->nr = bch2_btree_node_count_keys(b);

	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;
	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
	}
}

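/*
 * Validate a single bset's header: on-disk version, size and sector offset
 * within the node, and - for the first bset - the btree node header itself
 * (sequence number, btree id, level, min/max keys, bkey format).
 */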
static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, unsigned sectors, int write,
			 struct bch_io_failures *failed,
			 struct printbuf *err_msg)
{
	unsigned version = le16_to_cpu(i->version);
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	btree_err_on(!bch2_version_compatible(version),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "unsupported bset version %u.%u",
		     BCH_VERSION_MAJOR(version),
		     BCH_VERSION_MINOR(version));

	if (c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes &&
	    btree_err_on(version < c->sb.version_min,
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_older_than_sb_min,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		if (bch2_version_compatible(version)) {
			mutex_lock(&c->sb_lock);
			c->disk_sb.sb->version_min = cpu_to_le16(version);
			bch2_write_super(c);
			mutex_unlock(&c->sb_lock);
		} else {
			/* We have no idea what's going on: */
			i->version = cpu_to_le16(c->sb.version);
		}
	}

	if (btree_err_on(BCH_VERSION_MAJOR(version) >
			 BCH_VERSION_MAJOR(c->sb.version),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_newer_than_sb,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	if (!write &&
	    btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, ca, b, i, NULL,
			 bset_past_end_of_btree_node,
			 "bset past end of btree node (offset %u len %u but written %zu)",
			 offset, sectors, ptr_written ?: btree_sectors(c)))
		i->u64s = 0;

	btree_err_on(offset && !i->u64s,
		     -BCH_ERR_btree_node_read_err_fixable,
		     c, ca, b, i, NULL,
		     bset_empty,
		     "empty bset");

	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
		     -BCH_ERR_btree_node_read_err_want_retry,
		     c, ca, b, i, NULL,
		     bset_wrong_sector_offset,
		     "bset at wrong sector offset");

	if (!offset) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			/* XXX endianness */
			btree_err_on(bp->seq != bn->keys.seq,
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     bset_bad_seq,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_btree,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_level,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bad_min_key,
				     "incorrect min_key: got %s should be %s",
				     (printbuf_reset(&buf1),
				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
				     (printbuf_reset(&buf2),
				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
		}

		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_max_key,
			     "incorrect max key %s",
			     (printbuf_reset(&buf1),
			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
			     -BCH_ERR_btree_node_read_err_bad_node,
			     c, ca, b, i, NULL,
			     btree_node_bad_format,
			     "invalid bkey format: %s\n%s", buf1.buf,
			     (printbuf_reset(&buf2),
			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
		printbuf_reset(&buf1);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b,
					struct bkey_s_c k,
					enum bch_validate_flags flags)
{
	return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) {
		.from	= BKEY_VALIDATE_btree_node,
		.level	= b->c.level,
		.btree	= b->c.btree_id,
		.flags	= flags
	});
}

static int bset_key_validate(struct bch_fs *c, struct btree *b,
			     struct bkey_s_c k,
			     bool updated_range,
			     enum bch_validate_flags flags)
{
	struct bkey_validate_context from = (struct bkey_validate_context) {
		.from	= BKEY_VALIDATE_btree_node,
		.level	= b->c.level,
		.btree	= b->c.btree_id,
		.flags	= flags,
	};
	return __bch2_bkey_validate(c, k, from) ?:
		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?:
		(flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0);
}

static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
			      struct bset *i, struct bkey_packed *k)
{
	if (bkey_p_next(k) > vstruct_last(i))
		return false;

	if (k->format > KEY_FORMAT_CURRENT)
		return false;

	if (!bkeyp_u64s_valid(&b->format, k))
		return false;

	struct bkey tmp;
	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
	return !__bch2_bkey_validate(c, u.s_c,
				     (struct bkey_validate_context) {
					.from	= BKEY_VALIDATE_btree_node,
					.level	= b->c.level,
					.btree	= b->c.btree_id,
					.flags	= BCH_VALIDATE_silent
				     });
}

static inline int btree_node_read_bkey_cmp(const struct btree *b,
					   const struct bkey_packed *l,
					   const struct bkey_packed *r)
{
	return bch2_bkey_cmp_packed(b, l, r)
		?: (int) bkey_deleted(r) - (int) bkey_deleted(l);
}

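/*
 * Validate the keys within a bset: keys that fail validation are dropped, and
 * if a key's size field is garbage we scan forward for the next thing that
 * looks like a valid key (or truncate the rest of the bset).
 */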
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			      struct bset *i, int write,
			      struct bch_io_failures *failed,
			      struct printbuf *err_msg)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	struct printbuf buf = PRINTBUF;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;
		unsigned next_good_key;

		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_past_bset_end,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_format,
				 "invalid bkey format %u", k->format))
			goto drop_this_key;

		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_u64s,
				 "bad k->u64s %u (min %u max %zu)", k->u64s,
				 bkeyp_key_u64s(&b->format, k),
				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
			goto drop_this_key;

		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		ret = bset_key_validate(c, b, u.s_c, updated_range, write);
		if (ret == -BCH_ERR_fsck_delete_bkey)
			goto drop_this_key;
		if (ret)
			goto fsck_err;

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) {
			struct bkey up = bkey_unpack_key(b, prev);

			printbuf_reset(&buf);
			prt_printf(&buf, "keys out of order: ");
			bch2_bkey_to_text(&buf, &up);
			prt_printf(&buf, " > ");
			bch2_bkey_to_text(&buf, u.k);

			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
				      c, NULL, b, i, k,
				      btree_node_bkey_out_of_order,
				      "%s", buf.buf))
				goto drop_this_key;
		}

		prev = k;
		k = bkey_p_next(k);
		continue;
drop_this_key:
		next_good_key = k->u64s;

		if (!next_good_key ||
		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
		     version >= bcachefs_metadata_version_snapshot)) {
			/*
			 * only do scanning if bch2_bkey_compat() has nothing to
			 * do
			 */

			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
				for (next_good_key = 1;
				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
				     next_good_key++)
					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
						goto got_good_key;
			}

			/*
			 * didn't find a good key, have to truncate the rest of
			 * the bset
			 */
			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
		}
got_good_key:
		le16_add_cpu(&i->u64s, -next_good_key);
		memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
		set_btree_node_need_rewrite(b);
		set_btree_node_need_rewrite_error(b);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

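/*
 * Main btree node read completion path: walk the bsets found in the buffer,
 * verify checksums (decrypting if encryption is in use), validate headers and
 * keys, then sort all bsets into a single one and keep the result as the
 * node's in-memory contents.
 */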
int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b,
			      struct bch_io_failures *failed,
			      struct printbuf *err_msg)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
	u64 max_journal_seq = 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0, write = READ;
	u64 start_time = local_clock();

	b->version_ondisk = U16_MAX;
	/* We might get called multiple times on read retry: */
	b->written = 0;

	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

	if (bch2_meta_read_fault("btree"))
		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
			  c, ca, b, NULL, NULL,
			  btree_node_fault_injected,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     -BCH_ERR_btree_node_read_err_must_retry,
		     c, ca, b, NULL, NULL,
		     btree_node_bad_magic,
		     "bad magic: want %llx, got %llx",
		     bset_magic(c), le64_to_cpu(b->data->magic));

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		bch2_bpos_to_text(&buf, b->data->min_key);
		prt_str(&buf, "-");
		bch2_bpos_to_text(&buf, b->data->max_key);

		btree_err_on(b->data->keys.seq != bp->seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "got wrong btree node: got\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	} else {
		btree_err_on(!b->data->keys.seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "bad btree header: seq 0\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	}

	while (b->written < (ptr_written ?: btree_sectors(c))) {
		unsigned sectors;
		bool first = !b->written;

		if (first) {
			bne = NULL;
			i = &b->data->keys;
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;
		}

		struct nonce nonce = btree_nonce(i, b->written << 9);
		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));

		btree_err_on(!good_csum_type,
			     bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i))
			     ? -BCH_ERR_btree_node_read_err_must_retry
			     : -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, i, NULL,
			     bset_unknown_csum,
			     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

		if (first) {
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
				bool csum_bad = bch2_crc_cmp(b->data->csum, csum);
				if (csum_bad)
					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

				btree_err_on(csum_bad,
					     -BCH_ERR_btree_node_read_err_want_retry,
					     c, ca, b, i, NULL,
					     bset_bad_csum,
					     "%s",
					     (printbuf_reset(&buf),
					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
					      buf.buf));

				ret = bset_encrypt(c, i, b->written << 9);
				if (bch2_fs_fatal_err_on(ret, c,
							 "decrypting btree node: %s", bch2_err_str(ret)))
					goto fsck_err;
			}

			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     -BCH_ERR_btree_node_read_err_incompatible,
				     c, NULL, b, NULL, NULL,
				     btree_node_unsupported_version,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
				bool csum_bad = bch2_crc_cmp(bne->csum, csum);
				if (ca && csum_bad)
					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

				btree_err_on(csum_bad,
					     -BCH_ERR_btree_node_read_err_want_retry,
					     c, ca, b, i, NULL,
					     bset_bad_csum,
					     "%s",
					     (printbuf_reset(&buf),
					      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
					      buf.buf));

				ret = bset_encrypt(c, i, b->written << 9);
				if (bch2_fs_fatal_err_on(ret, c,
							 "decrypting btree node: %s", bch2_err_str(ret)))
					goto fsck_err;
			}

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, b->written, sectors, READ, failed, err_msg);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, READ, failed, err_msg);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     bset_blacklisted_journal_seq,
			     "first btree node bset has blacklisted journal seq (%llu)",
			     le64_to_cpu(i->journal_seq));

		btree_err_on(blacklisted && ptr_written,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     first_bset_blacklisted_journal_seq,
			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
			     le64_to_cpu(i->journal_seq),
			     b->written, b->written + sectors, ptr_written);

		b->written = min(b->written + sectors, btree_sectors(c));

		if (blacklisted && !first)
			continue;

		sort_iter_add(iter,
			      vstruct_idx(i, 0),
			      vstruct_last(i));

		max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq));
	}

	if (ptr_written) {
		btree_err_on(b->written < ptr_written,
			     -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_data_missing,
			     "btree node data missing: expected %u sectors, found %u",
			     ptr_written, b->written);
	} else {
		for (bne = write_block(b);
		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
		     bne = (void *) bne + block_bytes(c))
			btree_err_on(bne->keys.seq == b->data->keys.seq &&
				     !bch2_journal_seq_is_blacklisted(c,
						le64_to_cpu(bne->keys.journal_seq),
						true),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bset_after_end,
				     "found bset signature after last bset");
	}

	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
	sorted->keys.u64s = 0;

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
	memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
	       btree_buf_bytes(b) -
	       sizeof(struct btree_node) -
	       b->nr.live_u64s * sizeof(u64));

	b->data->keys.u64s = sorted->keys.u64s;
	*sorted = *b->data;
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;
	b->data->keys.journal_seq = cpu_to_le64(max_journal_seq);

	BUG_ON(b->nr.live_u64s != le16_to_cpu(b->data->keys.u64s));

	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);

		ret = btree_node_bkey_val_validate(c, b, u.s_c, READ);
		if (ret == -BCH_ERR_fsck_delete_bkey ||
		    (static_branch_unlikely(&bch2_inject_invalid_keys) &&
		     !bversion_cmp(u.k->bversion, MAX_VERSION))) {
			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			set_btree_node_need_rewrite(b);
			set_btree_node_need_rewrite_error(b);
			continue;
		}
		if (ret)
			goto fsck_err;

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_p_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	scoped_guard(rcu)
		bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
			struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);

			if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw) {
				set_btree_node_need_rewrite(b);
				set_btree_node_need_rewrite_degraded(b);
			}
		}

	if (!ptr_written) {
		set_btree_node_need_rewrite(b);
		set_btree_node_need_rewrite_ptr_written_zero(b);
	}
fsck_err:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
	return ret;
}

static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c = rb->c;
	struct bch_dev *ca = rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
	struct btree *b = rb->b;
	struct bio *bio = &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	int ret = 0;

	struct printbuf buf = PRINTBUF;
	bch2_log_msg_start(c, &buf);

	prt_printf(&buf, "btree node read error at btree ");
	bch2_btree_pos_to_text(&buf, c, b);
	prt_newline(&buf);

	goto start;
	while (1) {
		ret = bch2_bkey_pick_read_device(c,
					bkey_i_to_s_c(&b->key),
					&failed, &rb->pick, -1);
		if (ret) {
			set_btree_node_read_error(b);
			break;
		}

		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);
		rb->have_ioref		= ca != NULL;
		rb->start_time		= local_clock();
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_buf_bytes(b);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}

		bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
					   rb->start_time, !bio->bi_status);
start:
		if (rb->have_ioref)
			enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_read);
		rb->have_ioref = false;

		if (bio->bi_status) {
			bch2_mark_io_failure(&failed, &rb->pick, false);
			continue;
		}

		ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf);
		if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
		    ret == -BCH_ERR_btree_node_read_err_must_retry)
			continue;

		if (ret)
			set_btree_node_read_error(b);

		break;
	}

	bch2_io_failures_to_text(&buf, c, &failed);

	if (btree_node_read_error(b))
		bch2_btree_lost_data(c, &buf, b->c.btree_id);

	/*
	 * only print retry success if we read from a replica with no errors
	 */
	if (btree_node_read_error(b))
		prt_printf(&buf, "ret %s", bch2_err_str(ret));
	else if (failed.nr) {
		if (!bch2_dev_io_failures(&failed, rb->pick.ptr.dev))
			prt_printf(&buf, "retry success");
		else
			prt_printf(&buf, "repair success");
	}

	if ((failed.nr ||
	     btree_node_need_rewrite(b)) &&
	    !btree_node_read_error(b) &&
	    c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
		prt_printf(&buf, " (rewriting node)");
		bch2_btree_node_rewrite_async(c, b);
	}
	prt_newline(&buf);

	if (failed.nr)
		bch2_print_str_ratelimited(c, KERN_ERR, buf.buf);

	async_object_list_del(c, btree_read_bio, rb->list_idx);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);
	printbuf_exit(&buf);
	clear_btree_node_read_in_flight(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c = rb->c;
	struct bch_dev *ca = rb->have_ioref
		? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;

	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
				   rb->start_time, !bio->bi_status);

	queue_work(c->btree_read_complete_wq, &rb->work);
}

void bch2_btree_read_bio_to_text(struct printbuf *out, struct btree_read_bio *rbio)
{
	bch2_bio_to_text(out, &rbio->bio);
}

struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	blk_status_t		err[BCH_REPLICAS_MAX];
};

static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}

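/*
 * Completion for the read-all-replicas path: compare every replica that read
 * successfully, flag mismatches in written sectors or content, and keep the
 * best (most fully written) copy for bch2_btree_node_read_done().
 */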
static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
	bool _saw_error = false, *saw_error = &_saw_error;
	struct printbuf *err_msg = NULL;
	struct bch_io_failures *failed = NULL;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_sectors_written_mismatch,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_bset_after_end,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_data_mismatch,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			printbuf_reset(&buf);

			while (offset < btree_sectors(c)) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				prt_printf(&buf, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
					prt_printf(&buf, "*");
				offset += sectors;
			}

			while (offset < btree_sectors(c)) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						prt_printf(&buf, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					prt_printf(&buf, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						prt_printf(&buf, "*");
				}
				offset++;
			}

			bch_err(c, "replica %u:%s", i, buf.buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
		ret = bch2_btree_node_read_done(c, NULL, b, NULL, NULL);
	} else {
		ret = -1;
	}

	if (ret) {
		set_btree_node_read_error(b);

		struct printbuf buf = PRINTBUF;
		bch2_btree_lost_data(c, &buf, b->c.btree_id);
		if (buf.pos)
			bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);
	} else if (*saw_error)
		bch2_btree_node_rewrite_async(c, b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);
	printbuf_exit(&buf);

	clear_btree_node_read_in_flight(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c = rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
		enumerated_ref_put(&ca->io_ref[READ],
				   BCH_DEV_READ_REF_btree_node_read_all_replicas);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}

/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{
	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded pick;
	struct btree_node_read_all *ra;
	unsigned i;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return bch_err_throw(c, ENOMEM_btree_node_read_all_replicas);

	closure_init(&ra->cl, NULL);
	ra->c	= c;
	ra->b	= b;
	ra->nr	= bch2_bkey_nr_ptrs(k);

	for (i = 0; i < ra->nr; i++) {
		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
		ra->bio[i] = bio_alloc_bioset(NULL,
					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
					      REQ_OP_READ|REQ_SYNC|REQ_META,
					      GFP_NOFS,
					      &c->btree_bio);
	}

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
					BCH_DEV_READ_REF_btree_node_read_all_replicas);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);
		rb->c			= c;
		rb->b			= b;
		rb->ra			= ra;
		rb->start_time		= local_clock();
		rb->have_ioref		= ca != NULL;
		rb->idx			= i;
		rb->pick		= pick;
		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));

		if (rb->have_ioref) {
			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
				     bio_sectors(&rb->bio));
			bio_set_dev(&rb->bio, ca->disk_sb.bdev);

			closure_get(&ra->cl);
			submit_bio(&rb->bio);
		} else {
			ra->err[i] = BLK_STS_REMOVED;
		}

		i++;
	}

	if (sync) {
		closure_sync(&ra->cl);
		btree_node_read_all_replicas_done(&ra->cl.work);
	} else {
		continue_at(&ra->cl, btree_node_read_all_replicas_done,
			    c->btree_read_complete_wq);
	}

	return 0;
}

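/*
 * Read a btree node from the best available replica (or from every replica
 * when bch2_verify_all_btree_replicas is enabled); retries from the remaining
 * replicas are handled by btree_node_read_work().
 */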
void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
			  bool sync)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_and_count(c, btree_node_read, trans, b);

	if (static_branch_unlikely(&bch2_verify_all_btree_replicas) &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick, -1);

	if (ret <= 0) {
		bool ratelimit = true;
		struct printbuf buf = PRINTBUF;
		bch2_log_msg_start(c, &buf);

		prt_str(&buf, "btree node read error: no device to read from\n at ");
		bch2_btree_pos_to_text(&buf, c, b);
		prt_newline(&buf);
		bch2_btree_lost_data(c, &buf, b->c.btree_id);

		if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
		    bch2_fs_emergency_read_only2(c, &buf))
			ratelimit = false;

		static DEFINE_RATELIMIT_STATE(rs,
					      DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
		if (!ratelimit || __ratelimit(&rs))
			bch2_print_str(c, KERN_ERR, buf.buf);
		printbuf_exit(&buf);

		set_btree_node_read_error(b);
		clear_btree_node_read_in_flight(b);
		smp_mb__after_atomic();
		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
		return;
	}

	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);

	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->b			= b;
	rb->ra			= NULL;
	rb->start_time		= local_clock();
	rb->have_ioref		= ca != NULL;
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_end_io		= btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_buf_bytes(b));

	async_object_list_add(c, btree_read_bio, rb, &rb->list_idx);

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);
			bch2_latency_acct(ca, rb->start_time, READ);
			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->btree_read_complete_wq, &rb->work);
	}
}

static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
				  const struct bkey_i *k, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(trans, level != 0);
	bch2_btree_cache_cannibalize_unlock(trans);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	/* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
	bch2_trans_unlock(trans);
	bch2_btree_node_read(trans, b, true);

	if (btree_node_read_error(b)) {
		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, b);
		mutex_unlock(&c->btree_cache.lock);

		ret = bch_err_throw(c, btree_node_read_error);
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
}

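/*
 * State for an asynchronous btree node scrub: read one node from a specific
 * device, verify it, and if it fails validation rewrite the node from a good
 * copy.
 */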
struct btree_node_scrub {
	struct bch_fs		*c;
	struct bch_dev		*ca;
	void			*buf;
	bool			used_mempool;
	unsigned		written;

	enum btree_id		btree;
	unsigned		level;
	struct bkey_buf		key;
	__le64			seq;

	struct work_struct	work;
	struct bio		bio;
};

static bool btree_node_scrub_check(struct bch_fs *c, struct btree_node *data, unsigned ptr_written,
				   struct printbuf *err)
{
	unsigned written = 0;

	if (le64_to_cpu(data->magic) != bset_magic(c)) {
		prt_printf(err, "bad magic: want %llx, got %llx",
			   bset_magic(c), le64_to_cpu(data->magic));
		return false;
	}

	while (written < (ptr_written ?: btree_sectors(c))) {
		struct btree_node_entry *bne;
		struct bset *i;
		bool first = !written;

		if (first) {
			bne = NULL;
			i = &data->keys;
		} else {
			bne = (void *) data + (written << 9);
			i = &bne->keys;

			if (!ptr_written && i->seq != data->keys.seq)
				break;
		}

		struct nonce nonce = btree_nonce(i, written << 9);
		bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));

		if (first) {
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, data);
				if (bch2_crc_cmp(data->csum, csum)) {
					bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), data->csum, csum);
					return false;
				}
			}

			written += vstruct_sectors(data, c->block_bits);
		} else {
			if (good_csum_type) {
				struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
				if (bch2_crc_cmp(bne->csum, csum)) {
					bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), bne->csum, csum);
					return false;
				}
			}

			written += vstruct_sectors(bne, c->block_bits);
		}
	}

	return true;
}

static void btree_node_scrub_work(struct work_struct *work)
{
	struct btree_node_scrub *scrub = container_of(work, struct btree_node_scrub, work);
	struct bch_fs *c = scrub->c;
	struct printbuf err = PRINTBUF;

	__bch2_btree_pos_to_text(&err, c, scrub->btree, scrub->level,
				 bkey_i_to_s_c(scrub->key.k));
	prt_newline(&err);

	if (!btree_node_scrub_check(c, scrub->buf, scrub->written, &err)) {
		struct btree_trans *trans = bch2_trans_get(c);

		struct btree_iter iter;
		bch2_trans_node_iter_init(trans, &iter, scrub->btree,
					  scrub->key.k->k.p, 0, scrub->level - 1, 0);

		struct btree *b;
		int ret = lockrestart_do(trans,
			PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(trans, &iter)));
		if (ret)
			goto err;

		if (bkey_i_to_btree_ptr_v2(&b->key)->v.seq == scrub->seq) {
			bch_err(c, "error validating btree node during scrub on %s at btree %s",
				scrub->ca->name, err.buf);

			ret = bch2_btree_node_rewrite(trans, &iter, b, 0, 0);
		}
err:
		bch2_trans_iter_exit(trans, &iter);
		bch2_trans_begin(trans);
		bch2_trans_put(trans);
	}

	printbuf_exit(&err);
	bch2_bkey_buf_exit(&scrub->key, c);
	btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
	enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
	kfree(scrub);
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub);
}

static void btree_node_scrub_endio(struct bio *bio)
{
	struct btree_node_scrub *scrub = container_of(bio, struct btree_node_scrub, bio);

	queue_work(scrub->c->btree_read_complete_wq, &scrub->work);
}

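/*
 * Kick off a scrub read of one btree node from device @dev; completion is
 * handled asynchronously by btree_node_scrub_work() above.
 */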
KEY_TYPE_btree_ptr_v2) 2029 return 0; 2030 2031 struct bch_fs *c = trans->c; 2032 2033 if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_node_scrub)) 2034 return bch_err_throw(c, erofs_no_writes); 2035 2036 struct extent_ptr_decoded pick; 2037 int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev); 2038 if (ret <= 0) 2039 goto err; 2040 2041 struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, 2042 BCH_DEV_READ_REF_btree_node_scrub); 2043 if (!ca) { 2044 ret = bch_err_throw(c, device_offline); 2045 goto err; 2046 } 2047 2048 bool used_mempool = false; 2049 void *buf = btree_bounce_alloc(c, c->opts.btree_node_size, &used_mempool); 2050 2051 unsigned vecs = buf_pages(buf, c->opts.btree_node_size); 2052 2053 struct btree_node_scrub *scrub = 2054 kzalloc(sizeof(*scrub) + sizeof(struct bio_vec) * vecs, GFP_KERNEL); 2055 if (!scrub) { 2056 ret = -ENOMEM; 2057 goto err_free; 2058 } 2059 2060 scrub->c = c; 2061 scrub->ca = ca; 2062 scrub->buf = buf; 2063 scrub->used_mempool = used_mempool; 2064 scrub->written = btree_ptr_sectors_written(k); 2065 2066 scrub->btree = btree; 2067 scrub->level = level; 2068 bch2_bkey_buf_init(&scrub->key); 2069 bch2_bkey_buf_reassemble(&scrub->key, c, k); 2070 scrub->seq = bkey_s_c_to_btree_ptr_v2(k).v->seq; 2071 2072 INIT_WORK(&scrub->work, btree_node_scrub_work); 2073 2074 bio_init(&scrub->bio, ca->disk_sb.bdev, scrub->bio.bi_inline_vecs, vecs, REQ_OP_READ); 2075 bch2_bio_map(&scrub->bio, scrub->buf, c->opts.btree_node_size); 2076 scrub->bio.bi_iter.bi_sector = pick.ptr.offset; 2077 scrub->bio.bi_end_io = btree_node_scrub_endio; 2078 submit_bio(&scrub->bio); 2079 return 0; 2080 err_free: 2081 btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf); 2082 enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub); 2083 err: 2084 enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub); 2085 return ret; 2086 } 2087 2088 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b, 2089 struct btree_write *w) 2090 { 2091 unsigned long old, new; 2092 2093 old = READ_ONCE(b->will_make_reachable); 2094 do { 2095 new = old; 2096 if (!(old & 1)) 2097 break; 2098 2099 new &= ~1UL; 2100 } while (!try_cmpxchg(&b->will_make_reachable, &old, new)); 2101 2102 if (old & 1) 2103 closure_put(&((struct btree_update *) new)->cl); 2104 2105 bch2_journal_pin_drop(&c->journal, &w->journal); 2106 } 2107 2108 static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time) 2109 { 2110 struct btree_write *w = btree_prev_write(b); 2111 unsigned long old, new; 2112 unsigned type = 0; 2113 2114 bch2_btree_complete_write(c, b, w); 2115 2116 if (start_time) 2117 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_write], start_time); 2118 2119 old = READ_ONCE(b->flags); 2120 do { 2121 new = old; 2122 2123 if ((old & (1U << BTREE_NODE_dirty)) && 2124 (old & (1U << BTREE_NODE_need_write)) && 2125 !(old & (1U << BTREE_NODE_never_write)) && 2126 !(old & (1U << BTREE_NODE_write_blocked)) && 2127 !(old & (1U << BTREE_NODE_will_make_reachable))) { 2128 new &= ~(1U << BTREE_NODE_dirty); 2129 new &= ~(1U << BTREE_NODE_need_write); 2130 new |= (1U << BTREE_NODE_write_in_flight); 2131 new |= (1U << BTREE_NODE_write_in_flight_inner); 2132 new |= (1U << BTREE_NODE_just_written); 2133 new ^= (1U << BTREE_NODE_write_idx); 2134 2135 type = new & BTREE_WRITE_TYPE_MASK; 2136 new &= ~BTREE_WRITE_TYPE_MASK; 2137 } else { 2138 new &= ~(1U << BTREE_NODE_write_in_flight); 2139 new &= ~(1U << BTREE_NODE_write_in_flight_inner); 2140 } 
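		/*
		 * If the node was redirtied and flagged as needing another
		 * write while this one was in flight, write_in_flight stays
		 * set in the new flags so the write is restarted below with
		 * BTREE_WRITE_ALREADY_STARTED; otherwise both in-flight bits
		 * are cleared and waiters are woken once the flags update
		 * succeeds.
		 */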
2141 } while (!try_cmpxchg(&b->flags, &old, new)); 2142 2143 if (new & (1U << BTREE_NODE_write_in_flight)) 2144 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type); 2145 else { 2146 smp_mb__after_atomic(); 2147 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); 2148 } 2149 } 2150 2151 static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time) 2152 { 2153 struct btree_trans *trans = bch2_trans_get(c); 2154 2155 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); 2156 2157 /* we don't need transaction context anymore after we got the lock. */ 2158 bch2_trans_put(trans); 2159 __btree_node_write_done(c, b, start_time); 2160 six_unlock_read(&b->c.lock); 2161 } 2162 2163 static void btree_node_write_work(struct work_struct *work) 2164 { 2165 struct btree_write_bio *wbio = 2166 container_of(work, struct btree_write_bio, work); 2167 struct bch_fs *c = wbio->wbio.c; 2168 struct btree *b = wbio->wbio.bio.bi_private; 2169 u64 start_time = wbio->start_time; 2170 int ret = 0; 2171 2172 btree_bounce_free(c, 2173 wbio->data_bytes, 2174 wbio->wbio.used_mempool, 2175 wbio->data); 2176 2177 bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr, 2178 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev)); 2179 2180 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) { 2181 ret = bch_err_throw(c, btree_node_write_all_failed); 2182 goto err; 2183 } 2184 2185 if (wbio->wbio.first_btree_write) { 2186 if (wbio->wbio.failed.nr) { 2187 2188 } 2189 } else { 2190 ret = bch2_trans_do(c, 2191 bch2_btree_node_update_key_get_iter(trans, b, &wbio->key, 2192 BCH_WATERMARK_interior_updates| 2193 BCH_TRANS_COMMIT_journal_reclaim| 2194 BCH_TRANS_COMMIT_no_enospc| 2195 BCH_TRANS_COMMIT_no_check_rw, 2196 !wbio->wbio.failed.nr)); 2197 if (ret) 2198 goto err; 2199 } 2200 out: 2201 async_object_list_del(c, btree_write_bio, wbio->list_idx); 2202 bio_put(&wbio->wbio.bio); 2203 btree_node_write_done(c, b, start_time); 2204 return; 2205 err: 2206 set_btree_node_noevict(b); 2207 2208 if (!bch2_err_matches(ret, EROFS)) { 2209 struct printbuf buf = PRINTBUF; 2210 prt_printf(&buf, "writing btree node: %s\n ", bch2_err_str(ret)); 2211 bch2_btree_pos_to_text(&buf, c, b); 2212 bch2_fs_fatal_error(c, "%s", buf.buf); 2213 printbuf_exit(&buf); 2214 } 2215 goto out; 2216 } 2217 2218 static void btree_node_write_endio(struct bio *bio) 2219 { 2220 struct bch_write_bio *wbio = to_wbio(bio); 2221 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL; 2222 struct bch_write_bio *orig = parent ?: wbio; 2223 struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio); 2224 struct bch_fs *c = wbio->c; 2225 struct btree *b = wbio->bio.bi_private; 2226 struct bch_dev *ca = wbio->have_ioref ? 
bch2_dev_have_ref(c, wbio->dev) : NULL; 2227 2228 bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write, 2229 wbio->submit_time, !bio->bi_status); 2230 2231 if (ca && bio->bi_status) { 2232 struct printbuf buf = PRINTBUF; 2233 buf.atomic++; 2234 prt_printf(&buf, "btree write error: %s\n ", 2235 bch2_blk_status_to_str(bio->bi_status)); 2236 bch2_btree_pos_to_text(&buf, c, b); 2237 bch_err_dev_ratelimited(ca, "%s", buf.buf); 2238 printbuf_exit(&buf); 2239 } 2240 2241 if (bio->bi_status) { 2242 unsigned long flags; 2243 spin_lock_irqsave(&c->btree_write_error_lock, flags); 2244 bch2_dev_list_add_dev(&orig->failed, wbio->dev); 2245 spin_unlock_irqrestore(&c->btree_write_error_lock, flags); 2246 } 2247 2248 /* 2249 * XXX: we should be using io_ref[WRITE], but we aren't retrying failed 2250 * btree writes yet (due to device removal/ro): 2251 */ 2252 if (wbio->have_ioref) 2253 enumerated_ref_put(&ca->io_ref[READ], 2254 BCH_DEV_READ_REF_btree_node_write); 2255 2256 if (parent) { 2257 bio_put(bio); 2258 bio_endio(&parent->bio); 2259 return; 2260 } 2261 2262 clear_btree_node_write_in_flight_inner(b); 2263 smp_mb__after_atomic(); 2264 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner); 2265 INIT_WORK(&wb->work, btree_node_write_work); 2266 queue_work(c->btree_write_complete_wq, &wb->work); 2267 } 2268 2269 static int validate_bset_for_write(struct bch_fs *c, struct btree *b, 2270 struct bset *i, unsigned sectors) 2271 { 2272 int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key), 2273 (struct bkey_validate_context) { 2274 .from = BKEY_VALIDATE_btree_node, 2275 .level = b->c.level + 1, 2276 .btree = b->c.btree_id, 2277 .flags = BCH_VALIDATE_write, 2278 }); 2279 if (ret) { 2280 bch2_fs_inconsistent(c, "invalid btree node key before write"); 2281 return ret; 2282 } 2283 2284 ret = validate_bset_keys(c, b, i, WRITE, NULL, NULL) ?: 2285 validate_bset(c, NULL, b, i, b->written, sectors, WRITE, NULL, NULL); 2286 if (ret) { 2287 bch2_inconsistent_error(c); 2288 dump_stack(); 2289 } 2290 2291 return ret; 2292 } 2293 2294 static void btree_write_submit(struct work_struct *work) 2295 { 2296 struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work); 2297 BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; 2298 2299 bkey_copy(&tmp.k, &wbio->key); 2300 2301 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr) 2302 ptr->offset += wbio->sector_offset; 2303 2304 bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, 2305 &tmp.k, false); 2306 } 2307 2308 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags) 2309 { 2310 struct btree_write_bio *wbio; 2311 struct bset *i; 2312 struct btree_node *bn = NULL; 2313 struct btree_node_entry *bne = NULL; 2314 struct sort_iter_stack sort_iter; 2315 struct nonce nonce; 2316 unsigned bytes_to_write, sectors_to_write, bytes, u64s; 2317 u64 seq = 0; 2318 bool used_mempool; 2319 unsigned long old, new; 2320 bool validate_before_checksum = false; 2321 enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK; 2322 void *data; 2323 u64 start_time = local_clock(); 2324 int ret; 2325 2326 if (flags & BTREE_WRITE_ALREADY_STARTED) 2327 goto do_write; 2328 2329 /* 2330 * We may only have a read lock on the btree node - the dirty bit is our 2331 * "lock" against racing with other threads that may be trying to start 2332 * a write, we do a write iff we clear the dirty bit. 
Since setting the 2333 * dirty bit requires a write lock, we can't race with other threads 2334 * redirtying it: 2335 */ 2336 old = READ_ONCE(b->flags); 2337 do { 2338 new = old; 2339 2340 if (!(old & (1 << BTREE_NODE_dirty))) 2341 return; 2342 2343 if ((flags & BTREE_WRITE_ONLY_IF_NEED) && 2344 !(old & (1 << BTREE_NODE_need_write))) 2345 return; 2346 2347 if (old & 2348 ((1 << BTREE_NODE_never_write)| 2349 (1 << BTREE_NODE_write_blocked))) 2350 return; 2351 2352 if (b->written && 2353 (old & (1 << BTREE_NODE_will_make_reachable))) 2354 return; 2355 2356 if (old & (1 << BTREE_NODE_write_in_flight)) 2357 return; 2358 2359 if (flags & BTREE_WRITE_ONLY_IF_NEED) 2360 type = new & BTREE_WRITE_TYPE_MASK; 2361 new &= ~BTREE_WRITE_TYPE_MASK; 2362 2363 new &= ~(1 << BTREE_NODE_dirty); 2364 new &= ~(1 << BTREE_NODE_need_write); 2365 new |= (1 << BTREE_NODE_write_in_flight); 2366 new |= (1 << BTREE_NODE_write_in_flight_inner); 2367 new |= (1 << BTREE_NODE_just_written); 2368 new ^= (1 << BTREE_NODE_write_idx); 2369 } while (!try_cmpxchg_acquire(&b->flags, &old, new)); 2370 2371 if (new & (1U << BTREE_NODE_need_write)) 2372 return; 2373 do_write: 2374 BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0)); 2375 2376 atomic_long_dec(&c->btree_cache.nr_dirty); 2377 2378 BUG_ON(btree_node_fake(b)); 2379 BUG_ON((b->will_make_reachable != 0) != !b->written); 2380 2381 BUG_ON(b->written >= btree_sectors(c)); 2382 BUG_ON(b->written & (block_sectors(c) - 1)); 2383 BUG_ON(bset_written(b, btree_bset_last(b))); 2384 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c)); 2385 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format))); 2386 2387 bch2_sort_whiteouts(c, b); 2388 2389 sort_iter_stack_init(&sort_iter, b); 2390 2391 bytes = !b->written 2392 ? sizeof(struct btree_node) 2393 : sizeof(struct btree_node_entry); 2394 2395 bytes += b->whiteout_u64s * sizeof(u64); 2396 2397 for_each_bset(b, t) { 2398 i = bset(b, t); 2399 2400 if (bset_written(b, i)) 2401 continue; 2402 2403 bytes += le16_to_cpu(i->u64s) * sizeof(u64); 2404 sort_iter_add(&sort_iter.iter, 2405 btree_bkey_first(b, t), 2406 btree_bkey_last(b, t)); 2407 seq = max(seq, le64_to_cpu(i->journal_seq)); 2408 } 2409 2410 BUG_ON(b->written && !seq); 2411 2412 /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */ 2413 bytes += 8; 2414 2415 /* buffer must be a multiple of the block size */ 2416 bytes = round_up(bytes, block_bytes(c)); 2417 2418 data = btree_bounce_alloc(c, bytes, &used_mempool); 2419 2420 if (!b->written) { 2421 bn = data; 2422 *bn = *b->data; 2423 i = &bn->keys; 2424 } else { 2425 bne = data; 2426 bne->keys = b->data->keys; 2427 i = &bne->keys; 2428 } 2429 2430 i->journal_seq = cpu_to_le64(seq); 2431 i->u64s = 0; 2432 2433 sort_iter_add(&sort_iter.iter, 2434 unwritten_whiteouts_start(b), 2435 unwritten_whiteouts_end(b)); 2436 SET_BSET_SEPARATE_WHITEOUTS(i, false); 2437 2438 u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter); 2439 le16_add_cpu(&i->u64s, u64s); 2440 2441 b->whiteout_u64s = 0; 2442 2443 BUG_ON(!b->written && i->u64s != b->data->keys.u64s); 2444 2445 set_needs_whiteout(i, false); 2446 2447 /* do we have data to write? 
*/ 2448 if (b->written && !i->u64s) 2449 goto nowrite; 2450 2451 bytes_to_write = vstruct_end(i) - data; 2452 sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9; 2453 2454 if (!b->written && 2455 b->key.k.type == KEY_TYPE_btree_ptr_v2) 2456 BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write); 2457 2458 memset(data + bytes_to_write, 0, 2459 (sectors_to_write << 9) - bytes_to_write); 2460 2461 BUG_ON(b->written + sectors_to_write > btree_sectors(c)); 2462 BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN); 2463 BUG_ON(i->seq != b->data->keys.seq); 2464 2465 i->version = cpu_to_le16(c->sb.version); 2466 SET_BSET_OFFSET(i, b->written); 2467 SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c)); 2468 2469 if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i))) 2470 validate_before_checksum = true; 2471 2472 /* validate_bset will be modifying: */ 2473 if (le16_to_cpu(i->version) < bcachefs_metadata_version_current) 2474 validate_before_checksum = true; 2475 2476 /* if we're going to be encrypting, check metadata validity first: */ 2477 if (validate_before_checksum && 2478 validate_bset_for_write(c, b, i, sectors_to_write)) 2479 goto err; 2480 2481 ret = bset_encrypt(c, i, b->written << 9); 2482 if (bch2_fs_fatal_err_on(ret, c, 2483 "encrypting btree node: %s", bch2_err_str(ret))) 2484 goto err; 2485 2486 nonce = btree_nonce(i, b->written << 9); 2487 2488 if (bn) 2489 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn); 2490 else 2491 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); 2492 2493 /* if we're not encrypting, check metadata after checksumming: */ 2494 if (!validate_before_checksum && 2495 validate_bset_for_write(c, b, i, sectors_to_write)) 2496 goto err; 2497 2498 /* 2499 * We handle btree write errors by immediately halting the journal - 2500 * after we've done that, we can't issue any subsequent btree writes 2501 * because they might have pointers to new nodes that failed to write. 
2502 * 2503 * Furthermore, there's no point in doing any more btree writes because 2504 * with the journal stopped, we're never going to update the journal to 2505 * reflect that those writes were done and the data flushed from the 2506 * journal: 2507 * 2508 * Also on journal error, the pending write may have updates that were 2509 * never journalled (interior nodes, see btree_update_nodes_written()) - 2510 * it's critical that we don't do the write in that case otherwise we 2511 * will have updates visible that weren't in the journal: 2512 * 2513 * Make sure to update b->written so bch2_btree_init_next() doesn't 2514 * break: 2515 */ 2516 if (bch2_journal_error(&c->journal) || 2517 c->opts.nochanges) 2518 goto err; 2519 2520 trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write); 2521 2522 wbio = container_of(bio_alloc_bioset(NULL, 2523 buf_pages(data, sectors_to_write << 9), 2524 REQ_OP_WRITE|REQ_META, 2525 GFP_NOFS, 2526 &c->btree_bio), 2527 struct btree_write_bio, wbio.bio); 2528 wbio_init(&wbio->wbio.bio); 2529 wbio->data = data; 2530 wbio->data_bytes = bytes; 2531 wbio->sector_offset = b->written; 2532 wbio->start_time = start_time; 2533 wbio->wbio.c = c; 2534 wbio->wbio.used_mempool = used_mempool; 2535 wbio->wbio.first_btree_write = !b->written; 2536 wbio->wbio.bio.bi_end_io = btree_node_write_endio; 2537 wbio->wbio.bio.bi_private = b; 2538 2539 bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9); 2540 2541 bkey_copy(&wbio->key, &b->key); 2542 2543 b->written += sectors_to_write; 2544 2545 if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2) 2546 bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written = 2547 cpu_to_le16(b->written); 2548 2549 atomic64_inc(&c->btree_write_stats[type].nr); 2550 atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes); 2551 2552 async_object_list_add(c, btree_write_bio, wbio, &wbio->list_idx); 2553 2554 INIT_WORK(&wbio->work, btree_write_submit); 2555 queue_work(c->btree_write_submit_wq, &wbio->work); 2556 return; 2557 err: 2558 set_btree_node_noevict(b); 2559 b->written += sectors_to_write; 2560 nowrite: 2561 btree_bounce_free(c, bytes, used_mempool, data); 2562 __btree_node_write_done(c, b, 0); 2563 } 2564 2565 /* 2566 * Work that must be done with write lock held: 2567 */ 2568 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) 2569 { 2570 bool invalidated_iter = false; 2571 struct btree_node_entry *bne; 2572 2573 if (!btree_node_just_written(b)) 2574 return false; 2575 2576 BUG_ON(b->whiteout_u64s); 2577 2578 clear_btree_node_just_written(b); 2579 2580 /* 2581 * Note: immediately after write, bset_written() doesn't work - the 2582 * amount of data we had to write after compaction might have been 2583 * smaller than the offset of the last bset. 
2584 * 2585 * However, we know that all bsets have been written here, as long as 2586 * we're still holding the write lock: 2587 */ 2588 2589 /* 2590 * XXX: decide if we really want to unconditionally sort down to a 2591 * single bset: 2592 */ 2593 if (b->nsets > 1) { 2594 btree_node_sort(c, b, 0, b->nsets); 2595 invalidated_iter = true; 2596 } else { 2597 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL); 2598 } 2599 2600 for_each_bset(b, t) 2601 set_needs_whiteout(bset(b, t), true); 2602 2603 bch2_btree_verify(c, b); 2604 2605 /* 2606 * If later we don't unconditionally sort down to a single bset, we have 2607 * to ensure this is still true: 2608 */ 2609 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b)); 2610 2611 bne = want_new_bset(c, b); 2612 if (bne) 2613 bch2_bset_init_next(b, bne); 2614 2615 bch2_btree_build_aux_trees(b); 2616 2617 return invalidated_iter; 2618 } 2619 2620 /* 2621 * Use this one if the node is intent locked: 2622 */ 2623 void bch2_btree_node_write(struct bch_fs *c, struct btree *b, 2624 enum six_lock_type lock_type_held, 2625 unsigned flags) 2626 { 2627 if (lock_type_held == SIX_LOCK_intent || 2628 (lock_type_held == SIX_LOCK_read && 2629 six_lock_tryupgrade(&b->c.lock))) { 2630 __bch2_btree_node_write(c, b, flags); 2631 2632 /* don't cycle lock unnecessarily: */ 2633 if (btree_node_just_written(b) && 2634 six_trylock_write(&b->c.lock)) { 2635 bch2_btree_post_write_cleanup(c, b); 2636 six_unlock_write(&b->c.lock); 2637 } 2638 2639 if (lock_type_held == SIX_LOCK_read) 2640 six_lock_downgrade(&b->c.lock); 2641 } else { 2642 __bch2_btree_node_write(c, b, flags); 2643 if (lock_type_held == SIX_LOCK_write && 2644 btree_node_just_written(b)) 2645 bch2_btree_post_write_cleanup(c, b); 2646 } 2647 } 2648 2649 void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b, 2650 enum six_lock_type lock_type_held, 2651 unsigned flags) 2652 { 2653 struct bch_fs *c = trans->c; 2654 2655 if (lock_type_held == SIX_LOCK_intent || 2656 (lock_type_held == SIX_LOCK_read && 2657 six_lock_tryupgrade(&b->c.lock))) { 2658 __bch2_btree_node_write(c, b, flags); 2659 2660 /* don't cycle lock unnecessarily: */ 2661 if (btree_node_just_written(b) && 2662 six_trylock_write(&b->c.lock)) { 2663 bch2_btree_post_write_cleanup(c, b); 2664 __bch2_btree_node_unlock_write(trans, b); 2665 } 2666 2667 if (lock_type_held == SIX_LOCK_read) 2668 six_lock_downgrade(&b->c.lock); 2669 } else { 2670 __bch2_btree_node_write(c, b, flags); 2671 if (lock_type_held == SIX_LOCK_write && 2672 btree_node_just_written(b)) 2673 bch2_btree_post_write_cleanup(c, b); 2674 } 2675 } 2676 2677 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag) 2678 { 2679 struct bucket_table *tbl; 2680 struct rhash_head *pos; 2681 struct btree *b; 2682 unsigned i; 2683 bool ret = false; 2684 restart: 2685 rcu_read_lock(); 2686 for_each_cached_btree(b, c, tbl, i, pos) 2687 if (test_bit(flag, &b->flags)) { 2688 rcu_read_unlock(); 2689 wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE); 2690 ret = true; 2691 goto restart; 2692 } 2693 rcu_read_unlock(); 2694 2695 return ret; 2696 } 2697 2698 bool bch2_btree_flush_all_reads(struct bch_fs *c) 2699 { 2700 return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight); 2701 } 2702 2703 bool bch2_btree_flush_all_writes(struct bch_fs *c) 2704 { 2705 return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight); 2706 } 2707 2708 static const char * const bch2_btree_write_types[] = { 2709 #define x(t, n) [n] = #t, 2710 BCH_BTREE_WRITE_TYPES() 2711 
NULL 2712 }; 2713 2714 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c) 2715 { 2716 printbuf_tabstop_push(out, 20); 2717 printbuf_tabstop_push(out, 10); 2718 2719 prt_printf(out, "\tnr\tsize\n"); 2720 2721 for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) { 2722 u64 nr = atomic64_read(&c->btree_write_stats[i].nr); 2723 u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes); 2724 2725 prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr); 2726 prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0); 2727 prt_newline(out); 2728 } 2729 } 2730