// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	bch2_assert_btree_nodes_not_locked();

	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	bch2_assert_btree_nodes_not_locked();

	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	bch2_assert_btree_nodes_not_locked();

	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_p_next(start);
	     k != end;
	     p = k, k = bkey_p_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
	}
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
		k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		vpfree(p, size);
}

static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > btree_bytes(c));

	*used_mempool = false;
	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
	}
	memalloc_nofs_restore(flags);
	return p;
}
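
/*
 * Sort an array of pointers to packed bkeys (an in-place heapsort), ordered by
 * the btree node's packed key comparison function; used below for sorting the
 * unwritten whiteouts before they're written out.
 */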
static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
						 ptrs[c],
						 ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
					    ptrs[a],
					    ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}

static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(c, b);
	     k != unwritten_whiteouts_end(c, b);
	     k = bkey_p_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_p_copy(k, *ptrs);
		k = bkey_p_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(c, b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}

static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	struct bset_tree *t;
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start = btree_bkey_first(b, t);
		end = btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_p_next(k);

			if (!bkey_deleted(k)) {
				bkey_p_copy(out, k);
				out = bkey_p_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}
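
/*
 * Merge bsets [start_idx, end_idx) of @b into a single sorted bset, via a
 * bounce buffer: when the entire node is being sorted the bounce buffer is
 * simply swapped with the node's buffer, otherwise the result is copied back
 * into the first bset being sorted.
 */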
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx,
			    bool filter_whiteouts)
{
	struct btree_node *out;
	struct sort_iter_stack sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_stack_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_bytes(c)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_bytes(c));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	nr = bch2_sort_repack(btree_bset_first(dst),
			      src, &src_iter,
			      &dst->format,
			      true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

/*
 * We're about to add another bset to the btree node, so if there's currently
 * too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx,
				b->nsets, false);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx, false);
		ret = true;
	}

	return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
	unsigned mid_u64s_bits =
		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}
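
/*
 * Worked example for the threshold above (with hypothetical numbers, not
 * taken from the code): if btree_max_u64s(c) were 2^15 and
 * BTREE_WRITE_SET_U64s_BITS were 9, the middle bset would be considered too
 * big once it grew past 2^((15 + 9) / 2) = 2^12 u64s - the geometric mean of
 * the two bounds.
 */
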
/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new
 * bset if @b doesn't already have one.
 *
 * Returns true if we sorted (i.e. invalidated iterators)
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));
	BUG_ON(btree_node_just_written(b));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b) &&
	    should_compact_all(c, b)) {
		bch2_btree_node_write(c, b, SIX_LOCK_write,
				      BTREE_WRITE_init_next_bset);
		reinit_iter = true;
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  struct btree *b, struct bset *i,
			  unsigned offset, int write)
{
	prt_printf(out, bch2_log_msg(c, "%s"),
		   write == READ
		   ? "error validating btree node "
		   : "corrupt btree node before write ");
	if (ca)
		prt_printf(out, "on %s ", ca->name);
	prt_printf(out, "at btree ");
	bch2_btree_pos_to_text(out, c, b);

	prt_printf(out, "\n node offset %u", b->written);
	if (i)
		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
	prt_str(out, ": ");
}
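
/*
 * Common helper for btree node read/write errors: when writing, the error is
 * only tolerated with errors=continue; when reading, the error is classified
 * (fixable, want_retry, must_retry, bad_node, incompatible) and either fixed
 * via fsck_err(), retried from another replica, or escalated to a topology
 * error.
 */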
__printf(9, 10)
static int __btree_err(int ret,
		       struct bch_fs *c,
		       struct bch_dev *ca,
		       struct btree *b,
		       struct bset *i,
		       int write,
		       bool have_retry,
		       enum bch_sb_error_id err_type,
		       const char *fmt, ...)
{
	struct printbuf out = PRINTBUF;
	va_list args;

	btree_err_msg(&out, c, ca, b, i, b->written, write);

	va_start(args, fmt);
	prt_vprintf(&out, fmt, args);
	va_end(args);

	if (write == WRITE) {
		bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = c->opts.errors == BCH_ON_ERROR_continue
			? 0
			: -BCH_ERR_fsck_errors_not_fixed;
		goto out;
	}

	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
		ret = -BCH_ERR_btree_node_read_err_fixable;
	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
		ret = -BCH_ERR_btree_node_read_err_bad_node;

	if (ret != -BCH_ERR_btree_node_read_err_fixable)
		bch2_sb_error_count(c, err_type);

	switch (ret) {
	case -BCH_ERR_btree_node_read_err_fixable:
		ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
		if (ret != -BCH_ERR_fsck_fix &&
		    ret != -BCH_ERR_fsck_ignore)
			goto fsck_err;
		ret = -BCH_ERR_fsck_fix;
		break;
	case -BCH_ERR_btree_node_read_err_want_retry:
	case -BCH_ERR_btree_node_read_err_must_retry:
		bch2_print_string_as_lines(KERN_ERR, out.buf);
		break;
	case -BCH_ERR_btree_node_read_err_bad_node:
		bch2_print_string_as_lines(KERN_ERR, out.buf);
		bch2_topology_error(c);
		ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?: -EIO;
		break;
	case -BCH_ERR_btree_node_read_err_incompatible:
		bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = -BCH_ERR_fsck_errors_not_fixed;
		break;
	default:
		BUG();
	}
out:
fsck_err:
	printbuf_exit(&out);
	return ret;
}

#define btree_err(type, c, ca, b, i, _err_type, msg, ...)		\
({									\
	int _ret = __btree_err(type, c, ca, b, i, write, have_retry,	\
			       BCH_FSCK_ERR_##_err_type,		\
			       msg, ##__VA_ARGS__);			\
									\
	if (_ret != -BCH_ERR_fsck_fix) {				\
		ret = _ret;						\
		goto fsck_err;						\
	}								\
									\
	*saw_error = true;						\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
		}
	}

	/*
	 * Always rebuild search trees: eytzinger search tree nodes directly
	 * depend on the values of min/max key:
	 */
	bch2_bset_set_no_aux_tree(b, b->set);
	bch2_btree_build_aux_trees(b);

	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;
	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
	}
}

static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, unsigned sectors,
			 int write, bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	btree_err_on(!bch2_version_compatible(version),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i,
		     btree_node_unsupported_version,
		     "unsupported bset version %u.%u",
		     BCH_VERSION_MAJOR(version),
		     BCH_VERSION_MINOR(version));

	if (btree_err_on(version < c->sb.version_min,
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i,
			 btree_node_bset_older_than_sb_min,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version_min = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	if (btree_err_on(BCH_VERSION_MAJOR(version) >
			 BCH_VERSION_MAJOR(c->sb.version),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i,
			 btree_node_bset_newer_than_sb,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i,
		     btree_node_unsupported_version,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	if (btree_err_on(offset + sectors > btree_sectors(c),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, ca, b, i,
			 bset_past_end_of_btree_node,
			 "bset past end of btree node")) {
		i->u64s = 0;
		ret = 0;
		goto out;
	}

	btree_err_on(offset && !i->u64s,
		     -BCH_ERR_btree_node_read_err_fixable,
		     c, ca, b, i,
		     bset_empty,
		     "empty bset");

	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
		     -BCH_ERR_btree_node_read_err_want_retry,
		     c, ca, b, i,
		     bset_wrong_sector_offset,
		     "bset at wrong sector offset");

	if (!offset) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			/* XXX endianness */
			btree_err_on(bp->seq != bn->keys.seq,
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL,
				     bset_bad_seq,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i,
			     btree_node_bad_btree,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i,
			     btree_node_bad_level,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL,
				     btree_node_bad_min_key,
				     "incorrect min_key: got %s should be %s",
				     (printbuf_reset(&buf1),
				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
				     (printbuf_reset(&buf2),
				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
		}

		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i,
			     btree_node_bad_max_key,
			     "incorrect max key %s",
			     (printbuf_reset(&buf1),
			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
			     -BCH_ERR_btree_node_read_err_bad_node,
			     c, ca, b, i,
			     btree_node_bad_format,
			     "invalid bkey format: %s\n %s", buf1.buf,
			     (printbuf_reset(&buf2),
			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
		printbuf_reset(&buf1);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
out:
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static int bset_key_invalid(struct bch_fs *c, struct btree *b,
			    struct bkey_s_c k,
			    bool updated_range, int rw,
			    struct printbuf *err)
{
	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}
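
/*
 * Validate the keys in a single bset: keys with a bad format, keys that fail
 * bkey validation and keys that are out of order are dropped (fixable
 * errors), with the remaining keys shifted down over them; a key that extends
 * past the end of the bset truncates the bset at that point.
 */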
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			      struct bset *i, int write,
			      bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	struct printbuf buf = PRINTBUF;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;

		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i,
				 btree_node_bkey_past_bset_end,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i,
				 btree_node_bkey_bad_format,
				 "invalid bkey format %u", k->format)) {
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		/* XXX: validate k->u64s */
		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		printbuf_reset(&buf);
		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
			printbuf_reset(&buf);
			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
			prt_printf(&buf, "\n ");
			bch2_bkey_val_to_text(&buf, c, u.s_c);

			btree_err(-BCH_ERR_btree_node_read_err_fixable,
				  c, NULL, b, i,
				  btree_node_bad_bkey,
				  "invalid bkey: %s", buf.buf);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
			struct bkey up = bkey_unpack_key(b, prev);

			printbuf_reset(&buf);
			prt_printf(&buf, "keys out of order: ");
			bch2_bkey_to_text(&buf, &up);
			prt_printf(&buf, " > ");
			bch2_bkey_to_text(&buf, u.k);

			bch2_dump_bset(c, b, i, 0);

			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
				      c, NULL, b, i,
				      btree_node_bkey_out_of_order,
				      "%s", buf.buf)) {
				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
				memmove_u64s_down(k, bkey_p_next(k),
						  (u64 *) vstruct_end(i) - (u64 *) k);
				continue;
			}
		}

		prev = k;
		k = bkey_p_next(k);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
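
/*
 * Parse and validate a btree node we've just read: verify the checksum of and
 * decrypt each bset, validate its header and keys, then sort all the bsets
 * together into a single bset, dropping any keys whose values don't validate.
 */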
int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b, bool have_retry, bool *saw_error)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bch_extent_ptr *ptr;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned u64s;
	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
	struct printbuf buf = PRINTBUF;
	int ret = 0, retry_read = 0, write = READ;

	b->version_ondisk = U16_MAX;
	/* We might get called multiple times on read retry: */
	b->written = 0;

	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

	if (bch2_meta_read_fault("btree"))
		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
			  c, ca, b, NULL,
			  btree_node_fault_injected,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     -BCH_ERR_btree_node_read_err_must_retry,
		     c, ca, b, NULL,
		     btree_node_bad_magic,
		     "bad magic: want %llx, got %llx",
		     bset_magic(c), le64_to_cpu(b->data->magic));

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		btree_err_on(b->data->keys.seq != bp->seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL,
			     btree_node_bad_seq,
			     "got wrong btree node (seq %llx want %llx)",
			     b->data->keys.seq, bp->seq);
	} else {
		btree_err_on(!b->data->keys.seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL,
			     btree_node_bad_seq,
			     "bad btree header: seq 0");
	}

	while (b->written < (ptr_written ?: btree_sectors(c))) {
		unsigned sectors;
		struct nonce nonce;
		bool first = !b->written;
		bool csum_bad;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i,
				     bset_unknown_csum,
				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);

			csum_bad = bch2_crc_cmp(b->data->csum,
				csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data));
			if (csum_bad)
				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

			btree_err_on(csum_bad,
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i,
				     bset_bad_csum,
				     "invalid checksum");

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
					"error decrypting btree node: %i", ret))
				goto fsck_err;

			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     -BCH_ERR_btree_node_read_err_incompatible,
				     c, NULL, b, NULL,
				     btree_node_unsupported_version,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i,
				     bset_unknown_csum,
				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			csum_bad = bch2_crc_cmp(bne->csum,
				csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne));
			if (csum_bad)
				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

			btree_err_on(csum_bad,
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i,
				     bset_bad_csum,
				     "invalid checksum");

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
					"error decrypting btree node: %i\n", ret))
				goto fsck_err;

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, b->written, sectors,
				    READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i,
			     bset_blacklisted_journal_seq,
			     "first btree node bset has blacklisted journal seq (%llu)",
			     le64_to_cpu(i->journal_seq));

		btree_err_on(blacklisted && ptr_written,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i,
			     first_bset_blacklisted_journal_seq,
			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
			     le64_to_cpu(i->journal_seq),
			     b->written, b->written + sectors, ptr_written);

		b->written += sectors;

		if (blacklisted && !first)
			continue;

		sort_iter_add(iter,
			      vstruct_idx(i, 0),
			      vstruct_last(i));
	}

	if (ptr_written) {
		btree_err_on(b->written < ptr_written,
			     -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, NULL,
			     btree_node_data_missing,
			     "btree node data missing: expected %u sectors, found %u",
			     ptr_written, b->written);
	} else {
		for (bne = write_block(b);
		     bset_byte_offset(b, bne) < btree_bytes(c);
		     bne = (void *) bne + block_bytes(c))
			btree_err_on(bne->keys.seq == b->data->keys.seq &&
				     !bch2_journal_seq_is_blacklisted(c,
						le64_to_cpu(bne->keys.journal_seq),
						true),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, NULL,
				     btree_node_bset_after_end,
				     "found bset signature after last bset");
	}

	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);
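
	/* Final pass over the merged keys: drop any whose values don't validate: */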
	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);

		printbuf_reset(&buf);

		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
		    (bch2_inject_invalid_keys &&
		     !bversion_cmp(u.k->version, MAX_VERSION))) {
			printbuf_reset(&buf);

			prt_printf(&buf, "invalid bkey: ");
			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
			prt_printf(&buf, "\n ");
			bch2_bkey_val_to_text(&buf, c, u.s_c);

			btree_err(-BCH_ERR_btree_node_read_err_fixable,
				  c, NULL, b, i,
				  btree_node_bad_bkey,
				  "%s", buf.buf);

			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			continue;
		}

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_p_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
		struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);

		if (ca2->mi.state != BCH_MEMBER_STATE_rw)
			set_btree_node_need_rewrite(b);
	}

	if (!ptr_written)
		set_btree_node_need_rewrite(b);
out:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	return retry_read;
fsck_err:
	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
	    ret == -BCH_ERR_btree_node_read_err_must_retry)
		retry_read = 1;
	else
		set_btree_node_read_error(b);
	goto out;
}
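
/*
 * Read completion work, in process context: if the read failed or the node
 * didn't validate, retry from another replica (marking each failed device)
 * until we run out of replicas; if we saw errors that were fixed, kick off an
 * asynchronous rewrite of the node.
 */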
static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct btree *b		= rb->b;
	struct bch_dev *ca	= bch_dev_bkey_exists(c, rb->pick.ptr.dev);
	struct bio *bio		= &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	struct printbuf buf = PRINTBUF;
	bool saw_error = false;
	bool retry = false;
	bool can_retry;

	goto start;
	while (1) {
		retry = true;
		bch_info(c, "retrying read");
		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_bytes(c);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		printbuf_reset(&buf);
		bch2_btree_pos_to_text(&buf, c, b);
		bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
				   "btree read error %s for %s",
				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		bch2_mark_io_failure(&failed, &rb->pick);

		can_retry = bch2_bkey_pick_read_device(c,
				bkey_i_to_s_c(&b->key),
				&failed, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
			if (retry)
				bch_info(c, "retry success");
			break;
		}

		saw_error = true;

		if (!can_retry) {
			set_btree_node_read_error(b);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);

	if (saw_error && !btree_node_read_error(b)) {
		printbuf_reset(&buf);
		bch2_bpos_to_text(&buf, b->key.k.p);
		bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
			 __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);

		bch2_btree_node_rewrite_async(c, b);
	}

	printbuf_exit(&buf);
	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(c->io_complete_wq, &rb->work);
}

struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	blk_status_t		err[BCH_REPLICAS_MAX];
};

static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}

static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	bool have_retry = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
	bool _saw_error = false, *saw_error = &_saw_error;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL,
				 btree_node_replicas_sectors_written_mismatch,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL,
				 btree_node_bset_after_end,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL,
				 btree_node_replicas_data_mismatch,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			printbuf_reset(&buf);

			while (offset < btree_sectors(c)) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				prt_printf(&buf, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
						le64_to_cpu(bne->keys.journal_seq), false))
					prt_printf(&buf, "*");
				offset += sectors;
			}

			while (offset < btree_sectors(c)) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						prt_printf(&buf, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					prt_printf(&buf, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						prt_printf(&buf, "*");
				}
				offset++;
			}

			bch_err(c, "replica %u:%s", i, buf.buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_bytes(c));
		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
	} else {
		ret = -1;
	}

	if (ret)
		set_btree_node_read_error(b);
	else if (*saw_error)
		bch2_btree_node_rewrite_async(c, b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);
	printbuf_exit(&buf);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c = rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}
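
/*
 * Debug path (the bch2_verify_all_btree_replicas option): read every replica
 * of a btree node, cross-check that they all have the same contents and the
 * same number of sectors written, then parse the best one.
 */
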
/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{
	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded pick;
	struct btree_node_read_all *ra;
	unsigned i;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;

	closure_init(&ra->cl, NULL);
	ra->c	= c;
	ra->b	= b;
	ra->nr	= bch2_bkey_nr_ptrs(k);

	for (i = 0; i < ra->nr; i++) {
		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
		ra->bio[i] = bio_alloc_bioset(NULL,
					      buf_pages(ra->buf[i], btree_bytes(c)),
					      REQ_OP_READ|REQ_SYNC|REQ_META,
					      GFP_NOFS,
					      &c->btree_bio);
	}

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);
		rb->c			= c;
		rb->b			= b;
		rb->ra			= ra;
		rb->start_time		= local_clock();
		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
		rb->idx			= i;
		rb->pick		= pick;
		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
		bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));

		if (rb->have_ioref) {
			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
				     bio_sectors(&rb->bio));
			bio_set_dev(&rb->bio, ca->disk_sb.bdev);

			closure_get(&ra->cl);
			submit_bio(&rb->bio);
		} else {
			ra->err[i] = BLK_STS_REMOVED;
		}

		i++;
	}

	if (sync) {
		closure_sync(&ra->cl);
		btree_node_read_all_replicas_done(&ra->cl.work);
	} else {
		continue_at(&ra->cl, btree_node_read_all_replicas_done,
			    c->io_complete_wq);
	}

	return 0;
}

void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
			  bool sync)
{
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_and_count(c, btree_node_read, c, b);

	if (bch2_verify_all_btree_replicas &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick);

	if (ret <= 0) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "btree node read error: no device to read from\n at ");
		bch2_btree_pos_to_text(&buf, c, b);
		bch_err(c, "%s", buf.buf);

		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
			bch2_fatal_error(c);

		set_btree_node_read_error(b);
		clear_btree_node_read_in_flight(b);
		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
		printbuf_exit(&buf);
		return;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);

	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_bytes(c)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->b			= b;
	rb->ra			= NULL;
	rb->start_time		= local_clock();
	rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_end_io		= btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_bytes(c));

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);

			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->io_complete_wq, &rb->work);
	}
}
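
/*
 * Read a btree root: allocate an in-memory btree node (cannibalizing another
 * cached node if we have to), read it in synchronously, and set it as the
 * root of its btree.
 */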
static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
				  const struct bkey_i *k, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(trans, level != 0);
	bch2_btree_cache_cannibalize_unlock(c);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	bch2_btree_node_read(c, b, true);

	if (btree_node_read_error(b)) {
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		mutex_lock(&c->btree_cache.lock);
		list_move(&b->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		ret = -EIO;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
}

static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
				      struct btree_write *w)
{
	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);

	do {
		old = new = v;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}
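
/*
 * Write completion: drop the journal pin for the write that just finished,
 * then atomically update b->flags - if the node was redirtied and needs
 * another write (and nothing blocks it), start that write immediately,
 * otherwise clear write_in_flight and wake up waiters.
 */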
static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new, v;
	unsigned type = 0;

	bch2_btree_complete_write(c, b, w);

	v = READ_ONCE(b->flags);
	do {
		old = new = v;

		if ((old & (1U << BTREE_NODE_dirty)) &&
		    (old & (1U << BTREE_NODE_need_write)) &&
		    !(old & (1U << BTREE_NODE_never_write)) &&
		    !(old & (1U << BTREE_NODE_write_blocked)) &&
		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
			new &= ~(1U << BTREE_NODE_dirty);
			new &= ~(1U << BTREE_NODE_need_write);
			new |=  (1U << BTREE_NODE_write_in_flight);
			new |=  (1U << BTREE_NODE_write_in_flight_inner);
			new |=  (1U << BTREE_NODE_just_written);
			new ^=  (1U << BTREE_NODE_write_idx);

			type = new & BTREE_WRITE_TYPE_MASK;
			new &= ~BTREE_WRITE_TYPE_MASK;
		} else {
			new &= ~(1U << BTREE_NODE_write_in_flight);
			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
		}
	} while ((v = cmpxchg(&b->flags, old, new)) != old);

	if (new & (1U << BTREE_NODE_write_in_flight))
		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
	else
		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_trans *trans = bch2_trans_get(c);

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
	__btree_node_write_done(c, b);
	six_unlock_read(&b->c.lock);

	bch2_trans_put(trans);
}
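
/*
 * Btree write completion, in process context: free the bounce buffer, drop
 * pointers to any devices the write failed on, and for anything but the
 * node's first write re-insert the node's key with the updated pointers.
 */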
static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c	= wbio->wbio.c;
	struct btree *b		= wbio->wbio.bio.bi_private;
	struct bch_extent_ptr *ptr;
	int ret = 0;

	btree_bounce_free(c,
		wbio->data_bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
		goto err;

	if (wbio->wbio.first_btree_write) {
		if (wbio->wbio.failed.nr) {

		}
	} else {
		ret = bch2_trans_do(c, NULL, NULL, 0,
			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
					BCH_WATERMARK_reclaim|
					BTREE_INSERT_JOURNAL_RECLAIM|
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_NOCHECK_RW,
					!wbio->wbio.failed.nr));
		if (ret)
			goto err;
	}
out:
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	if (!bch2_err_matches(ret, EROFS))
		bch2_fs_fatal_error(c, "fatal error writing btree node: %s", bch2_err_str(ret));
	goto out;
}

static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig	= parent ?: wbio;
	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
	struct bch_fs *c		= wbio->c;
	struct btree *b			= wbio->bio.bi_private;
	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);
	unsigned long flags;

	if (wbio->have_ioref)
		bch2_latency_acct(ca, wbio->submit_time, WRITE);

	if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
			       "btree write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_ioref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
		return;
	}

	clear_btree_node_write_in_flight_inner(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
	INIT_WORK(&wb->work, btree_node_write_work);
	queue_work(c->btree_io_complete_wq, &wb->work);
}

static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	struct printbuf buf = PRINTBUF;
	bool saw_error;
	int ret;

	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
				BKEY_TYPE_btree, WRITE, &buf);

	if (ret)
		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
	printbuf_exit(&buf);
	if (ret)
		return ret;

	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
	if (ret) {
		bch2_inconsistent_error(c);
		dump_stack();
	}

	return ret;
}

static void btree_write_submit(struct work_struct *work)
{
	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
	struct bch_extent_ptr *ptr;
	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;

	bkey_copy(&tmp.k, &wbio->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
		ptr->offset += wbio->sector_offset;

	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
				  &tmp.k, false);
}
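
/*
 * Main btree node write path: claim the write by clearing the dirty bit, sort
 * all the unwritten bsets and whiteouts into a single new bset in a bounce
 * buffer, checksum (and possibly encrypt) it, and submit it - btree node
 * writes are appends, at offset b->written, after whatever has already been
 * written to the node.
 */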
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
	struct btree_write_bio *wbio;
	struct bset_tree *t;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct sort_iter_stack sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
	void *data;
	int ret;

	if (flags & BTREE_WRITE_ALREADY_STARTED)
		goto do_write;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	do {
		old = new = READ_ONCE(b->flags);

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
		    !(old & (1 << BTREE_NODE_need_write)))
			return;

		if (old &
		    ((1 << BTREE_NODE_never_write)|
		     (1 << BTREE_NODE_write_blocked)))
			return;

		if (b->written &&
		    (old & (1 << BTREE_NODE_will_make_reachable)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight))
			return;

		if (flags & BTREE_WRITE_ONLY_IF_NEED)
			type = new & BTREE_WRITE_TYPE_MASK;
		new &= ~BTREE_WRITE_TYPE_MASK;

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_write_in_flight_inner);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);

	if (new & (1U << BTREE_NODE_need_write))
		return;
do_write:
	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));

	atomic_dec(&c->btree_cache.dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= btree_sectors(c));
	BUG_ON(b->written & (block_sectors(c) - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_stack_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	/* buffer must be a multiple of the block size */
	bytes = round_up(bytes, block_bytes(c));

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq	= cpu_to_le64(seq);
	i->u64s		= 0;

	sort_iter_add(&sort_iter.iter,
		      unwritten_whiteouts_start(c, b),
		      unwritten_whiteouts_end(c, b));
	SET_BSET_SEPARATE_WHITEOUTS(i, false);

	b->whiteout_u64s = 0;

	u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
	le16_add_cpu(&i->u64s, u64s);

	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);

	set_needs_whiteout(i, false);
	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	if (!b->written &&
	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(c->sb.version);
	SET_BSET_OFFSET(i, b->written);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	ret = bset_encrypt(c, i, b->written << 9);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting btree node: %i\n", ret))
		goto err;

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

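	/*
	 * The buffer is now in its final on-disk form: it has been encrypted
	 * (when encryption is in use) and checksummed, so nothing below may
	 * modify it.
	 */
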
	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Also on journal error, the pending write may have updates that were
	 * never journalled (interior nodes, see btree_update_nodes_written()) -
	 * it's critical that we don't do the write in that case, otherwise we
	 * will have updates visible that weren't in the journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(NULL,
				buf_pages(data, sectors_to_write << 9),
				REQ_OP_WRITE|REQ_META,
				GFP_NOFS,
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data = data;
	wbio->data_bytes = bytes;
	wbio->sector_offset = b->written;
	wbio->wbio.c = c;
	wbio->wbio.used_mempool = used_mempool;
	wbio->wbio.first_btree_write = !b->written;
	wbio->wbio.bio.bi_end_io = btree_node_write_endio;
	wbio->wbio.bio.bi_private = b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	bkey_copy(&wbio->key, &b->key);

	b->written += sectors_to_write;

	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
			cpu_to_le16(b->written);

	atomic64_inc(&c->btree_write_stats[type].nr);
	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);

	INIT_WORK(&wbio->work, btree_write_submit);
	queue_work(c->io_complete_wq, &wbio->work);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	__btree_node_write_done(c, b);
}

/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;
	struct bset_tree *t;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, 0, b->nsets, true);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we
	 * have to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}

/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held,
			   unsigned flags)
{
	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}

static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
	bool ret = false;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			ret = true;
			goto restart;
		}
	rcu_read_unlock();

	return ret;
}

bool bch2_btree_flush_all_reads(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

bool bch2_btree_flush_all_writes(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}

static const char * const bch2_btree_write_types[] = {
#define x(t, n) [n] = #t,
	BCH_BTREE_WRITE_TYPES()
	NULL
};

void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 20);
	printbuf_tabstop_push(out, 10);

	prt_tab(out);
	prt_str(out, "nr");
	prt_tab(out);
	prt_str(out, "size");
	prt_newline(out);

	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
		u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
		u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);

		prt_printf(out, "%s:", bch2_btree_write_types[i]);
		prt_tab(out);
		prt_u64(out, nr);
		prt_tab(out);
		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
		prt_newline(out);
	}
}