// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "sb-clean.h"
#include "trace.h"

static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum)
{
	if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) {
		*csum = (struct bch_csum) {};
		return false;
	}

	*csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
	return !bch2_crc_cmp(j->csum, *csum);
}

static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
	return (seq - c->journal_entries_base_seq) & (~0U >> 1);
}

static void __journal_replay_free(struct bch_fs *c,
				  struct journal_replay *i)
{
	struct journal_replay **p =
		genradix_ptr(&c->journal_entries,
			     journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));

	BUG_ON(*p != i);
	*p = NULL;
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(c, i);
}

struct journal_list {
	struct closure		cl;
	u64			last_seq;
	struct mutex		lock;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j)
{
	struct genradix_iter iter;
	struct journal_replay **_i, *i, *dup;
	struct journal_ptr *ptr;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < jlist->last_seq)
		return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;

	/*
	 * genradixes are indexed by a ulong, not a u64, so we can't index them
	 * by sequence number directly: assume instead that they will all fall
	 * within the range of +/- 2 billion of the first one we find.
	 */
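	/*
	 * (The index is just the low 31 bits of (seq - base_seq) - see
	 * journal_entry_radix_idx() above - so any window of 2^31 consecutive
	 * sequence numbers maps to distinct radix slots.)
	 */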
	if (!c->journal_entries_base_seq)
		c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);

	/* Drop entries we don't need anymore */
	if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
		genradix_for_each_from(&c->journal_entries, iter, _i,
				       journal_entry_radix_idx(c, jlist->last_seq)) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			if (le64_to_cpu(i->j.seq) >= last_seq)
				break;
			journal_replay_free(c, i);
		}
	}

	jlist->last_seq = max(jlist->last_seq, last_seq);

	_i = genradix_ptr_alloc(&c->journal_entries,
				journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
				GFP_KERNEL);
	if (!_i)
		return -BCH_ERR_ENOMEM_journal_entry_add;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	dup = *_i;
	if (dup) {
		if (bytes == vstruct_bytes(&dup->j) &&
		    !memcmp(j, &dup->j, bytes)) {
			i = dup;
			goto found;
		}

		if (!entry_ptr.csum_good) {
			i = dup;
			goto found;
		}

		if (!dup->csum_good)
			goto replace;

		fsck_err(c, journal_entry_replicas_data_mismatch,
			 "found duplicate but non-identical journal entries (seq %llu)",
			 le64_to_cpu(j->seq));
		i = dup;
		goto found;
	}
replace:
	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i)
		return -BCH_ERR_ENOMEM_journal_entry_add;

	i->nr_ptrs	= 0;
	i->csum_good	= entry_ptr.csum_good;
	i->ignore	= false;
	unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");
	i->ptrs[i->nr_ptrs++] = entry_ptr;

	if (dup) {
		if (dup->nr_ptrs >= ARRAY_SIZE(dup->ptrs)) {
			bch_err(c, "found too many copies of journal entry %llu",
				le64_to_cpu(i->j.seq));
			dup->nr_ptrs = ARRAY_SIZE(dup->ptrs) - 1;
		}

		/* The first ptr should represent the jset we kept: */
		memcpy(i->ptrs + i->nr_ptrs,
		       dup->ptrs,
		       sizeof(dup->ptrs[0]) * dup->nr_ptrs);
		i->nr_ptrs += dup->nr_ptrs;
		__journal_replay_free(c, dup);
	}

	*_i = i;
	return 0;
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

static void journal_entry_err_msg(struct printbuf *out,
				  u32 version,
				  struct jset *jset,
				  struct jset_entry *entry)
{
	prt_str(out, "invalid journal entry, version=");
	bch2_version_to_text(out, version);

	if (entry) {
		prt_str(out, " type=");
		prt_str(out, bch2_jset_entry_types[entry->type]);
	}

	if (!jset) {
		prt_printf(out, " in superblock");
	} else {

		prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq));

		if (entry)
			prt_printf(out, " offset=%zi/%u",
				   (u64 *) entry - jset->_data,
				   le32_to_cpu(jset->u64s));
	}

	prt_str(out, ": ");
}

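/*
 * A failed check is repairable at read time (mustfix_fsck_err); when we're
 * validating an entry we're about to write, it's instead counted in the
 * superblock and the filesystem is flagged inconsistent:
 */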
#define journal_entry_err(c, version, jset, entry, _err, msg, ...)	\
({									\
	struct printbuf _buf = PRINTBUF;				\
									\
	journal_entry_err_msg(&_buf, version, jset, entry);		\
	prt_printf(&_buf, msg, ##__VA_ARGS__);				\
									\
	switch (flags & BKEY_INVALID_WRITE) {				\
	case READ:							\
		mustfix_fsck_err(c, _err, "%s", _buf.buf);		\
		break;							\
	case WRITE:							\
		bch2_sb_error_count(c, BCH_FSCK_ERR_##_err);		\
		bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
		if (bch2_fs_inconsistent(c)) {				\
			ret = -BCH_ERR_fsck_errors_not_fixed;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
									\
	printbuf_exit(&_buf);						\
	true;								\
})

#define journal_entry_err_on(cond, ...)					\
	((cond) ? journal_entry_err(__VA_ARGS__) : false)

#define FSCK_DELETED_KEY 5

static int journal_validate_key(struct bch_fs *c,
				struct jset *jset,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k,
				unsigned version, int big_endian,
				enum bkey_invalid_flags flags)
{
	int write = flags & BKEY_INVALID_WRITE;
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s,
				 c, version, jset, entry,
				 journal_entry_bkey_u64s_0,
				 "k->u64s 0")) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry),
				 c, version, jset, entry,
				 journal_entry_bkey_past_end,
				 "extends past end of journal entry")) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
				 c, version, jset, entry,
				 journal_entry_bkey_bad_format,
				 "bad format %u", k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), write, &buf)) {
		printbuf_reset(&buf);
		journal_entry_err_msg(&buf, version, jset, entry);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		prt_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), write, &buf);

		mustfix_fsck_err(c, journal_entry_bkey_invalid,
				 "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

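/*
 * Validate each key in a btree keys entry; journal_validate_key() may drop a
 * bad key in place (FSCK_DELETED_KEY), in which case we re-check from the
 * same position rather than advancing:
 */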
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian,
					     enum bkey_invalid_flags flags)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, jset, entry,
					       entry->level,
					       entry->btree_id,
					       k, version, big_endian,
					       flags|BKEY_INVALID_JOURNAL);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	jset_entry_for_each_key(entry, k) {
		if (!first) {
			prt_newline(out);
			prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian,
					     enum bkey_invalid_flags flags)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s,
				 c, version, jset, entry,
				 journal_entry_btree_root_bad_size,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
				   version, big_endian, flags);
	if (ret == FSCK_DELETED_KEY)
		ret = 0;
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian,
					    enum bkey_invalid_flags flags)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian,
					    enum bkey_invalid_flags flags)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
				 c, version, jset, entry,
				 journal_entry_blacklist_bad_size,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       struct jset *jset,
					       struct jset_entry *entry,
					       unsigned version, int big_endian,
					       enum bkey_invalid_flags flags)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
				 c, version, jset, entry,
				 journal_entry_blacklist_v2_bad_size,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end),
				 c, version, jset, entry,
				 journal_entry_blacklist_v2_start_past_end,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	prt_printf(out, "start=%llu end=%llu",
		   le64_to_cpu(bl->start),
		   le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					unsigned version, int big_endian,
					enum bkey_invalid_flags flags)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c, version, jset, entry,
				 journal_entry_usage_bad_size,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	prt_printf(out, "type=%s v=%llu",
		   bch2_fs_usage_types[u->entry.btree_id],
		   le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian,
					     enum bkey_invalid_flags flags)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	struct printbuf err = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c, version, jset, entry,
				 journal_entry_data_usage_bad_size,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err),
				 c, version, jset, entry,
				 journal_entry_data_usage_bad_size,
				 "invalid journal entry usage: %s", err.buf)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}
out:
fsck_err:
	printbuf_exit(&err);
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	prt_printf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					unsigned version, int big_endian,
					enum bkey_invalid_flags flags)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, version, jset, entry,
				 journal_entry_clock_bad_size,
				 "bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, version, jset, entry,
				 journal_entry_clock_bad_rw,
				 "bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian,
					    enum bkey_invalid_flags flags)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, version, jset, entry,
				 journal_entry_dev_usage_bad_size,
				 "bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, version, jset, entry,
				 journal_entry_dev_usage_bad_dev,
				 "bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, version, jset, entry,
				 journal_entry_dev_usage_bad_pad,
				 "bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	prt_printf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		bch2_prt_data_type(out, i);
		prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
			   le64_to_cpu(u->d[i].buckets),
			   le64_to_cpu(u->d[i].sectors),
			   le64_to_cpu(u->d[i].fragmented));
	}
}

static int journal_entry_log_validate(struct bch_fs *c,
				      struct jset *jset,
				      struct jset_entry *entry,
				      unsigned version, int big_endian,
				      enum bkey_invalid_flags flags)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	prt_printf(out, "%.*s", bytes, l->d);
}

static int journal_entry_overwrite_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian,
					    enum bkey_invalid_flags flags)
{
	return journal_entry_btree_keys_validate(c, jset, entry,
						 version, big_endian, READ);
}

static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
						     struct jset *jset,
						     struct jset_entry *entry,
						     unsigned version, int big_endian,
						     enum bkey_invalid_flags flags)
{
	return journal_entry_btree_keys_validate(c, jset, entry,
						 version, big_endian, READ);
}

static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c,
						     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, unsigned, int,
			enum bkey_invalid_flags);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c,
				struct jset *jset,
				struct jset_entry *entry,
				unsigned version, int big_endian,
				enum bkey_invalid_flags flags)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
							    version, big_endian, flags)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		prt_printf(out, "(unknown type %u)", entry->type);
	}
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 enum bkey_invalid_flags flags)
{
	unsigned version = le32_to_cpu(jset->version);
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
					 c, version, jset, entry,
					 journal_entry_past_jset_end,
					 "journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, jset, entry,
						  version, JSET_BIG_ENDIAN(jset), flags);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

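/*
 * Full validation of a jset we've decided to keep: check the version and
 * checksum type, sanity check last_seq against seq, then validate every entry
 * via jset_validate_entries():
 */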
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 enum bkey_invalid_flags flags)
{
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on(!bch2_version_compatible(version),
				 c, version, jset, NULL,
				 jset_unsupported_version,
				 "%s sector %llu seq %llu: incompatible journal entry version %u.%u",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq),
				 BCH_VERSION_MAJOR(version),
				 BCH_VERSION_MINOR(version))) {
		/* don't try to continue: */
		return -EINVAL;
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
				 c, version, jset, NULL,
				 jset_unknown_csum,
				 "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq),
				 JSET_CSUM_TYPE(jset)))
		ret = JOURNAL_ENTRY_BAD;

	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
				 c, version, jset, NULL,
				 jset_last_seq_newer_than_seq,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}

	ret = jset_validate_entries(c, jset, flags);
fsck_err:
	return ret;
}

static int jset_validate_early(struct bch_fs *c,
			       struct bch_dev *ca,
			       struct jset *jset, u64 sector,
			       unsigned bucket_sectors_left,
			       unsigned sectors_read)
{
	size_t bytes = vstruct_bytes(jset);
	unsigned version;
	enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on(!bch2_version_compatible(version),
				 c, version, jset, NULL,
				 jset_unsupported_version,
				 "%s sector %llu seq %llu: unknown journal entry version %u.%u",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq),
				 BCH_VERSION_MAJOR(version),
				 BCH_VERSION_MINOR(version))) {
		/* don't try to continue: */
		return -EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
				 c, version, jset, NULL,
				 jset_past_bucket_end,
				 "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq), bytes))
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
fsck_err:
	return ret;
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

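/*
 * Read all the journal entries in a single journal bucket, growing the read
 * buffer and re-reading when an entry turns out to be bigger than what we have
 * so far; every entry that passes jset_validate_early() is handed to
 * journal_entry_add():
 */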
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false, csum_good;
	struct printbuf err = PRINTBUF;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
			unsigned nr_bvecs;
reread:
			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);
			nr_bvecs = buf_pages(buf->data, sectors_read << 9);

			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

			bio->bi_iter.bi_sector = offset;
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			kfree(bio);

			if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				goto out;
			}

			j = buf->data;
		}

		ret = jset_validate_early(c, ca, j, offset,
					  end - offset, sectors_read);
		switch (ret) {
		case 0:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							       vstruct_bytes(j));
				if (ret)
					goto err;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				goto out;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			goto next_block;
		default:
			goto err;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			goto out;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		enum bch_csum_type csum_type = JSET_CSUM_TYPE(j);
		struct bch_csum csum;
		csum_good = jset_csum_good(c, j, &csum);

		if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
				       "%s",
				       (printbuf_reset(&err),
					prt_str(&err, "journal "),
					bch2_csum_err_msg(&err, csum_type, j->csum, csum),
					err.buf)))
			saw_bad = true;

		ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
				   j->encrypted_start,
				   vstruct_end(j) - (void *) j->encrypted_start);
		bch2_fs_fatal_err_on(ret, c,
				     "error decrypting journal entry: %s",
				     bch2_err_str(ret));

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.csum_good	= csum_good,
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			goto err;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

out:
	ret = 0;
err:
	printbuf_exit(&err);
	return ret;
}

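/*
 * Closure callback, one per device: read every journal bucket on this device,
 * then use the most recent entry found to work out cur_idx and sectors_free,
 * i.e. where journal writes to this device should resume:
 */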
static CLOSURE_CALLBACK(bch2_journal_read_device)
{
	closure_type(ja, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_replay *r, **_r;
	struct genradix_iter iter;
	struct journal_read_buf buf = { NULL, 0 };
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	ja->sectors_free = ca->mi.bucket_size;

	mutex_lock(&jlist->lock);
	genradix_for_each_reverse(&c->journal_entries, iter, _r) {
		r = *_r;

		if (!r)
			continue;

		for (i = 0; i < r->nr_ptrs; i++) {
			if (r->ptrs[i].dev == ca->dev_idx) {
				unsigned wrote = bucket_remainder(ca, r->ptrs[i].sector) +
					vstruct_sectors(&r->j, c->block_bits);

				ja->cur_idx = r->ptrs[i].bucket;
				ja->sectors_free = ca->mi.bucket_size - wrote;
				goto found;
			}
		}
	}
found:
	mutex_unlock(&jlist->lock);

	if (ja->bucket_seq[ja->cur_idx] &&
	    ja->sectors_free == ca->mi.bucket_size) {
#if 0
		/*
		 * Debug code for ZNS support, where we (probably) want to
		 * correlate where we stopped in the journal with the zone
		 * write points:
		 */
		bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
		for (i = 0; i < 3; i++) {
			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;

			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
		}
#endif
		ja->sectors_free = 0;
	}

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			prt_printf(out, " ");
		prt_printf(out, "%u:%u:%u (sector %llu)",
			   j->ptrs[i].dev,
			   j->ptrs[i].bucket,
			   j->ptrs[i].bucket_offset,
			   j->ptrs[i].sector);
	}
}

int bch2_journal_read(struct bch_fs *c,
		      u64 *last_seq,
		      u64 *blacklist_seq,
		      u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, **_i, *prev = NULL;
	struct genradix_iter radix_iter;
	struct printbuf buf = PRINTBUF;
	bool degraded = false, last_write_torn = false;
	u64 seq;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.last_seq = 0;
	jlist.ret = 0;

	for_each_member_device(c, ca) {
		if (!c->opts.fsck &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	*last_seq	= 0;
	*start_seq	= 0;
	*blacklist_seq	= 0;

	/*
	 * Find most recent flush entry, and ignore newer non-flush entries -
	 * those entries will be blacklisted:
	 */
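	/*
	 * The newest flush entry is also allowed to have a bad checksum once
	 * (a torn write from the crash); it is then ignored like the non-flush
	 * entries above it - see last_write_torn below.
	 */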
	genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
		enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;

		i = *_i;

		if (!i || i->ignore)
			continue;

		if (!*start_seq)
			*blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1;

		if (JSET_NO_FLUSH(&i->j)) {
			i->ignore = true;
			continue;
		}

		if (!last_write_torn && !i->csum_good) {
			last_write_torn = true;
			i->ignore = true;
			continue;
		}

		if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
					 c, le32_to_cpu(i->j.version), &i->j, NULL,
					 jset_last_seq_newer_than_seq,
					 "invalid journal entry: last_seq > seq (%llu > %llu)",
					 le64_to_cpu(i->j.last_seq),
					 le64_to_cpu(i->j.seq)))
			i->j.last_seq = i->j.seq;

		*last_seq	= le64_to_cpu(i->j.last_seq);
		*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
		break;
	}

	if (!*start_seq) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	if (!*last_seq) {
		fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
			 "journal read done, but no entries found after dropping non-flushes");
		return 0;
	}

	bch_info(c, "journal read done, replaying entries %llu-%llu",
		 *last_seq, *blacklist_seq - 1);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);

	/* Drop blacklisted entries and entries older than last_seq: */
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < *last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    jset_seq_blacklisted,
				    "found blacklisted journal entry %llu", seq);
			i->ignore = true;
		}
	}

	/* Check for missing entries: */
	seq = *last_seq;
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (prev) {
				bch2_journal_ptrs_to_text(&buf1, c, prev);
				prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
			} else
				prt_printf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, journal_entries_missing,
				 "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 " prev at %s\n"
				 " next at %s",
				 missing_start, missing_end,
				 *last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		prev = i;
		seq++;
	}

	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		i = *_i;
		if (!i || i->ignore)
			continue;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);

			if (!i->ptrs[ptr].csum_good)
				bch_err_dev_offset(ca, i->ptrs[ptr].sector,
						   "invalid journal checksum, seq %llu%s",
						   le64_to_cpu(i->j.seq),
						   i->csum_good ? " (had good copy on another device)" : "");
		}

		ret = jset_validate(c,
				    bch_dev_bkey_exists(c, i->ptrs[0].dev),
				    &i->j,
				    i->ptrs[0].sector,
				    READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    !bch2_replicas_marked(c, &replicas.e) &&
		    (le64_to_cpu(i->j.seq) == *last_seq ||
		     fsck_err(c, journal_entry_replicas_not_marked,
			      "superblock not marked as containing replicas for journal entry %llu\n %s",
			      le64_to_cpu(i->j.seq), buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/* journal write: */

static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				.offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				.dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/**
 * journal_write_alloc - decide where to write next journal entry
 *
 * @j:		journal object
 * @w:		journal buf (entry to be written)
 *
 * Returns: 0 on success, or -EROFS on failure
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);
	unsigned replicas_need = min_t(unsigned, replicas_want,
				       READ_ONCE(c->opts.metadata_replicas_required));

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= replicas_need ? 0 : -EROFS;
}

static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	size_t btree_write_buffer_size = new_size / 64;

	if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
		return;

	new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}

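/*
 * Journal write completion: record which devices the entry actually made it
 * to, mark the replicas, advance seq_ondisk/flushed_seq_ondisk, then kick
 * journal reclaim and start the next unwritten buffer if one is ready:
 */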
static CLOSURE_CALLBACK(journal_write_done)
{
	closure_type(j, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;

			bch2_do_discards(c);
			closure_wake_up(&c->freelist_wait);

			bch2_reset_alloc_cursors(c);
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq	= seq;

	j->seq_ondisk = seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	if (j->watermark != BCH_WATERMARK_stripe)
		journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(journal_state_count(new, new.unwritten_idx));

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_reclaim_fast(j);
	bch2_journal_space_available(j);

	track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight],
			   &j->max_in_flight_start, false);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (!journal_state_count(new, new.unwritten_idx) &&
	    journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
		spin_unlock(&j->lock);
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
	} else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
		   new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		/*
		 * We don't close a journal entry to write it while there's
		 * previous entries still in flight - the current journal entry
		 * might want to be written now:
		 */

		spin_unlock(&j->lock);
		mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
	} else {
		spin_unlock(&j->lock);
	}
}

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
			       "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

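/*
 * Submit the journal write to each device pointer in w->key;
 * journal_write_endio() drops devices that fail, and the closure continues at
 * journal_write_done() once all the bios have completed:
 */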
static CLOSURE_CALLBACK(do_journal_write)
{
	closure_type(j, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_and_count(c, journal_write, bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
}

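/*
 * Prepare a journal buf for writing: compact out empty entries, flush write
 * buffer keys, append btree roots and superblock entries, then checksum and
 * (if enabled) encrypt the jset and zero the tail padding:
 */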
static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct jset_entry *start, *end;
	struct jset *jset = w->data;
	struct journal_keys_to_wb wb = { NULL };
	unsigned sectors, bytes, u64s;
	unsigned long btree_roots_have = 0;
	bool validate_before_checksum = false;
	u64 seq = le64_to_cpu(jset->seq);
	int ret;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be.
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each(jset, i) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/*
		 * New btree roots are set by journalling them; when the journal
		 * entry gets written we have to propagate them to
		 * c->btree_roots
		 *
		 * But, every journal entry we write has to contain all the
		 * btree roots (at least for now); so after we copy btree roots
		 * to c->btree_roots we have to get any missing btree roots and
		 * add them to this journal entry:
		 */
		switch (i->type) {
		case BCH_JSET_ENTRY_btree_root:
			bch2_journal_entry_to_btree_root(c, i);
			__set_bit(i->btree_id, &btree_roots_have);
			break;
		case BCH_JSET_ENTRY_write_buffer_keys:
			EBUG_ON(!w->need_flush_to_write_buffer);

			if (!wb.wb)
				bch2_journal_keys_to_write_buffer_start(c, &wb, seq);

			struct bkey_i *k;
			jset_entry_for_each_key(i, k) {
				ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
				if (ret) {
					bch2_fs_fatal_error(c, "-ENOMEM flushing journal keys to btree write buffer");
					bch2_journal_keys_to_write_buffer_end(c, &wb);
					return ret;
				}
			}
			i->type = BCH_JSET_ENTRY_btree_keys;
			break;
		}
	}

	if (wb.wb)
		bch2_journal_keys_to_write_buffer_end(c, &wb);
	w->need_flush_to_write_buffer = false;

	start = end = vstruct_last(jset);

	end = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);

	bch2_journal_super_entries_add_common(c, &end, seq);
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);

	sectors = vstruct_sectors(jset, c->block_bits);
	bytes	= vstruct_bytes(jset);

	if (sectors > w->sectors) {
		bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
				    vstruct_bytes(jset), w->sectors << 9,
				    u64s, w->u64s_reserved, j->entry_u64s_reserved);
		return -EINVAL;
	}

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = seq;

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    (ret = jset_validate(c, NULL, jset, 0, WRITE)))
		return ret;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
				 "error encrypting journal entry: %i", ret))
		return ret;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    (ret = jset_validate(c, NULL, jset, 0, WRITE)))
		return ret;

	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
	return 0;
}

static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	int error = bch2_journal_error(j);

	/*
	 * If the journal is in an error state - we did an emergency shutdown -
	 * we prefer to continue doing journal writes. We just mark them as
	 * noflush so they'll never be used, but they'll still be visible to the
	 * list_journal tool - this helps in debugging.
	 *
	 * There's a caveat: the first journal write after marking the
	 * superblock dirty must always be a flush write, because on startup
	 * from a clean shutdown we didn't necessarily read the journal and the
	 * new journal write might overwrite whatever was in the journal
	 * previously - we can't leave the journal without any flush writes in
	 * it.
	 *
	 * So if we're in an error state, and we're still starting up, we don't
	 * write anything at all.
	 */
	if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags))
		return -EIO;

	if (error ||
	    w->noflush ||
	    (!w->must_flush &&
	     (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	     test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(w->data, true);
		w->data->last_seq	= 0;
		w->last_seq		= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
		clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
	}

	return 0;
}

CLOSURE_CALLBACK(bch2_journal_write)
{
	closure_type(j, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	unsigned nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	ret = bch2_journal_write_pick_flush(j, w);
	spin_unlock(&j->lock);
	if (ret)
		goto err;

	mutex_lock(&j->buf_lock);
	journal_buf_realloc(j, w);

	ret = bch2_journal_write_prep(j, w);
	mutex_unlock(&j->buf_lock);
	if (ret)
		goto err;

	j->entry_bytes_written += vstruct_bytes(w->data);

	while (1) {
		spin_lock(&j->lock);
		ret = journal_write_alloc(j, w);
		if (!ret || !j->can_discard)
			break;

		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
	}

	if (ret) {
		__bch2_journal_debug_to_text(&journal_debug_buf, j);
		spin_unlock(&j->lock);
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		goto err;
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(c, ca)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	/*
	 * Mark journal replicas before we submit the write to guarantee
	 * recovery will find the journal entries after a crash.
	 */
	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
				 w->devs_written);
	ret = bch2_mark_replicas(c, &replicas.e);
	if (ret)
		goto err;

	if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
		for_each_rw_member(c, ca) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio, ca->disk_sb.bdev,
				  REQ_OP_WRITE|REQ_PREFLUSH);
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}