1 // SPDX-License-Identifier: GPL-2.0 2 #include "bcachefs.h" 3 #include "alloc_background.h" 4 #include "alloc_foreground.h" 5 #include "btree_io.h" 6 #include "btree_update_interior.h" 7 #include "btree_write_buffer.h" 8 #include "buckets.h" 9 #include "checksum.h" 10 #include "disk_groups.h" 11 #include "error.h" 12 #include "journal.h" 13 #include "journal_io.h" 14 #include "journal_reclaim.h" 15 #include "journal_seq_blacklist.h" 16 #include "replicas.h" 17 #include "sb-clean.h" 18 #include "trace.h" 19 20 void bch2_journal_pos_from_member_info_set(struct bch_fs *c) 21 { 22 lockdep_assert_held(&c->sb_lock); 23 24 for_each_member_device(c, ca) { 25 struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); 26 27 m->last_journal_bucket = cpu_to_le32(ca->journal.cur_idx); 28 m->last_journal_bucket_offset = cpu_to_le32(ca->mi.bucket_size - ca->journal.sectors_free); 29 } 30 } 31 32 void bch2_journal_pos_from_member_info_resume(struct bch_fs *c) 33 { 34 mutex_lock(&c->sb_lock); 35 for_each_member_device(c, ca) { 36 struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx); 37 38 unsigned idx = le32_to_cpu(m.last_journal_bucket); 39 if (idx < ca->journal.nr) 40 ca->journal.cur_idx = idx; 41 unsigned offset = le32_to_cpu(m.last_journal_bucket_offset); 42 if (offset <= ca->mi.bucket_size) 43 ca->journal.sectors_free = ca->mi.bucket_size - offset; 44 } 45 mutex_unlock(&c->sb_lock); 46 } 47 48 void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c, 49 struct journal_replay *j) 50 { 51 darray_for_each(j->ptrs, i) { 52 if (i != j->ptrs.data) 53 prt_printf(out, " "); 54 prt_printf(out, "%u:%u:%u (sector %llu)", 55 i->dev, i->bucket, i->bucket_offset, i->sector); 56 } 57 } 58 59 static void bch2_journal_replay_to_text(struct printbuf *out, struct bch_fs *c, 60 struct journal_replay *j) 61 { 62 prt_printf(out, "seq %llu ", le64_to_cpu(j->j.seq)); 63 64 bch2_journal_ptrs_to_text(out, c, j); 65 66 for_each_jset_entry_type(entry, &j->j, BCH_JSET_ENTRY_datetime) { 67 struct jset_entry_datetime *datetime = 68 container_of(entry, struct jset_entry_datetime, entry); 69 bch2_prt_datetime(out, le64_to_cpu(datetime->seconds)); 70 break; 71 } 72 } 73 74 static struct nonce journal_nonce(const struct jset *jset) 75 { 76 return (struct nonce) {{ 77 [0] = 0, 78 [1] = ((__le32 *) &jset->seq)[0], 79 [2] = ((__le32 *) &jset->seq)[1], 80 [3] = BCH_NONCE_JOURNAL, 81 }}; 82 } 83 84 static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum) 85 { 86 if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) { 87 *csum = (struct bch_csum) {}; 88 return false; 89 } 90 91 *csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j); 92 return !bch2_crc_cmp(j->csum, *csum); 93 } 94 95 static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq) 96 { 97 return (seq - c->journal_entries_base_seq) & (~0U >> 1); 98 } 99 100 static void __journal_replay_free(struct bch_fs *c, 101 struct journal_replay *i) 102 { 103 struct journal_replay **p = 104 genradix_ptr(&c->journal_entries, 105 journal_entry_radix_idx(c, le64_to_cpu(i->j.seq))); 106 107 BUG_ON(*p != i); 108 *p = NULL; 109 kvfree(i); 110 } 111 112 static void journal_replay_free(struct bch_fs *c, struct journal_replay *i, bool blacklisted) 113 { 114 if (blacklisted) 115 i->ignore_blacklisted = true; 116 else 117 i->ignore_not_dirty = true; 118 119 if (!c->opts.read_entire_journal) 120 __journal_replay_free(c, i); 121 } 122 123 struct journal_list { 124 struct closure cl; 125 u64 last_seq; 126 struct 
mutex		lock;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j)
{
	struct genradix_iter iter;
	struct journal_replay **_i, *i, *dup;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
	struct printbuf buf = PRINTBUF;
	int ret = JOURNAL_ENTRY_ADD_OK;

	if (!c->journal.oldest_seq_found_ondisk ||
	    le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk)
		c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq);

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < jlist->last_seq)
		return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;

	/*
	 * genradixes are indexed by a ulong, not a u64, so we can't index them
	 * by sequence number directly: assume instead that they will all fall
	 * within the range of +-2 billion of the first one we find.
	 */
	if (!c->journal_entries_base_seq)
		c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);

	/* Drop entries we don't need anymore */
	if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
		genradix_for_each_from(&c->journal_entries, iter, _i,
				       journal_entry_radix_idx(c, jlist->last_seq)) {
			i = *_i;

			if (journal_replay_ignore(i))
				continue;

			if (le64_to_cpu(i->j.seq) >= last_seq)
				break;

			journal_replay_free(c, i, false);
		}
	}

	jlist->last_seq = max(jlist->last_seq, last_seq);

	_i = genradix_ptr_alloc(&c->journal_entries,
				journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
				GFP_KERNEL);
	if (!_i)
		return -BCH_ERR_ENOMEM_journal_entry_add;

	/*
	 * Duplicate journal entries? 
If so we want the one that didn't have a 191 * checksum error: 192 */ 193 dup = *_i; 194 if (dup) { 195 bool identical = bytes == vstruct_bytes(&dup->j) && 196 !memcmp(j, &dup->j, bytes); 197 bool not_identical = !identical && 198 entry_ptr.csum_good && 199 dup->csum_good; 200 201 bool same_device = false; 202 darray_for_each(dup->ptrs, ptr) 203 if (ptr->dev == ca->dev_idx) 204 same_device = true; 205 206 ret = darray_push(&dup->ptrs, entry_ptr); 207 if (ret) 208 goto out; 209 210 bch2_journal_replay_to_text(&buf, c, dup); 211 212 fsck_err_on(same_device, 213 c, journal_entry_dup_same_device, 214 "duplicate journal entry on same device\n %s", 215 buf.buf); 216 217 fsck_err_on(not_identical, 218 c, journal_entry_replicas_data_mismatch, 219 "found duplicate but non identical journal entries\n %s", 220 buf.buf); 221 222 if (entry_ptr.csum_good && !identical) 223 goto replace; 224 225 goto out; 226 } 227 replace: 228 i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); 229 if (!i) 230 return -BCH_ERR_ENOMEM_journal_entry_add; 231 232 darray_init(&i->ptrs); 233 i->csum_good = entry_ptr.csum_good; 234 i->ignore_blacklisted = false; 235 i->ignore_not_dirty = false; 236 unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct"); 237 238 if (dup) { 239 /* The first ptr should represent the jset we kept: */ 240 darray_for_each(dup->ptrs, ptr) 241 darray_push(&i->ptrs, *ptr); 242 __journal_replay_free(c, dup); 243 } else { 244 darray_push(&i->ptrs, entry_ptr); 245 } 246 247 *_i = i; 248 out: 249 fsck_err: 250 printbuf_exit(&buf); 251 return ret; 252 } 253 254 /* this fills in a range with empty jset_entries: */ 255 static void journal_entry_null_range(void *start, void *end) 256 { 257 struct jset_entry *entry; 258 259 for (entry = start; entry != end; entry = vstruct_next(entry)) 260 memset(entry, 0, sizeof(*entry)); 261 } 262 263 #define JOURNAL_ENTRY_REREAD 5 264 #define JOURNAL_ENTRY_NONE 6 265 #define JOURNAL_ENTRY_BAD 7 266 267 static void journal_entry_err_msg(struct printbuf *out, 268 u32 version, 269 struct jset *jset, 270 struct jset_entry *entry) 271 { 272 prt_str(out, "invalid journal entry, version="); 273 bch2_version_to_text(out, version); 274 275 if (entry) { 276 prt_str(out, " type="); 277 bch2_prt_jset_entry_type(out, entry->type); 278 } 279 280 if (!jset) { 281 prt_printf(out, " in superblock"); 282 } else { 283 284 prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq)); 285 286 if (entry) 287 prt_printf(out, " offset=%zi/%u", 288 (u64 *) entry - jset->_data, 289 le32_to_cpu(jset->u64s)); 290 } 291 292 prt_str(out, ": "); 293 } 294 295 #define journal_entry_err(c, version, jset, entry, _err, msg, ...) \ 296 ({ \ 297 struct printbuf _buf = PRINTBUF; \ 298 \ 299 journal_entry_err_msg(&_buf, version, jset, entry); \ 300 prt_printf(&_buf, msg, ##__VA_ARGS__); \ 301 \ 302 switch (flags & BCH_VALIDATE_write) { \ 303 case READ: \ 304 mustfix_fsck_err(c, _err, "%s", _buf.buf); \ 305 break; \ 306 case WRITE: \ 307 bch2_sb_error_count(c, BCH_FSCK_ERR_##_err); \ 308 bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\ 309 if (bch2_fs_inconsistent(c)) { \ 310 ret = -BCH_ERR_fsck_errors_not_fixed; \ 311 goto fsck_err; \ 312 } \ 313 break; \ 314 } \ 315 \ 316 printbuf_exit(&_buf); \ 317 true; \ 318 }) 319 320 #define journal_entry_err_on(cond, ...) \ 321 ((cond) ? 
journal_entry_err(__VA_ARGS__) : false) 322 323 #define FSCK_DELETED_KEY 5 324 325 static int journal_validate_key(struct bch_fs *c, 326 struct jset *jset, 327 struct jset_entry *entry, 328 unsigned level, enum btree_id btree_id, 329 struct bkey_i *k, 330 unsigned version, int big_endian, 331 enum bch_validate_flags flags) 332 { 333 int write = flags & BCH_VALIDATE_write; 334 void *next = vstruct_next(entry); 335 struct printbuf buf = PRINTBUF; 336 int ret = 0; 337 338 if (journal_entry_err_on(!k->k.u64s, 339 c, version, jset, entry, 340 journal_entry_bkey_u64s_0, 341 "k->u64s 0")) { 342 entry->u64s = cpu_to_le16((u64 *) k - entry->_data); 343 journal_entry_null_range(vstruct_next(entry), next); 344 return FSCK_DELETED_KEY; 345 } 346 347 if (journal_entry_err_on((void *) bkey_next(k) > 348 (void *) vstruct_next(entry), 349 c, version, jset, entry, 350 journal_entry_bkey_past_end, 351 "extends past end of journal entry")) { 352 entry->u64s = cpu_to_le16((u64 *) k - entry->_data); 353 journal_entry_null_range(vstruct_next(entry), next); 354 return FSCK_DELETED_KEY; 355 } 356 357 if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, 358 c, version, jset, entry, 359 journal_entry_bkey_bad_format, 360 "bad format %u", k->k.format)) { 361 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); 362 memmove(k, bkey_next(k), next - (void *) bkey_next(k)); 363 journal_entry_null_range(vstruct_next(entry), next); 364 return FSCK_DELETED_KEY; 365 } 366 367 if (!write) 368 bch2_bkey_compat(level, btree_id, version, big_endian, 369 write, NULL, bkey_to_packed(k)); 370 371 if (bch2_bkey_invalid(c, bkey_i_to_s_c(k), 372 __btree_node_type(level, btree_id), write, &buf)) { 373 printbuf_reset(&buf); 374 journal_entry_err_msg(&buf, version, jset, entry); 375 prt_newline(&buf); 376 printbuf_indent_add(&buf, 2); 377 378 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k)); 379 prt_newline(&buf); 380 bch2_bkey_invalid(c, bkey_i_to_s_c(k), 381 __btree_node_type(level, btree_id), write, &buf); 382 383 mustfix_fsck_err(c, journal_entry_bkey_invalid, 384 "%s", buf.buf); 385 386 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); 387 memmove(k, bkey_next(k), next - (void *) bkey_next(k)); 388 journal_entry_null_range(vstruct_next(entry), next); 389 390 printbuf_exit(&buf); 391 return FSCK_DELETED_KEY; 392 } 393 394 if (write) 395 bch2_bkey_compat(level, btree_id, version, big_endian, 396 write, NULL, bkey_to_packed(k)); 397 fsck_err: 398 printbuf_exit(&buf); 399 return ret; 400 } 401 402 static int journal_entry_btree_keys_validate(struct bch_fs *c, 403 struct jset *jset, 404 struct jset_entry *entry, 405 unsigned version, int big_endian, 406 enum bch_validate_flags flags) 407 { 408 struct bkey_i *k = entry->start; 409 410 while (k != vstruct_last(entry)) { 411 int ret = journal_validate_key(c, jset, entry, 412 entry->level, 413 entry->btree_id, 414 k, version, big_endian, 415 flags|BCH_VALIDATE_journal); 416 if (ret == FSCK_DELETED_KEY) 417 continue; 418 else if (ret) 419 return ret; 420 421 k = bkey_next(k); 422 } 423 424 return 0; 425 } 426 427 static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c, 428 struct jset_entry *entry) 429 { 430 bool first = true; 431 432 jset_entry_for_each_key(entry, k) { 433 if (!first) { 434 prt_newline(out); 435 bch2_prt_jset_entry_type(out, entry->type); 436 prt_str(out, ": "); 437 } 438 prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level); 439 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k)); 440 first = false; 441 } 442 } 443 444 
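/*
 * A btree root entry must contain exactly one key; when it doesn't, only the
 * contents are zeroed - the jset_entry itself is kept so that recovery can
 * still tell a btree root was supposed to be present.
 */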
static int journal_entry_btree_root_validate(struct bch_fs *c, 445 struct jset *jset, 446 struct jset_entry *entry, 447 unsigned version, int big_endian, 448 enum bch_validate_flags flags) 449 { 450 struct bkey_i *k = entry->start; 451 int ret = 0; 452 453 if (journal_entry_err_on(!entry->u64s || 454 le16_to_cpu(entry->u64s) != k->k.u64s, 455 c, version, jset, entry, 456 journal_entry_btree_root_bad_size, 457 "invalid btree root journal entry: wrong number of keys")) { 458 void *next = vstruct_next(entry); 459 /* 460 * we don't want to null out this jset_entry, 461 * just the contents, so that later we can tell 462 * we were _supposed_ to have a btree root 463 */ 464 entry->u64s = 0; 465 journal_entry_null_range(vstruct_next(entry), next); 466 return 0; 467 } 468 469 ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k, 470 version, big_endian, flags); 471 if (ret == FSCK_DELETED_KEY) 472 ret = 0; 473 fsck_err: 474 return ret; 475 } 476 477 static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c, 478 struct jset_entry *entry) 479 { 480 journal_entry_btree_keys_to_text(out, c, entry); 481 } 482 483 static int journal_entry_prio_ptrs_validate(struct bch_fs *c, 484 struct jset *jset, 485 struct jset_entry *entry, 486 unsigned version, int big_endian, 487 enum bch_validate_flags flags) 488 { 489 /* obsolete, don't care: */ 490 return 0; 491 } 492 493 static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c, 494 struct jset_entry *entry) 495 { 496 } 497 498 static int journal_entry_blacklist_validate(struct bch_fs *c, 499 struct jset *jset, 500 struct jset_entry *entry, 501 unsigned version, int big_endian, 502 enum bch_validate_flags flags) 503 { 504 int ret = 0; 505 506 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, 507 c, version, jset, entry, 508 journal_entry_blacklist_bad_size, 509 "invalid journal seq blacklist entry: bad size")) { 510 journal_entry_null_range(entry, vstruct_next(entry)); 511 } 512 fsck_err: 513 return ret; 514 } 515 516 static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c, 517 struct jset_entry *entry) 518 { 519 struct jset_entry_blacklist *bl = 520 container_of(entry, struct jset_entry_blacklist, entry); 521 522 prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq)); 523 } 524 525 static int journal_entry_blacklist_v2_validate(struct bch_fs *c, 526 struct jset *jset, 527 struct jset_entry *entry, 528 unsigned version, int big_endian, 529 enum bch_validate_flags flags) 530 { 531 struct jset_entry_blacklist_v2 *bl_entry; 532 int ret = 0; 533 534 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, 535 c, version, jset, entry, 536 journal_entry_blacklist_v2_bad_size, 537 "invalid journal seq blacklist entry: bad size")) { 538 journal_entry_null_range(entry, vstruct_next(entry)); 539 goto out; 540 } 541 542 bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry); 543 544 if (journal_entry_err_on(le64_to_cpu(bl_entry->start) > 545 le64_to_cpu(bl_entry->end), 546 c, version, jset, entry, 547 journal_entry_blacklist_v2_start_past_end, 548 "invalid journal seq blacklist entry: start > end")) { 549 journal_entry_null_range(entry, vstruct_next(entry)); 550 } 551 out: 552 fsck_err: 553 return ret; 554 } 555 556 static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c, 557 struct jset_entry *entry) 558 { 559 struct jset_entry_blacklist_v2 *bl = 560 container_of(entry, struct jset_entry_blacklist_v2, entry); 561 562 prt_printf(out, 
"start=%llu end=%llu", 563 le64_to_cpu(bl->start), 564 le64_to_cpu(bl->end)); 565 } 566 567 static int journal_entry_usage_validate(struct bch_fs *c, 568 struct jset *jset, 569 struct jset_entry *entry, 570 unsigned version, int big_endian, 571 enum bch_validate_flags flags) 572 { 573 struct jset_entry_usage *u = 574 container_of(entry, struct jset_entry_usage, entry); 575 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); 576 int ret = 0; 577 578 if (journal_entry_err_on(bytes < sizeof(*u), 579 c, version, jset, entry, 580 journal_entry_usage_bad_size, 581 "invalid journal entry usage: bad size")) { 582 journal_entry_null_range(entry, vstruct_next(entry)); 583 return ret; 584 } 585 586 fsck_err: 587 return ret; 588 } 589 590 static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c, 591 struct jset_entry *entry) 592 { 593 struct jset_entry_usage *u = 594 container_of(entry, struct jset_entry_usage, entry); 595 596 prt_str(out, "type="); 597 bch2_prt_fs_usage_type(out, u->entry.btree_id); 598 prt_printf(out, " v=%llu", le64_to_cpu(u->v)); 599 } 600 601 static int journal_entry_data_usage_validate(struct bch_fs *c, 602 struct jset *jset, 603 struct jset_entry *entry, 604 unsigned version, int big_endian, 605 enum bch_validate_flags flags) 606 { 607 struct jset_entry_data_usage *u = 608 container_of(entry, struct jset_entry_data_usage, entry); 609 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); 610 struct printbuf err = PRINTBUF; 611 int ret = 0; 612 613 if (journal_entry_err_on(bytes < sizeof(*u) || 614 bytes < sizeof(*u) + u->r.nr_devs, 615 c, version, jset, entry, 616 journal_entry_data_usage_bad_size, 617 "invalid journal entry usage: bad size")) { 618 journal_entry_null_range(entry, vstruct_next(entry)); 619 goto out; 620 } 621 622 if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err), 623 c, version, jset, entry, 624 journal_entry_data_usage_bad_size, 625 "invalid journal entry usage: %s", err.buf)) { 626 journal_entry_null_range(entry, vstruct_next(entry)); 627 goto out; 628 } 629 out: 630 fsck_err: 631 printbuf_exit(&err); 632 return ret; 633 } 634 635 static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c, 636 struct jset_entry *entry) 637 { 638 struct jset_entry_data_usage *u = 639 container_of(entry, struct jset_entry_data_usage, entry); 640 641 bch2_replicas_entry_to_text(out, &u->r); 642 prt_printf(out, "=%llu", le64_to_cpu(u->v)); 643 } 644 645 static int journal_entry_clock_validate(struct bch_fs *c, 646 struct jset *jset, 647 struct jset_entry *entry, 648 unsigned version, int big_endian, 649 enum bch_validate_flags flags) 650 { 651 struct jset_entry_clock *clock = 652 container_of(entry, struct jset_entry_clock, entry); 653 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); 654 int ret = 0; 655 656 if (journal_entry_err_on(bytes != sizeof(*clock), 657 c, version, jset, entry, 658 journal_entry_clock_bad_size, 659 "bad size")) { 660 journal_entry_null_range(entry, vstruct_next(entry)); 661 return ret; 662 } 663 664 if (journal_entry_err_on(clock->rw > 1, 665 c, version, jset, entry, 666 journal_entry_clock_bad_rw, 667 "bad rw")) { 668 journal_entry_null_range(entry, vstruct_next(entry)); 669 return ret; 670 } 671 672 fsck_err: 673 return ret; 674 } 675 676 static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c, 677 struct jset_entry *entry) 678 { 679 struct jset_entry_clock *clock = 680 container_of(entry, struct 
jset_entry_clock, entry); 681 682 prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time)); 683 } 684 685 static int journal_entry_dev_usage_validate(struct bch_fs *c, 686 struct jset *jset, 687 struct jset_entry *entry, 688 unsigned version, int big_endian, 689 enum bch_validate_flags flags) 690 { 691 struct jset_entry_dev_usage *u = 692 container_of(entry, struct jset_entry_dev_usage, entry); 693 unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); 694 unsigned expected = sizeof(*u); 695 int ret = 0; 696 697 if (journal_entry_err_on(bytes < expected, 698 c, version, jset, entry, 699 journal_entry_dev_usage_bad_size, 700 "bad size (%u < %u)", 701 bytes, expected)) { 702 journal_entry_null_range(entry, vstruct_next(entry)); 703 return ret; 704 } 705 706 if (journal_entry_err_on(u->pad, 707 c, version, jset, entry, 708 journal_entry_dev_usage_bad_pad, 709 "bad pad")) { 710 journal_entry_null_range(entry, vstruct_next(entry)); 711 return ret; 712 } 713 714 fsck_err: 715 return ret; 716 } 717 718 static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c, 719 struct jset_entry *entry) 720 { 721 struct jset_entry_dev_usage *u = 722 container_of(entry, struct jset_entry_dev_usage, entry); 723 unsigned i, nr_types = jset_entry_dev_usage_nr_types(u); 724 725 prt_printf(out, "dev=%u", le32_to_cpu(u->dev)); 726 727 for (i = 0; i < nr_types; i++) { 728 bch2_prt_data_type(out, i); 729 prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu", 730 le64_to_cpu(u->d[i].buckets), 731 le64_to_cpu(u->d[i].sectors), 732 le64_to_cpu(u->d[i].fragmented)); 733 } 734 } 735 736 static int journal_entry_log_validate(struct bch_fs *c, 737 struct jset *jset, 738 struct jset_entry *entry, 739 unsigned version, int big_endian, 740 enum bch_validate_flags flags) 741 { 742 return 0; 743 } 744 745 static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c, 746 struct jset_entry *entry) 747 { 748 struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry); 749 unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d); 750 751 prt_printf(out, "%.*s", bytes, l->d); 752 } 753 754 static int journal_entry_overwrite_validate(struct bch_fs *c, 755 struct jset *jset, 756 struct jset_entry *entry, 757 unsigned version, int big_endian, 758 enum bch_validate_flags flags) 759 { 760 return journal_entry_btree_keys_validate(c, jset, entry, 761 version, big_endian, READ); 762 } 763 764 static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c, 765 struct jset_entry *entry) 766 { 767 journal_entry_btree_keys_to_text(out, c, entry); 768 } 769 770 static int journal_entry_write_buffer_keys_validate(struct bch_fs *c, 771 struct jset *jset, 772 struct jset_entry *entry, 773 unsigned version, int big_endian, 774 enum bch_validate_flags flags) 775 { 776 return journal_entry_btree_keys_validate(c, jset, entry, 777 version, big_endian, READ); 778 } 779 780 static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c, 781 struct jset_entry *entry) 782 { 783 journal_entry_btree_keys_to_text(out, c, entry); 784 } 785 786 static int journal_entry_datetime_validate(struct bch_fs *c, 787 struct jset *jset, 788 struct jset_entry *entry, 789 unsigned version, int big_endian, 790 enum bch_validate_flags flags) 791 { 792 unsigned bytes = vstruct_bytes(entry); 793 unsigned expected = 16; 794 int ret = 0; 795 796 if (journal_entry_err_on(vstruct_bytes(entry) < expected, 797 c, 
version, jset, entry, 798 journal_entry_dev_usage_bad_size, 799 "bad size (%u < %u)", 800 bytes, expected)) { 801 journal_entry_null_range(entry, vstruct_next(entry)); 802 return ret; 803 } 804 fsck_err: 805 return ret; 806 } 807 808 static void journal_entry_datetime_to_text(struct printbuf *out, struct bch_fs *c, 809 struct jset_entry *entry) 810 { 811 struct jset_entry_datetime *datetime = 812 container_of(entry, struct jset_entry_datetime, entry); 813 814 bch2_prt_datetime(out, le64_to_cpu(datetime->seconds)); 815 } 816 817 struct jset_entry_ops { 818 int (*validate)(struct bch_fs *, struct jset *, 819 struct jset_entry *, unsigned, int, 820 enum bch_validate_flags); 821 void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *); 822 }; 823 824 static const struct jset_entry_ops bch2_jset_entry_ops[] = { 825 #define x(f, nr) \ 826 [BCH_JSET_ENTRY_##f] = (struct jset_entry_ops) { \ 827 .validate = journal_entry_##f##_validate, \ 828 .to_text = journal_entry_##f##_to_text, \ 829 }, 830 BCH_JSET_ENTRY_TYPES() 831 #undef x 832 }; 833 834 int bch2_journal_entry_validate(struct bch_fs *c, 835 struct jset *jset, 836 struct jset_entry *entry, 837 unsigned version, int big_endian, 838 enum bch_validate_flags flags) 839 { 840 return entry->type < BCH_JSET_ENTRY_NR 841 ? bch2_jset_entry_ops[entry->type].validate(c, jset, entry, 842 version, big_endian, flags) 843 : 0; 844 } 845 846 void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c, 847 struct jset_entry *entry) 848 { 849 bch2_prt_jset_entry_type(out, entry->type); 850 851 if (entry->type < BCH_JSET_ENTRY_NR) { 852 prt_str(out, ": "); 853 bch2_jset_entry_ops[entry->type].to_text(out, c, entry); 854 } 855 } 856 857 static int jset_validate_entries(struct bch_fs *c, struct jset *jset, 858 enum bch_validate_flags flags) 859 { 860 unsigned version = le32_to_cpu(jset->version); 861 int ret = 0; 862 863 vstruct_for_each(jset, entry) { 864 if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset), 865 c, version, jset, entry, 866 journal_entry_past_jset_end, 867 "journal entry extends past end of jset")) { 868 jset->u64s = cpu_to_le32((u64 *) entry - jset->_data); 869 break; 870 } 871 872 ret = bch2_journal_entry_validate(c, jset, entry, 873 version, JSET_BIG_ENDIAN(jset), flags); 874 if (ret) 875 break; 876 } 877 fsck_err: 878 return ret; 879 } 880 881 static int jset_validate(struct bch_fs *c, 882 struct bch_dev *ca, 883 struct jset *jset, u64 sector, 884 enum bch_validate_flags flags) 885 { 886 unsigned version; 887 int ret = 0; 888 889 if (le64_to_cpu(jset->magic) != jset_magic(c)) 890 return JOURNAL_ENTRY_NONE; 891 892 version = le32_to_cpu(jset->version); 893 if (journal_entry_err_on(!bch2_version_compatible(version), 894 c, version, jset, NULL, 895 jset_unsupported_version, 896 "%s sector %llu seq %llu: incompatible journal entry version %u.%u", 897 ca ? ca->name : c->name, 898 sector, le64_to_cpu(jset->seq), 899 BCH_VERSION_MAJOR(version), 900 BCH_VERSION_MINOR(version))) { 901 /* don't try to continue: */ 902 return -EINVAL; 903 } 904 905 if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), 906 c, version, jset, NULL, 907 jset_unknown_csum, 908 "%s sector %llu seq %llu: journal entry with unknown csum type %llu", 909 ca ? 
ca->name : c->name, 910 sector, le64_to_cpu(jset->seq), 911 JSET_CSUM_TYPE(jset))) 912 ret = JOURNAL_ENTRY_BAD; 913 914 /* last_seq is ignored when JSET_NO_FLUSH is true */ 915 if (journal_entry_err_on(!JSET_NO_FLUSH(jset) && 916 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), 917 c, version, jset, NULL, 918 jset_last_seq_newer_than_seq, 919 "invalid journal entry: last_seq > seq (%llu > %llu)", 920 le64_to_cpu(jset->last_seq), 921 le64_to_cpu(jset->seq))) { 922 jset->last_seq = jset->seq; 923 return JOURNAL_ENTRY_BAD; 924 } 925 926 ret = jset_validate_entries(c, jset, flags); 927 fsck_err: 928 return ret; 929 } 930 931 static int jset_validate_early(struct bch_fs *c, 932 struct bch_dev *ca, 933 struct jset *jset, u64 sector, 934 unsigned bucket_sectors_left, 935 unsigned sectors_read) 936 { 937 size_t bytes = vstruct_bytes(jset); 938 unsigned version; 939 enum bch_validate_flags flags = BCH_VALIDATE_journal; 940 int ret = 0; 941 942 if (le64_to_cpu(jset->magic) != jset_magic(c)) 943 return JOURNAL_ENTRY_NONE; 944 945 version = le32_to_cpu(jset->version); 946 if (journal_entry_err_on(!bch2_version_compatible(version), 947 c, version, jset, NULL, 948 jset_unsupported_version, 949 "%s sector %llu seq %llu: unknown journal entry version %u.%u", 950 ca ? ca->name : c->name, 951 sector, le64_to_cpu(jset->seq), 952 BCH_VERSION_MAJOR(version), 953 BCH_VERSION_MINOR(version))) { 954 /* don't try to continue: */ 955 return -EINVAL; 956 } 957 958 if (bytes > (sectors_read << 9) && 959 sectors_read < bucket_sectors_left) 960 return JOURNAL_ENTRY_REREAD; 961 962 if (journal_entry_err_on(bytes > bucket_sectors_left << 9, 963 c, version, jset, NULL, 964 jset_past_bucket_end, 965 "%s sector %llu seq %llu: journal entry too big (%zu bytes)", 966 ca ? ca->name : c->name, 967 sector, le64_to_cpu(jset->seq), bytes)) 968 le32_add_cpu(&jset->u64s, 969 -((bytes - (bucket_sectors_left << 9)) / 8)); 970 fsck_err: 971 return ret; 972 } 973 974 struct journal_read_buf { 975 void *data; 976 size_t size; 977 }; 978 979 static int journal_read_buf_realloc(struct journal_read_buf *b, 980 size_t new_size) 981 { 982 void *n; 983 984 /* the bios are sized for this many pages, max: */ 985 if (new_size > JOURNAL_ENTRY_SIZE_MAX) 986 return -BCH_ERR_ENOMEM_journal_read_buf_realloc; 987 988 new_size = roundup_pow_of_two(new_size); 989 n = kvmalloc(new_size, GFP_KERNEL); 990 if (!n) 991 return -BCH_ERR_ENOMEM_journal_read_buf_realloc; 992 993 kvfree(b->data); 994 b->data = n; 995 b->size = new_size; 996 return 0; 997 } 998 999 static int journal_read_bucket(struct bch_dev *ca, 1000 struct journal_read_buf *buf, 1001 struct journal_list *jlist, 1002 unsigned bucket) 1003 { 1004 struct bch_fs *c = ca->fs; 1005 struct journal_device *ja = &ca->journal; 1006 struct jset *j = NULL; 1007 unsigned sectors, sectors_read = 0; 1008 u64 offset = bucket_to_sector(ca, ja->buckets[bucket]), 1009 end = offset + ca->mi.bucket_size; 1010 bool saw_bad = false, csum_good; 1011 struct printbuf err = PRINTBUF; 1012 int ret = 0; 1013 1014 pr_debug("reading %u", bucket); 1015 1016 while (offset < end) { 1017 if (!sectors_read) { 1018 struct bio *bio; 1019 unsigned nr_bvecs; 1020 reread: 1021 sectors_read = min_t(unsigned, 1022 end - offset, buf->size >> 9); 1023 nr_bvecs = buf_pages(buf->data, sectors_read << 9); 1024 1025 bio = bio_kmalloc(nr_bvecs, GFP_KERNEL); 1026 bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ); 1027 1028 bio->bi_iter.bi_sector = offset; 1029 bch2_bio_map(bio, buf->data, sectors_read << 9); 1030 
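			/* read this chunk of the journal bucket synchronously */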
1031 ret = submit_bio_wait(bio); 1032 kfree(bio); 1033 1034 if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read, 1035 "journal read error: sector %llu", 1036 offset) || 1037 bch2_meta_read_fault("journal")) { 1038 /* 1039 * We don't error out of the recovery process 1040 * here, since the relevant journal entry may be 1041 * found on a different device, and missing or 1042 * no journal entries will be handled later 1043 */ 1044 goto out; 1045 } 1046 1047 j = buf->data; 1048 } 1049 1050 ret = jset_validate_early(c, ca, j, offset, 1051 end - offset, sectors_read); 1052 switch (ret) { 1053 case 0: 1054 sectors = vstruct_sectors(j, c->block_bits); 1055 break; 1056 case JOURNAL_ENTRY_REREAD: 1057 if (vstruct_bytes(j) > buf->size) { 1058 ret = journal_read_buf_realloc(buf, 1059 vstruct_bytes(j)); 1060 if (ret) 1061 goto err; 1062 } 1063 goto reread; 1064 case JOURNAL_ENTRY_NONE: 1065 if (!saw_bad) 1066 goto out; 1067 /* 1068 * On checksum error we don't really trust the size 1069 * field of the journal entry we read, so try reading 1070 * again at next block boundary: 1071 */ 1072 sectors = block_sectors(c); 1073 goto next_block; 1074 default: 1075 goto err; 1076 } 1077 1078 if (le64_to_cpu(j->seq) > ja->highest_seq_found) { 1079 ja->highest_seq_found = le64_to_cpu(j->seq); 1080 ja->cur_idx = bucket; 1081 ja->sectors_free = ca->mi.bucket_size - 1082 bucket_remainder(ca, offset) - sectors; 1083 } 1084 1085 /* 1086 * This happens sometimes if we don't have discards on - 1087 * when we've partially overwritten a bucket with new 1088 * journal entries. We don't need the rest of the 1089 * bucket: 1090 */ 1091 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket]) 1092 goto out; 1093 1094 ja->bucket_seq[bucket] = le64_to_cpu(j->seq); 1095 1096 enum bch_csum_type csum_type = JSET_CSUM_TYPE(j); 1097 struct bch_csum csum; 1098 csum_good = jset_csum_good(c, j, &csum); 1099 1100 if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum, 1101 "%s", 1102 (printbuf_reset(&err), 1103 prt_str(&err, "journal "), 1104 bch2_csum_err_msg(&err, csum_type, j->csum, csum), 1105 err.buf))) 1106 saw_bad = true; 1107 1108 ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j), 1109 j->encrypted_start, 1110 vstruct_end(j) - (void *) j->encrypted_start); 1111 bch2_fs_fatal_err_on(ret, c, "decrypting journal entry: %s", bch2_err_str(ret)); 1112 1113 mutex_lock(&jlist->lock); 1114 ret = journal_entry_add(c, ca, (struct journal_ptr) { 1115 .csum_good = csum_good, 1116 .dev = ca->dev_idx, 1117 .bucket = bucket, 1118 .bucket_offset = offset - 1119 bucket_to_sector(ca, ja->buckets[bucket]), 1120 .sector = offset, 1121 }, jlist, j); 1122 mutex_unlock(&jlist->lock); 1123 1124 switch (ret) { 1125 case JOURNAL_ENTRY_ADD_OK: 1126 break; 1127 case JOURNAL_ENTRY_ADD_OUT_OF_RANGE: 1128 break; 1129 default: 1130 goto err; 1131 } 1132 next_block: 1133 pr_debug("next"); 1134 offset += sectors; 1135 sectors_read -= sectors; 1136 j = ((void *) j) + (sectors << 9); 1137 } 1138 1139 out: 1140 ret = 0; 1141 err: 1142 printbuf_exit(&err); 1143 return ret; 1144 } 1145 1146 static CLOSURE_CALLBACK(bch2_journal_read_device) 1147 { 1148 closure_type(ja, struct journal_device, read); 1149 struct bch_dev *ca = container_of(ja, struct bch_dev, journal); 1150 struct bch_fs *c = ca->fs; 1151 struct journal_list *jlist = 1152 container_of(cl->parent, struct journal_list, cl); 1153 struct journal_read_buf buf = { NULL, 0 }; 1154 unsigned i; 1155 int ret = 0; 1156 1157 if (!ja->nr) 1158 goto out; 1159 1160 ret = journal_read_buf_realloc(&buf, 
PAGE_SIZE); 1161 if (ret) 1162 goto err; 1163 1164 pr_debug("%u journal buckets", ja->nr); 1165 1166 for (i = 0; i < ja->nr; i++) { 1167 ret = journal_read_bucket(ca, &buf, jlist, i); 1168 if (ret) 1169 goto err; 1170 } 1171 1172 /* 1173 * Set dirty_idx to indicate the entire journal is full and needs to be 1174 * reclaimed - journal reclaim will immediately reclaim whatever isn't 1175 * pinned when it first runs: 1176 */ 1177 ja->discard_idx = ja->dirty_idx_ondisk = 1178 ja->dirty_idx = (ja->cur_idx + 1) % ja->nr; 1179 out: 1180 bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret); 1181 kvfree(buf.data); 1182 percpu_ref_put(&ca->io_ref); 1183 closure_return(cl); 1184 return; 1185 err: 1186 mutex_lock(&jlist->lock); 1187 jlist->ret = ret; 1188 mutex_unlock(&jlist->lock); 1189 goto out; 1190 } 1191 1192 int bch2_journal_read(struct bch_fs *c, 1193 u64 *last_seq, 1194 u64 *blacklist_seq, 1195 u64 *start_seq) 1196 { 1197 struct journal_list jlist; 1198 struct journal_replay *i, **_i, *prev = NULL; 1199 struct genradix_iter radix_iter; 1200 struct printbuf buf = PRINTBUF; 1201 bool degraded = false, last_write_torn = false; 1202 u64 seq; 1203 int ret = 0; 1204 1205 closure_init_stack(&jlist.cl); 1206 mutex_init(&jlist.lock); 1207 jlist.last_seq = 0; 1208 jlist.ret = 0; 1209 1210 for_each_member_device(c, ca) { 1211 if (!c->opts.fsck && 1212 !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal))) 1213 continue; 1214 1215 if ((ca->mi.state == BCH_MEMBER_STATE_rw || 1216 ca->mi.state == BCH_MEMBER_STATE_ro) && 1217 percpu_ref_tryget(&ca->io_ref)) 1218 closure_call(&ca->journal.read, 1219 bch2_journal_read_device, 1220 system_unbound_wq, 1221 &jlist.cl); 1222 else 1223 degraded = true; 1224 } 1225 1226 closure_sync(&jlist.cl); 1227 1228 if (jlist.ret) 1229 return jlist.ret; 1230 1231 *last_seq = 0; 1232 *start_seq = 0; 1233 *blacklist_seq = 0; 1234 1235 /* 1236 * Find most recent flush entry, and ignore newer non flush entries - 1237 * those entries will be blacklisted: 1238 */ 1239 genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) { 1240 enum bch_validate_flags flags = BCH_VALIDATE_journal; 1241 1242 i = *_i; 1243 1244 if (journal_replay_ignore(i)) 1245 continue; 1246 1247 if (!*start_seq) 1248 *blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1; 1249 1250 if (JSET_NO_FLUSH(&i->j)) { 1251 i->ignore_blacklisted = true; 1252 continue; 1253 } 1254 1255 if (!last_write_torn && !i->csum_good) { 1256 last_write_torn = true; 1257 i->ignore_blacklisted = true; 1258 continue; 1259 } 1260 1261 if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq), 1262 c, le32_to_cpu(i->j.version), &i->j, NULL, 1263 jset_last_seq_newer_than_seq, 1264 "invalid journal entry: last_seq > seq (%llu > %llu)", 1265 le64_to_cpu(i->j.last_seq), 1266 le64_to_cpu(i->j.seq))) 1267 i->j.last_seq = i->j.seq; 1268 1269 *last_seq = le64_to_cpu(i->j.last_seq); 1270 *blacklist_seq = le64_to_cpu(i->j.seq) + 1; 1271 break; 1272 } 1273 1274 if (!*start_seq) { 1275 bch_info(c, "journal read done, but no entries found"); 1276 return 0; 1277 } 1278 1279 if (!*last_seq) { 1280 fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes, 1281 "journal read done, but no entries found after dropping non-flushes"); 1282 return 0; 1283 } 1284 1285 bch_info(c, "journal read done, replaying entries %llu-%llu", 1286 *last_seq, *blacklist_seq - 1); 1287 1288 if (*start_seq != *blacklist_seq) 1289 bch_info(c, "dropped unflushed entries %llu-%llu", 1290 *blacklist_seq, *start_seq - 1); 1291 1292 /* 
Drop blacklisted entries and entries older than last_seq: */ 1293 genradix_for_each(&c->journal_entries, radix_iter, _i) { 1294 i = *_i; 1295 1296 if (journal_replay_ignore(i)) 1297 continue; 1298 1299 seq = le64_to_cpu(i->j.seq); 1300 if (seq < *last_seq) { 1301 journal_replay_free(c, i, false); 1302 continue; 1303 } 1304 1305 if (bch2_journal_seq_is_blacklisted(c, seq, true)) { 1306 fsck_err_on(!JSET_NO_FLUSH(&i->j), c, 1307 jset_seq_blacklisted, 1308 "found blacklisted journal entry %llu", seq); 1309 i->ignore_blacklisted = true; 1310 } 1311 } 1312 1313 /* Check for missing entries: */ 1314 seq = *last_seq; 1315 genradix_for_each(&c->journal_entries, radix_iter, _i) { 1316 i = *_i; 1317 1318 if (journal_replay_ignore(i)) 1319 continue; 1320 1321 BUG_ON(seq > le64_to_cpu(i->j.seq)); 1322 1323 while (seq < le64_to_cpu(i->j.seq)) { 1324 u64 missing_start, missing_end; 1325 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; 1326 1327 while (seq < le64_to_cpu(i->j.seq) && 1328 bch2_journal_seq_is_blacklisted(c, seq, false)) 1329 seq++; 1330 1331 if (seq == le64_to_cpu(i->j.seq)) 1332 break; 1333 1334 missing_start = seq; 1335 1336 while (seq < le64_to_cpu(i->j.seq) && 1337 !bch2_journal_seq_is_blacklisted(c, seq, false)) 1338 seq++; 1339 1340 if (prev) { 1341 bch2_journal_ptrs_to_text(&buf1, c, prev); 1342 prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits)); 1343 } else 1344 prt_printf(&buf1, "(none)"); 1345 bch2_journal_ptrs_to_text(&buf2, c, i); 1346 1347 missing_end = seq - 1; 1348 fsck_err(c, journal_entries_missing, 1349 "journal entries %llu-%llu missing! (replaying %llu-%llu)\n" 1350 " prev at %s\n" 1351 " next at %s, continue?", 1352 missing_start, missing_end, 1353 *last_seq, *blacklist_seq - 1, 1354 buf1.buf, buf2.buf); 1355 1356 printbuf_exit(&buf1); 1357 printbuf_exit(&buf2); 1358 } 1359 1360 prev = i; 1361 seq++; 1362 } 1363 1364 genradix_for_each(&c->journal_entries, radix_iter, _i) { 1365 struct bch_replicas_padded replicas = { 1366 .e.data_type = BCH_DATA_journal, 1367 .e.nr_required = 1, 1368 }; 1369 1370 i = *_i; 1371 if (journal_replay_ignore(i)) 1372 continue; 1373 1374 darray_for_each(i->ptrs, ptr) { 1375 struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev); 1376 1377 if (!ptr->csum_good) 1378 bch_err_dev_offset(ca, ptr->sector, 1379 "invalid journal checksum, seq %llu%s", 1380 le64_to_cpu(i->j.seq), 1381 i->csum_good ? 
" (had good copy on another device)" : ""); 1382 } 1383 1384 ret = jset_validate(c, 1385 bch2_dev_have_ref(c, i->ptrs.data[0].dev), 1386 &i->j, 1387 i->ptrs.data[0].sector, 1388 READ); 1389 if (ret) 1390 goto err; 1391 1392 darray_for_each(i->ptrs, ptr) 1393 replicas.e.devs[replicas.e.nr_devs++] = ptr->dev; 1394 1395 bch2_replicas_entry_sort(&replicas.e); 1396 1397 printbuf_reset(&buf); 1398 bch2_replicas_entry_to_text(&buf, &replicas.e); 1399 1400 if (!degraded && 1401 !bch2_replicas_marked(c, &replicas.e) && 1402 (le64_to_cpu(i->j.seq) == *last_seq || 1403 fsck_err(c, journal_entry_replicas_not_marked, 1404 "superblock not marked as containing replicas for journal entry %llu\n %s", 1405 le64_to_cpu(i->j.seq), buf.buf))) { 1406 ret = bch2_mark_replicas(c, &replicas.e); 1407 if (ret) 1408 goto err; 1409 } 1410 } 1411 err: 1412 fsck_err: 1413 printbuf_exit(&buf); 1414 return ret; 1415 } 1416 1417 /* journal write: */ 1418 1419 static void __journal_write_alloc(struct journal *j, 1420 struct journal_buf *w, 1421 struct dev_alloc_list *devs_sorted, 1422 unsigned sectors, 1423 unsigned *replicas, 1424 unsigned replicas_want) 1425 { 1426 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1427 struct journal_device *ja; 1428 struct bch_dev *ca; 1429 unsigned i; 1430 1431 if (*replicas >= replicas_want) 1432 return; 1433 1434 for (i = 0; i < devs_sorted->nr; i++) { 1435 ca = rcu_dereference(c->devs[devs_sorted->devs[i]]); 1436 if (!ca) 1437 continue; 1438 1439 ja = &ca->journal; 1440 1441 /* 1442 * Check that we can use this device, and aren't already using 1443 * it: 1444 */ 1445 if (!ca->mi.durability || 1446 ca->mi.state != BCH_MEMBER_STATE_rw || 1447 !ja->nr || 1448 bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) || 1449 sectors > ja->sectors_free) 1450 continue; 1451 1452 bch2_dev_stripe_increment(ca, &j->wp.stripe); 1453 1454 bch2_bkey_append_ptr(&w->key, 1455 (struct bch_extent_ptr) { 1456 .offset = bucket_to_sector(ca, 1457 ja->buckets[ja->cur_idx]) + 1458 ca->mi.bucket_size - 1459 ja->sectors_free, 1460 .dev = ca->dev_idx, 1461 }); 1462 1463 ja->sectors_free -= sectors; 1464 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); 1465 1466 *replicas += ca->mi.durability; 1467 1468 if (*replicas >= replicas_want) 1469 break; 1470 } 1471 } 1472 1473 /** 1474 * journal_write_alloc - decide where to write next journal entry 1475 * 1476 * @j: journal object 1477 * @w: journal buf (entry to be written) 1478 * 1479 * Returns: 0 on success, or -EROFS on failure 1480 */ 1481 static int journal_write_alloc(struct journal *j, struct journal_buf *w) 1482 { 1483 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1484 struct bch_devs_mask devs; 1485 struct journal_device *ja; 1486 struct bch_dev *ca; 1487 struct dev_alloc_list devs_sorted; 1488 unsigned sectors = vstruct_sectors(w->data, c->block_bits); 1489 unsigned target = c->opts.metadata_target ?: 1490 c->opts.foreground_target; 1491 unsigned i, replicas = 0, replicas_want = 1492 READ_ONCE(c->opts.metadata_replicas); 1493 unsigned replicas_need = min_t(unsigned, replicas_want, 1494 READ_ONCE(c->opts.metadata_replicas_required)); 1495 1496 rcu_read_lock(); 1497 retry: 1498 devs = target_rw_devs(c, BCH_DATA_journal, target); 1499 1500 devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs); 1501 1502 __journal_write_alloc(j, w, &devs_sorted, 1503 sectors, &replicas, replicas_want); 1504 1505 if (replicas >= replicas_want) 1506 goto done; 1507 1508 for (i = 0; i < devs_sorted.nr; i++) { 1509 ca = 
rcu_dereference(c->devs[devs_sorted.devs[i]]); 1510 if (!ca) 1511 continue; 1512 1513 ja = &ca->journal; 1514 1515 if (sectors > ja->sectors_free && 1516 sectors <= ca->mi.bucket_size && 1517 bch2_journal_dev_buckets_available(j, ja, 1518 journal_space_discarded)) { 1519 ja->cur_idx = (ja->cur_idx + 1) % ja->nr; 1520 ja->sectors_free = ca->mi.bucket_size; 1521 1522 /* 1523 * ja->bucket_seq[ja->cur_idx] must always have 1524 * something sensible: 1525 */ 1526 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); 1527 } 1528 } 1529 1530 __journal_write_alloc(j, w, &devs_sorted, 1531 sectors, &replicas, replicas_want); 1532 1533 if (replicas < replicas_want && target) { 1534 /* Retry from all devices: */ 1535 target = 0; 1536 goto retry; 1537 } 1538 done: 1539 rcu_read_unlock(); 1540 1541 BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX); 1542 1543 return replicas >= replicas_need ? 0 : -EROFS; 1544 } 1545 1546 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) 1547 { 1548 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1549 1550 /* we aren't holding j->lock: */ 1551 unsigned new_size = READ_ONCE(j->buf_size_want); 1552 void *new_buf; 1553 1554 if (buf->buf_size >= new_size) 1555 return; 1556 1557 size_t btree_write_buffer_size = new_size / 64; 1558 1559 if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size)) 1560 return; 1561 1562 new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN); 1563 if (!new_buf) 1564 return; 1565 1566 memcpy(new_buf, buf->data, buf->buf_size); 1567 1568 spin_lock(&j->lock); 1569 swap(buf->data, new_buf); 1570 swap(buf->buf_size, new_size); 1571 spin_unlock(&j->lock); 1572 1573 kvfree(new_buf); 1574 } 1575 1576 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j) 1577 { 1578 return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK); 1579 } 1580 1581 static CLOSURE_CALLBACK(journal_write_done) 1582 { 1583 closure_type(w, struct journal_buf, io); 1584 struct journal *j = container_of(w, struct journal, buf[w->idx]); 1585 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1586 struct bch_replicas_padded replicas; 1587 union journal_res_state old, new; 1588 u64 v, seq = le64_to_cpu(w->data->seq); 1589 int err = 0; 1590 1591 bch2_time_stats_update(!JSET_NO_FLUSH(w->data) 1592 ? 
j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	closure_debug_destroy(cl);

	spin_lock(&j->lock);
	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;
	if (err && (!j->err_seq || seq < j->err_seq))
		j->err_seq = seq;
	w->write_done = true;

	bool completed = false;

	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		w = j->buf + (seq & JOURNAL_BUF_MASK);
		if (!w->write_done)
			break;

		if (!j->err_seq && !JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;

			bch2_do_discards(c);
			closure_wake_up(&c->freelist_wait);
			bch2_reset_alloc_cursors(c);
		}

		j->seq_ondisk = seq;

		/*
		 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
		 * more buckets:
		 *
		 * Must come before signaling write completion, for
		 * bch2_fs_journal_stop():
		 */
		if (j->watermark != BCH_WATERMARK_stripe)
			journal_reclaim_kick(&c->journal);

		v = atomic64_read(&j->reservations.counter);
		do {
			old.v = new.v = v;
			BUG_ON(journal_state_count(new, new.unwritten_idx));
			BUG_ON(new.unwritten_idx != (seq & JOURNAL_BUF_MASK));

			new.unwritten_idx++;
		} while ((v = atomic64_cmpxchg(&j->reservations.counter, old.v, new.v)) != old.v);

		closure_wake_up(&w->wait);
		completed = true;
	}

	if (completed) {
		bch2_journal_reclaim_fast(j);
		bch2_journal_space_available(j);

		track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], false);

		journal_wake(j);
	}

	if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
	    new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		/*
		 * We don't close a journal entry to write it while there are
		 * previous entries still in flight - the current journal entry
		 * might want to be written now:
		 */
		mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
	}

	/*
	 * We don't typically trigger journal writes from here - the next journal
	 * write will be triggered immediately after the previous one is
	 * allocated, in bch2_journal_write() - but the journal write error path
	 * is special:
	 */
	bch2_journal_do_writes(j);
	spin_unlock(&j->lock);
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_bio *jbio = container_of(bio, struct journal_bio, bio);
	struct bch_dev *ca = jbio->ca;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = j->buf + jbio->buf_idx;

	if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
			       "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		unsigned long flags;

		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
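		/*
		 * With this device dropped from devs_written, journal_write_done()
		 * will only mark journal replicas on devices that actually
		 * completed the write.
		 */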
spin_unlock_irqrestore(&j->err_lock, flags); 1709 } 1710 1711 closure_put(&w->io); 1712 percpu_ref_put(&ca->io_ref); 1713 } 1714 1715 static CLOSURE_CALLBACK(journal_write_submit) 1716 { 1717 closure_type(w, struct journal_buf, io); 1718 struct journal *j = container_of(w, struct journal, buf[w->idx]); 1719 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1720 unsigned sectors = vstruct_sectors(w->data, c->block_bits); 1721 1722 extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) { 1723 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); 1724 if (!ca) { 1725 /* XXX: fix this */ 1726 bch_err(c, "missing device for journal write\n"); 1727 continue; 1728 } 1729 1730 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal], 1731 sectors); 1732 1733 struct journal_device *ja = &ca->journal; 1734 struct bio *bio = &ja->bio[w->idx]->bio; 1735 bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META); 1736 bio->bi_iter.bi_sector = ptr->offset; 1737 bio->bi_end_io = journal_write_endio; 1738 bio->bi_private = ca; 1739 1740 BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector); 1741 ca->prev_journal_sector = bio->bi_iter.bi_sector; 1742 1743 if (!JSET_NO_FLUSH(w->data)) 1744 bio->bi_opf |= REQ_FUA; 1745 if (!JSET_NO_FLUSH(w->data) && !w->separate_flush) 1746 bio->bi_opf |= REQ_PREFLUSH; 1747 1748 bch2_bio_map(bio, w->data, sectors << 9); 1749 1750 trace_and_count(c, journal_write, bio); 1751 closure_bio_submit(bio, cl); 1752 1753 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); 1754 } 1755 1756 continue_at(cl, journal_write_done, j->wq); 1757 } 1758 1759 static CLOSURE_CALLBACK(journal_write_preflush) 1760 { 1761 closure_type(w, struct journal_buf, io); 1762 struct journal *j = container_of(w, struct journal, buf[w->idx]); 1763 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1764 1765 if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { 1766 spin_lock(&j->lock); 1767 if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { 1768 closure_wait(&j->async_wait, cl); 1769 spin_unlock(&j->lock); 1770 continue_at(cl, journal_write_preflush, j->wq); 1771 return; 1772 } 1773 spin_unlock(&j->lock); 1774 } 1775 1776 if (w->separate_flush) { 1777 for_each_rw_member(c, ca) { 1778 percpu_ref_get(&ca->io_ref); 1779 1780 struct journal_device *ja = &ca->journal; 1781 struct bio *bio = &ja->bio[w->idx]->bio; 1782 bio_reset(bio, ca->disk_sb.bdev, 1783 REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH); 1784 bio->bi_end_io = journal_write_endio; 1785 bio->bi_private = ca; 1786 closure_bio_submit(bio, cl); 1787 } 1788 1789 continue_at(cl, journal_write_submit, j->wq); 1790 } else { 1791 /* 1792 * no need to punt to another work item if we're not waiting on 1793 * preflushes 1794 */ 1795 journal_write_submit(&cl->work); 1796 } 1797 } 1798 1799 static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) 1800 { 1801 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1802 struct jset_entry *start, *end; 1803 struct jset *jset = w->data; 1804 struct journal_keys_to_wb wb = { NULL }; 1805 unsigned sectors, bytes, u64s; 1806 unsigned long btree_roots_have = 0; 1807 bool validate_before_checksum = false; 1808 u64 seq = le64_to_cpu(jset->seq); 1809 int ret; 1810 1811 /* 1812 * Simple compaction, dropping empty jset_entries (from journal 1813 * reservations that weren't fully used) and merging jset_entries that 1814 * can be. 
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each(jset, i) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/*
		 * New btree roots are set by journalling them; when the journal
		 * entry gets written we have to propagate them to
		 * c->btree_roots
		 *
		 * But, every journal entry we write has to contain all the
		 * btree roots (at least for now); so after we copy btree roots
		 * to c->btree_roots we have to get any missing btree roots and
		 * add them to this journal entry:
		 */
		switch (i->type) {
		case BCH_JSET_ENTRY_btree_root:
			bch2_journal_entry_to_btree_root(c, i);
			__set_bit(i->btree_id, &btree_roots_have);
			break;
		case BCH_JSET_ENTRY_write_buffer_keys:
			EBUG_ON(!w->need_flush_to_write_buffer);

			if (!wb.wb)
				bch2_journal_keys_to_write_buffer_start(c, &wb, seq);

			jset_entry_for_each_key(i, k) {
				ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
				if (ret) {
					bch2_fs_fatal_error(c, "flushing journal keys to btree write buffer: %s",
							    bch2_err_str(ret));
					bch2_journal_keys_to_write_buffer_end(c, &wb);
					return ret;
				}
			}
			i->type = BCH_JSET_ENTRY_btree_keys;
			break;
		}
	}

	if (wb.wb)
		bch2_journal_keys_to_write_buffer_end(c, &wb);

	spin_lock(&c->journal.lock);
	w->need_flush_to_write_buffer = false;
	spin_unlock(&c->journal.lock);

	start = end = vstruct_last(jset);

	end = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);

	struct jset_entry_datetime *d =
		container_of(jset_entry_init(&end, sizeof(*d)), struct jset_entry_datetime, entry);
	d->entry.type	= BCH_JSET_ENTRY_datetime;
	d->seconds	= cpu_to_le64(ktime_get_real_seconds());

	bch2_journal_super_entries_add_common(c, &end, seq);
	u64s	= (u64 *) end - (u64 *) start;

	WARN_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);

	sectors = vstruct_sectors(jset, c->block_bits);
	bytes	= vstruct_bytes(jset);

	if (sectors > w->sectors) {
		bch2_fs_fatal_error(c, ": journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
				    vstruct_bytes(jset), w->sectors << 9,
				    u64s, w->u64s_reserved, j->entry_u64s_reserved);
		return -EINVAL;
	}

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = seq;

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    (ret = jset_validate(c, NULL, jset, 0, WRITE)))
		return ret;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c, "encrypting journal entry: %s", bch2_err_str(ret)))
		return ret;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if 
(!validate_before_checksum && 1923 (ret = jset_validate(c, NULL, jset, 0, WRITE))) 1924 return ret; 1925 1926 memset((void *) jset + bytes, 0, (sectors << 9) - bytes); 1927 return 0; 1928 } 1929 1930 static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w) 1931 { 1932 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1933 int error = bch2_journal_error(j); 1934 1935 /* 1936 * If the journal is in an error state - we did an emergency shutdown - 1937 * we prefer to continue doing journal writes. We just mark them as 1938 * noflush so they'll never be used, but they'll still be visible by the 1939 * list_journal tool - this helps in debugging. 1940 * 1941 * There's a caveat: the first journal write after marking the 1942 * superblock dirty must always be a flush write, because on startup 1943 * from a clean shutdown we didn't necessarily read the journal and the 1944 * new journal write might overwrite whatever was in the journal 1945 * previously - we can't leave the journal without any flush writes in 1946 * it. 1947 * 1948 * So if we're in an error state, and we're still starting up, we don't 1949 * write anything at all. 1950 */ 1951 if (error && test_bit(JOURNAL_need_flush_write, &j->flags)) 1952 return -EIO; 1953 1954 if (error || 1955 w->noflush || 1956 (!w->must_flush && 1957 (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) && 1958 test_bit(JOURNAL_may_skip_flush, &j->flags))) { 1959 w->noflush = true; 1960 SET_JSET_NO_FLUSH(w->data, true); 1961 w->data->last_seq = 0; 1962 w->last_seq = 0; 1963 1964 j->nr_noflush_writes++; 1965 } else { 1966 w->must_flush = true; 1967 j->last_flush_write = jiffies; 1968 j->nr_flush_writes++; 1969 clear_bit(JOURNAL_need_flush_write, &j->flags); 1970 } 1971 1972 return 0; 1973 } 1974 1975 CLOSURE_CALLBACK(bch2_journal_write) 1976 { 1977 closure_type(w, struct journal_buf, io); 1978 struct journal *j = container_of(w, struct journal, buf[w->idx]); 1979 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1980 struct bch_replicas_padded replicas; 1981 unsigned nr_rw_members = 0; 1982 int ret; 1983 1984 for_each_rw_member(c, ca) 1985 nr_rw_members++; 1986 1987 BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb)); 1988 BUG_ON(!w->write_started); 1989 BUG_ON(w->write_allocated); 1990 BUG_ON(w->write_done); 1991 1992 j->write_start_time = local_clock(); 1993 1994 spin_lock(&j->lock); 1995 if (nr_rw_members > 1) 1996 w->separate_flush = true; 1997 1998 ret = bch2_journal_write_pick_flush(j, w); 1999 spin_unlock(&j->lock); 2000 if (ret) 2001 goto err; 2002 2003 mutex_lock(&j->buf_lock); 2004 journal_buf_realloc(j, w); 2005 2006 ret = bch2_journal_write_prep(j, w); 2007 mutex_unlock(&j->buf_lock); 2008 if (ret) 2009 goto err; 2010 2011 j->entry_bytes_written += vstruct_bytes(w->data); 2012 2013 while (1) { 2014 spin_lock(&j->lock); 2015 ret = journal_write_alloc(j, w); 2016 if (!ret || !j->can_discard) 2017 break; 2018 2019 spin_unlock(&j->lock); 2020 bch2_journal_do_discards(j); 2021 } 2022 2023 if (ret) { 2024 struct printbuf buf = PRINTBUF; 2025 buf.atomic++; 2026 2027 prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write: %s"), 2028 bch2_err_str(ret)); 2029 __bch2_journal_debug_to_text(&buf, j); 2030 spin_unlock(&j->lock); 2031 bch2_print_string_as_lines(KERN_ERR, buf.buf); 2032 printbuf_exit(&buf); 2033 goto err; 2034 } 2035 2036 /* 2037 * write is allocated, no longer need to account for it in 2038 * bch2_journal_space_available(): 2039 */ 2040 w->sectors = 0; 2041 w->write_allocated = true; 
2042 2043 /* 2044 * journal entry has been compacted and allocated, recalculate space 2045 * available: 2046 */ 2047 bch2_journal_space_available(j); 2048 bch2_journal_do_writes(j); 2049 spin_unlock(&j->lock); 2050 2051 w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key)); 2052 2053 if (c->opts.nochanges) 2054 goto no_io; 2055 2056 /* 2057 * Mark journal replicas before we submit the write to guarantee 2058 * recovery will find the journal entries after a crash. 2059 */ 2060 bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, 2061 w->devs_written); 2062 ret = bch2_mark_replicas(c, &replicas.e); 2063 if (ret) 2064 goto err; 2065 2066 if (!JSET_NO_FLUSH(w->data)) 2067 continue_at(cl, journal_write_preflush, j->wq); 2068 else 2069 continue_at(cl, journal_write_submit, j->wq); 2070 return; 2071 no_io: 2072 continue_at(cl, journal_write_done, j->wq); 2073 return; 2074 err: 2075 bch2_fatal_error(c); 2076 continue_at(cl, journal_write_done, j->wq); 2077 } 2078