// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "snapshot.h"

#include <linux/prefetch.h>

static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct bkey u;
	struct bkey_s_c k = bch2_btree_path_peek_slot_exact(i->path, &u);

	if (unlikely(trans->journal_replay_not_finished)) {
		struct bkey_i *j_k =
			bch2_journal_keys_peek_slot(c, i->btree_id, i->level, i->k->k.p);

		if (j_k)
			k = bkey_i_to_s_c(j_k);
	}

	u = *k.k;
	u.needs_whiteout = i->old_k.needs_whiteout;

	BUG_ON(memcmp(&i->old_k, &u, sizeof(struct bkey)));
	BUG_ON(i->old_v != k.v);
#endif
}

static inline struct btree_path_level *insert_l(struct btree_insert_entry *i)
{
	return i->path->l + i->level;
}

static inline bool same_leaf_as_prev(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i != trans->updates &&
		insert_l(&i[0])->b == insert_l(&i[-1])->b;
}

static inline bool same_leaf_as_next(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i + 1 < trans->updates + trans->nr_updates &&
		insert_l(&i[0])->b == insert_l(&i[1])->b;
}

inline void bch2_btree_node_prep_for_write(struct btree_trans *trans,
					   struct btree_path *path,
					   struct btree *b)
{
	struct bch_fs *c = trans->c;

	if (unlikely(btree_node_just_written(b)) &&
	    bch2_btree_post_write_cleanup(c, b))
		bch2_trans_node_reinit_iter(trans, b);

	/*
	 * If the last bset has been written, or if it's gotten too big - start
	 * a new bset to insert into:
	 */
	if (want_new_bset(c, b))
		bch2_btree_init_next(trans, b);
}

static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
{
	while (--i >= trans->updates) {
		if (same_leaf_as_prev(trans, i))
			continue;

		bch2_btree_node_unlock_write(trans, i->path, insert_l(i)->b);
	}

	trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
}

static inline int bch2_trans_lock_write(struct btree_trans *trans)
{
	struct btree_insert_entry *i;

	EBUG_ON(trans->write_locked);

	trans_for_each_update(trans, i) {
		if (same_leaf_as_prev(trans, i))
			continue;

		if (bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c))
			return trans_lock_write_fail(trans, i);

		if (!i->cached)
			bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
	}

	trans->write_locked = true;
	return 0;
}

static inline void bch2_trans_unlock_write(struct btree_trans *trans)
{
	if (likely(trans->write_locked)) {
		struct btree_insert_entry *i;

		trans_for_each_update(trans, i)
			if (!same_leaf_as_prev(trans, i))
				bch2_btree_node_unlock_write_inlined(trans, i->path,
								     insert_l(i)->b);
		trans->write_locked = false;
	}
}
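/*
 * Note on lock ordering: updates within a transaction are kept sorted, so
 * bch2_trans_lock_write() takes node write locks in a consistent order, and
 * same_leaf_as_prev() ensures each leaf is locked only once. If a write lock
 * can't be taken, trans_lock_write_fail() backs out the locks taken so far
 * and restarts the transaction rather than risking deadlock.
 */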
/* Inserting into a given leaf node (last stage of insert): */

/* Handle overwrites and do insert, for non-extents: */
bool bch2_btree_bset_insert_key(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b,
				struct btree_node_iter *node_iter,
				struct bkey_i *insert)
{
	struct bkey_packed *k;
	unsigned clobber_u64s = 0, new_u64s = 0;

	EBUG_ON(btree_node_just_written(b));
	EBUG_ON(bset_written(b, btree_bset_last(b)));
	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
	EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
	EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
	EBUG_ON(insert->k.u64s >
		bch_btree_keys_u64s_remaining(trans->c, b));
	EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
		k = NULL;

	/* @k is the key being overwritten/deleted, if any: */
	EBUG_ON(k && bkey_deleted(k));

	/* Deleting, but not found? nothing to do: */
	if (bkey_deleted(&insert->k) && !k)
		return false;

	if (bkey_deleted(&insert->k)) {
		/* Deleting: */
		btree_account_key_drop(b, k);
		k->type = KEY_TYPE_deleted;

		if (k->needs_whiteout)
			push_whiteout(trans->c, b, insert->k.p);
		k->needs_whiteout = false;

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;
			bch2_bset_delete(b, k, clobber_u64s);
			goto fix_iter;
		} else {
			bch2_btree_path_fix_key_modified(trans, b, k);
		}

		return true;
	}

	if (k) {
		/* Overwriting: */
		btree_account_key_drop(b, k);
		k->type = KEY_TYPE_deleted;

		insert->k.needs_whiteout = k->needs_whiteout;
		k->needs_whiteout = false;

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;
			goto overwrite;
		} else {
			bch2_btree_path_fix_key_modified(trans, b, k);
		}
	}

	k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
overwrite:
	bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
	new_u64s = k->u64s;
fix_iter:
	if (clobber_u64s != new_u64s)
		bch2_btree_node_iter_fix(trans, path, b, node_iter, k,
					 clobber_u64s, new_u64s);
	return true;
}

static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
			      unsigned i, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write *w = container_of(pin, struct btree_write, journal);
	struct btree *b = container_of(w, struct btree, writes[i]);
	struct btree_trans *trans = bch2_trans_get(c);
	unsigned long old, new, v;
	unsigned idx = w - b->writes;

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
	v = READ_ONCE(b->flags);

	do {
		old = new = v;

		if (!(old & (1 << BTREE_NODE_dirty)) ||
		    !!(old & (1 << BTREE_NODE_write_idx)) != idx ||
		    w->journal.seq != seq)
			break;

		new &= ~BTREE_WRITE_TYPE_MASK;
		new |= BTREE_WRITE_journal_reclaim;
		new |= 1 << BTREE_NODE_need_write;
	} while ((v = cmpxchg(&b->flags, old, new)) != old);

	btree_node_write_if_need(c, b, SIX_LOCK_read);
	six_unlock_read(&b->c.lock);

	bch2_trans_put(trans);
	return 0;
}

int bch2_btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 0, seq);
}

int bch2_btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 1, seq);
}
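/*
 * A btree node has two btree_write structures (writes[0] and writes[1]), so
 * that one write can be in flight while the next one is being built up; which
 * of the two is current is tracked by the BTREE_NODE_write_idx flag. That's
 * why there are two journal pin flush callbacks, one per btree_write:
 */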
inline void bch2_btree_add_journal_pin(struct bch_fs *c,
				       struct btree *b, u64 seq)
{
	struct btree_write *w = btree_current_write(b);

	bch2_journal_pin_add(&c->journal, seq, &w->journal,
			     btree_node_write_idx(b) == 0
			     ? bch2_btree_node_flush0
			     : bch2_btree_node_flush1);
}

/**
 * bch2_btree_insert_key_leaf() - insert one key into a leaf node
 * @trans:		btree transaction object
 * @path:		path pointing to @insert's pos
 * @insert:		key to insert
 * @journal_seq:	sequence number of journal reservation
 */
inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
				       struct btree_path *path,
				       struct bkey_i *insert,
				       u64 journal_seq)
{
	struct bch_fs *c = trans->c;
	struct btree *b = path_l(path)->b;
	struct bset_tree *t = bset_tree_last(b);
	struct bset *i = bset(b, t);
	int old_u64s = bset_u64s(t);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	if (unlikely(!bch2_btree_bset_insert_key(trans, path, b,
					&path_l(path)->iter, insert)))
		return;

	i->journal_seq = cpu_to_le64(max(journal_seq, le64_to_cpu(i->journal_seq)));

	bch2_btree_add_journal_pin(c, b, journal_seq);

	if (unlikely(!btree_node_dirty(b))) {
		EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
		set_btree_node_dirty_acct(c, b);
	}

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) bset_u64s(t) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_trans_node_reinit_iter(trans, b);
}

/* Cached btree updates: */

/* Normal update interface: */

static inline void btree_insert_entry_checks(struct btree_trans *trans,
					     struct btree_insert_entry *i)
{
	BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
	BUG_ON(i->cached != i->path->cached);
	BUG_ON(i->level != i->path->level);
	BUG_ON(i->btree_id != i->path->btree_id);
	EBUG_ON(!i->level &&
		btree_type_has_snapshots(i->btree_id) &&
		!(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
		test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
		i->k->k.p.snapshot &&
		bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot));
}

static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
						      unsigned flags)
{
	return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
				    trans->journal_u64s, flags);
}

#define JSET_ENTRY_LOG_U64s		4

static noinline void journal_transaction_name(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct jset_entry *entry =
		bch2_journal_add_entry(j, &trans->journal_res,
				       BCH_JSET_ENTRY_log, 0, 0,
				       JSET_ENTRY_LOG_U64s);
	struct jset_entry_log *l =
		container_of(entry, struct jset_entry_log, entry);

	strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64));
}

static inline int btree_key_can_insert(struct btree_trans *trans,
				       struct btree *b, unsigned u64s)
{
	struct bch_fs *c = trans->c;

	if (!bch2_btree_node_insert_fits(c, b, u64s))
		return -BCH_ERR_btree_insert_btree_node_full;

	return 0;
}
noinline static int
btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
				     struct btree_path *path, unsigned new_u64s)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct bkey_cached *ck = (void *) path->l[0].b;
	struct bkey_i *new_k;
	int ret;

	bch2_trans_unlock_write(trans);
	bch2_trans_unlock(trans);

	new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
	if (!new_k) {
		bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
			bch2_btree_id_str(path->btree_id), new_u64s);
		return -BCH_ERR_ENOMEM_btree_key_cache_insert;
	}

	ret = bch2_trans_relock(trans) ?:
		bch2_trans_lock_write(trans);
	if (unlikely(ret)) {
		kfree(new_k);
		return ret;
	}

	memcpy(new_k, ck->k, ck->u64s * sizeof(u64));

	trans_for_each_update(trans, i)
		if (i->old_v == &ck->k->v)
			i->old_v = &new_k->v;

	kfree(ck->k);
	ck->u64s = new_u64s;
	ck->k = new_k;
	return 0;
}

static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags,
				       struct btree_path *path, unsigned u64s)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) path->l[0].b;
	struct btree_insert_entry *i;
	unsigned new_u64s;
	struct bkey_i *new_k;

	EBUG_ON(path->level);

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
	    bch2_btree_key_cache_must_wait(c) &&
	    !(flags & BTREE_INSERT_JOURNAL_RECLAIM))
		return -BCH_ERR_btree_insert_need_journal_reclaim;

	/*
	 * bch2_varint_decode can read past the end of the buffer by at most 7
	 * bytes (it won't be used):
	 */
	u64s += 1;

	if (u64s <= ck->u64s)
		return 0;

	new_u64s = roundup_pow_of_two(u64s);
	new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT);
	if (unlikely(!new_k))
		return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s);

	trans_for_each_update(trans, i)
		if (i->old_v == &ck->k->v)
			i->old_v = &new_k->v;

	ck->u64s = new_u64s;
	ck->k = new_k;
	return 0;
}

/* Triggers: */

static int run_one_mem_trigger(struct btree_trans *trans,
			       struct btree_insert_entry *i,
			       unsigned flags)
{
	struct bkey_s_c old = { &i->old_k, i->old_v };
	struct bkey_i *new = i->k;
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
	int ret;

	verify_update_old_key(trans, i);

	if (unlikely(flags & BTREE_TRIGGER_NORUN))
		return 0;

	if (!btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)))
		return 0;

	if (old_ops->atomic_trigger == new_ops->atomic_trigger) {
		ret = bch2_mark_key(trans, i->btree_id, i->level,
				old, bkey_i_to_s_c(new),
				BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
	} else {
		struct bkey _deleted = KEY(0, 0, 0);
		struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };

		_deleted.p = i->path->pos;

		ret = bch2_mark_key(trans, i->btree_id, i->level,
				deleted, bkey_i_to_s_c(new),
				BTREE_TRIGGER_INSERT|flags) ?:
		      bch2_mark_key(trans, i->btree_id, i->level,
				old, deleted,
				BTREE_TRIGGER_OVERWRITE|flags);
	}

	return ret;
}
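/*
 * Mem (atomic) triggers update in-memory accounting via bch2_mark_key(); they
 * run in the commit path with node write locks held, where failure is treated
 * as fatal. Transactional triggers, below, run before any write locks are
 * taken and may themselves queue further btree updates:
 */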
static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i,
				 bool overwrite)
{
	/*
	 * Transactional triggers create new btree_insert_entries, so we can't
	 * pass them a pointer to a btree_insert_entry, that memory is going to
	 * move:
	 */
	struct bkey old_k = i->old_k;
	struct bkey_s_c old = { &old_k, i->old_v };
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);

	verify_update_old_key(trans, i);

	if ((i->flags & BTREE_TRIGGER_NORUN) ||
	    !(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)))
		return 0;

	if (!i->insert_trigger_run &&
	    !i->overwrite_trigger_run &&
	    old_ops->trans_trigger == new_ops->trans_trigger) {
		i->overwrite_trigger_run = true;
		i->insert_trigger_run = true;
		return bch2_trans_mark_key(trans, i->btree_id, i->level, old, i->k,
					   BTREE_TRIGGER_INSERT|
					   BTREE_TRIGGER_OVERWRITE|
					   i->flags) ?: 1;
	} else if (overwrite && !i->overwrite_trigger_run) {
		i->overwrite_trigger_run = true;
		return bch2_trans_mark_old(trans, i->btree_id, i->level, old, i->flags) ?: 1;
	} else if (!overwrite && !i->insert_trigger_run) {
		i->insert_trigger_run = true;
		return bch2_trans_mark_new(trans, i->btree_id, i->level, i->k, i->flags) ?: 1;
	} else {
		return 0;
	}
}

static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
			      struct btree_insert_entry *btree_id_start)
{
	struct btree_insert_entry *i;
	bool trans_trigger_run;
	int ret, overwrite;

	for (overwrite = 1; overwrite >= 0; --overwrite) {
		/*
		 * Running triggers will append more updates to the list of updates as
		 * we're walking it:
		 */
		do {
			trans_trigger_run = false;

			for (i = btree_id_start;
			     i < trans->updates + trans->nr_updates && i->btree_id <= btree_id;
			     i++) {
				if (i->btree_id != btree_id)
					continue;

				ret = run_one_trans_trigger(trans, i, overwrite);
				if (ret < 0)
					return ret;
				if (ret)
					trans_trigger_run = true;
			}
		} while (trans_trigger_run);
	}

	return 0;
}
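/*
 * Note: run_one_trans_trigger() returns > 0 if it ran a trigger, and triggers
 * may queue more updates; run_btree_triggers() therefore keeps rescanning this
 * btree's updates until a full pass runs nothing new.
 */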
static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
{
	struct btree_insert_entry *i = NULL, *btree_id_start = trans->updates;
	unsigned btree_id = 0;
	int ret = 0;

	/*
	 * For a given btree, this algorithm runs insert triggers before
	 * overwrite triggers: this is so that when extents are being moved
	 * (e.g. by FALLOCATE_FL_INSERT_RANGE), we don't drop references before
	 * they are re-added.
	 */
	for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
		if (btree_id == BTREE_ID_alloc)
			continue;

		while (btree_id_start < trans->updates + trans->nr_updates &&
		       btree_id_start->btree_id < btree_id)
			btree_id_start++;

		ret = run_btree_triggers(trans, btree_id, btree_id_start);
		if (ret)
			return ret;
	}

	trans_for_each_update(trans, i) {
		if (i->btree_id > BTREE_ID_alloc)
			break;
		if (i->btree_id == BTREE_ID_alloc) {
			ret = run_btree_triggers(trans, BTREE_ID_alloc, i);
			if (ret)
				return ret;
			break;
		}
	}

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(!(i->flags & BTREE_TRIGGER_NORUN) &&
		       (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)) &&
		       (!i->insert_trigger_run || !i->overwrite_trigger_run));
#endif
	return 0;
}

static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	int ret = 0;

	trans_for_each_update(trans, i) {
		/*
		 * XXX: synchronization of cached update triggers with gc
		 * XXX: synchronization of interior node updates with gc
		 */
		BUG_ON(i->cached || i->level);

		if (gc_visited(c, gc_pos_btree_node(insert_l(i)->b))) {
			ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
			if (ret)
				break;
		}
	}

	return ret;
}
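/*
 * The core of the commit path, run with all relevant nodes write locked:
 * checks that every insert fits, takes a journal reservation, runs mem
 * triggers and commit hooks, journals the new keys (and, if enabled, the keys
 * being overwritten), then does the actual btree and key cache inserts. Once
 * the journal reservation has been taken, failure is fatal.
 */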
static inline int
bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
			       struct btree_insert_entry **stopped_at,
			       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct btree_write_buffered_key *wb;
	struct btree_trans_commit_hook *h;
	unsigned u64s = 0;
	int ret;

	if (race_fault()) {
		trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
		return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
	}

	/*
	 * Check if the insert will fit in the leaf node with the write lock
	 * held, otherwise another thread could write the node changing the
	 * amount of space available:
	 */

	prefetch(&trans->c->journal.flags);

	trans_for_each_update(trans, i) {
		/* Multiple inserts might go to same leaf: */
		if (!same_leaf_as_prev(trans, i))
			u64s = 0;

		u64s += i->k->k.u64s;
		ret = !i->cached
			? btree_key_can_insert(trans, insert_l(i)->b, u64s)
			: btree_key_can_insert_cached(trans, flags, i->path, u64s);
		if (ret) {
			*stopped_at = i;
			return ret;
		}
	}

	if (trans->nr_wb_updates &&
	    trans->nr_wb_updates + c->btree_write_buffer.state.nr > c->btree_write_buffer.size)
		return -BCH_ERR_btree_insert_need_flush_buffer;

	/*
	 * Don't get journal reservation until after we know insert will
	 * succeed:
	 */
	if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		ret = bch2_trans_journal_res_get(trans,
				(flags & BCH_WATERMARK_MASK)|
				JOURNAL_RES_GET_NONBLOCK);
		if (ret)
			return ret;

		if (unlikely(trans->journal_transaction_names))
			journal_transaction_name(trans);
	} else {
		trans->journal_res.seq = c->journal.replay_journal_seq;
	}

	/*
	 * Not allowed to fail after we've gotten our journal reservation - we
	 * have to use it:
	 */

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    !(flags & BTREE_INSERT_JOURNAL_REPLAY)) {
		if (bch2_journal_seq_verify)
			trans_for_each_update(trans, i)
				i->k->k.version.lo = trans->journal_res.seq;
		else if (bch2_inject_invalid_keys)
			trans_for_each_update(trans, i)
				i->k->k.version = MAX_VERSION;
	}

	if (trans->fs_usage_deltas &&
	    bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas))
		return -BCH_ERR_btree_insert_need_mark_replicas;

	if (trans->nr_wb_updates) {
		EBUG_ON(flags & BTREE_INSERT_JOURNAL_REPLAY);

		ret = bch2_btree_insert_keys_write_buffer(trans);
		if (ret)
			goto revert_fs_usage;
	}

	h = trans->hooks;
	while (h) {
		ret = h->fn(trans, h);
		if (ret)
			goto revert_fs_usage;
		h = h->next;
	}

	trans_for_each_update(trans, i)
		if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) {
			ret = run_one_mem_trigger(trans, i, i->flags);
			if (ret)
				goto fatal_err;
		}

	if (unlikely(c->gc_pos.phase)) {
		ret = bch2_trans_commit_run_gc_triggers(trans);
		if (ret)
			goto fatal_err;
	}

	if (unlikely(trans->extra_journal_entries.nr)) {
		memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
				  trans->extra_journal_entries.data,
				  trans->extra_journal_entries.nr);

		trans->journal_res.offset += trans->extra_journal_entries.nr;
		trans->journal_res.u64s	  -= trans->extra_journal_entries.nr;
	}

	if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		struct journal *j = &c->journal;
		struct jset_entry *entry;

		trans_for_each_update(trans, i) {
			if (i->key_cache_already_flushed)
				continue;

			if (i->flags & BTREE_UPDATE_NOJOURNAL)
				continue;

			verify_update_old_key(trans, i);

			if (trans->journal_transaction_names) {
				entry = bch2_journal_add_entry(j, &trans->journal_res,
						       BCH_JSET_ENTRY_overwrite,
						       i->btree_id, i->level,
						       i->old_k.u64s);
				bkey_reassemble((struct bkey_i *) entry->start,
						(struct bkey_s_c) { &i->old_k, i->old_v });
			}

			entry = bch2_journal_add_entry(j, &trans->journal_res,
					       BCH_JSET_ENTRY_btree_keys,
					       i->btree_id, i->level,
					       i->k->k.u64s);
			bkey_copy((struct bkey_i *) entry->start, i->k);
		}

		trans_for_each_wb_update(trans, wb) {
			entry = bch2_journal_add_entry(j, &trans->journal_res,
					       BCH_JSET_ENTRY_btree_keys,
					       wb->btree, 0,
					       wb->k.k.u64s);
			bkey_copy((struct bkey_i *) entry->start, &wb->k);
		}

		if (trans->journal_seq)
			*trans->journal_seq = trans->journal_res.seq;
	}

	trans_for_each_update(trans, i) {
		i->k->k.needs_whiteout = false;

		if (!i->cached) {
			u64 seq = trans->journal_res.seq;

			if (i->flags & BTREE_UPDATE_PREJOURNAL)
				seq = i->seq;

			bch2_btree_insert_key_leaf(trans, i->path, i->k, seq);
		} else if (!i->key_cache_already_flushed)
			bch2_btree_insert_key_cached(trans, flags, i);
		else {
			bch2_btree_key_cache_drop(trans, i->path);
			btree_path_set_dirty(i->path, BTREE_ITER_NEED_TRAVERSE);
		}
	}

	return 0;
fatal_err:
	bch2_fatal_error(c);
revert_fs_usage:
	if (trans->fs_usage_deltas)
		bch2_trans_fs_usage_revert(trans, trans->fs_usage_deltas);
	return ret;
}
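/*
 * If journal replay hasn't finished, keys we just overwrote may still be
 * queued up for replay: flag them as overwritten so that journal replay won't
 * re-insert them.
 */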
static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
{
	struct btree_insert_entry *i;
	struct btree_write_buffered_key *wb;

	trans_for_each_update(trans, i)
		bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);

	trans_for_each_wb_update(trans, wb)
		bch2_journal_key_overwritten(trans->c, wb->btree, 0, wb->k.k.p);
}

static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
						   enum bkey_invalid_flags flags,
						   struct btree_insert_entry *i,
						   struct printbuf *err)
{
	struct bch_fs *c = trans->c;

	printbuf_reset(err);
	prt_printf(err, "invalid bkey on insert from %s -> %ps",
		   trans->fn, (void *) i->ip_allocated);
	prt_newline(err);
	printbuf_indent_add(err, 2);

	bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
	prt_newline(err);

	bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type, flags, err);
	bch2_print_string_as_lines(KERN_ERR, err->buf);

	bch2_inconsistent_error(c);
	bch2_dump_trans_updates(trans);

	return -EINVAL;
}
/*
 * Get journal reservation, take write locks, and attempt to do btree update(s):
 */
static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags,
				       struct btree_insert_entry **stopped_at,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	int ret = 0, u64s_delta = 0;

	trans_for_each_update(trans, i) {
		if (i->cached)
			continue;

		u64s_delta += !bkey_deleted(&i->k->k) ? i->k->k.u64s : 0;
		u64s_delta -= i->old_btree_u64s;

		if (!same_leaf_as_next(trans, i)) {
			if (u64s_delta <= 0) {
				ret = bch2_foreground_maybe_merge(trans, i->path,
							i->level, flags);
				if (unlikely(ret))
					return ret;
			}

			u64s_delta = 0;
		}
	}

	ret = bch2_trans_lock_write(trans);
	if (unlikely(ret))
		return ret;

	ret = bch2_trans_commit_write_locked(trans, flags, stopped_at, trace_ip);

	if (!ret && unlikely(trans->journal_replay_not_finished))
		bch2_drop_overwrites_from_journal(trans);

	bch2_trans_unlock_write(trans);

	if (!ret && trans->journal_pin)
		bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
				     trans->journal_pin, NULL);

	/*
	 * Drop journal reservation after dropping write locks, since dropping
	 * the journal reservation may kick off a journal write:
	 */
	bch2_journal_res_put(&c->journal, &trans->journal_res);

	return ret;
}

static int journal_reclaim_wait_done(struct bch_fs *c)
{
	int ret = bch2_journal_error(&c->journal) ?:
		!bch2_btree_key_cache_must_wait(c);

	if (!ret)
		journal_reclaim_kick(&c->journal);
	return ret;
}
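/*
 * Handle a failed commit attempt: depending on the error this may split a
 * full leaf node, mark replicas, wait for journal space or journal reclaim,
 * or flush the btree write buffer - returning 0 if the commit should simply
 * be retried.
 */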
static noinline
int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
			    struct btree_insert_entry *i,
			    int ret, unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;

	switch (ret) {
	case -BCH_ERR_btree_insert_btree_node_full:
		ret = bch2_btree_split_leaf(trans, i->path, flags);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, i->path);
		break;
	case -BCH_ERR_btree_insert_need_mark_replicas:
		ret = drop_locks_do(trans,
			bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas));
		break;
	case -BCH_ERR_journal_res_get_blocked:
		/*
		 * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
		 * flag
		 */
		if ((flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
		    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim) {
			ret = -BCH_ERR_journal_reclaim_would_deadlock;
			break;
		}

		ret = drop_locks_do(trans,
			bch2_trans_journal_res_get(trans,
					(flags & BCH_WATERMARK_MASK)|
					JOURNAL_RES_GET_CHECK));
		break;
	case -BCH_ERR_btree_insert_need_journal_reclaim:
		bch2_trans_unlock(trans);

		trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);

		wait_event_freezable(c->journal.reclaim_wait,
				     (ret = journal_reclaim_wait_done(c)));
		if (ret < 0)
			break;

		ret = bch2_trans_relock(trans);
		break;
	case -BCH_ERR_btree_insert_need_flush_buffer: {
		struct btree_write_buffer *wb = &c->btree_write_buffer;

		ret = 0;

		if (wb->state.nr > wb->size * 3 / 4) {
			bch2_trans_unlock(trans);
			mutex_lock(&wb->flush_lock);

			if (wb->state.nr > wb->size * 3 / 4) {
				bch2_trans_begin(trans);
				ret = __bch2_btree_write_buffer_flush(trans,
						flags|BTREE_INSERT_NOCHECK_RW, true);
				if (!ret) {
					trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
					ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
				}
			} else {
				mutex_unlock(&wb->flush_lock);
				ret = bch2_trans_relock(trans);
			}
		}
		break;
	}
	default:
		BUG_ON(ret >= 0);
		break;
	}

	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);

	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOSPC) &&
				!(flags & BTREE_INSERT_NOWAIT) &&
				(flags & BTREE_INSERT_NOFAIL), c,
		"%s: incorrectly got %s\n", __func__, bch2_err_str(ret));

	return ret;
}

static noinline int
bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
{
	struct bch_fs *c = trans->c;
	int ret;

	if (likely(!(flags & BTREE_INSERT_LAZY_RW)) ||
	    test_bit(BCH_FS_STARTED, &c->flags))
		return -BCH_ERR_erofs_trans_commit;

	ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
	if (ret)
		return ret;

	bch2_write_ref_get(c, BCH_WRITE_REF_trans);
	return 0;
}

/*
 * This is for updates done in the early part of fsck - btree_gc - before we've
 * gone RW. We only add the new key to the list of keys for journal replay to
 * do.
 */
static noinline int
do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	int ret = 0;

	trans_for_each_update(trans, i) {
		ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k);
		if (ret)
			break;
	}

	return ret;
}
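/*
 * Main transaction commit path: runs triggers and validates keys up front,
 * accounts for the journal space the keys (and logged overwrites) will need,
 * then calls do_bch2_trans_commit() in a retry loop, with
 * bch2_trans_commit_error() handling each failure.
 */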
int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i = NULL;
	struct btree_write_buffered_key *wb;
	int ret = 0;

	if (!trans->nr_updates &&
	    !trans->nr_wb_updates &&
	    !trans->extra_journal_entries.nr)
		goto out_reset;

	if (flags & BTREE_INSERT_GC_LOCK_HELD)
		lockdep_assert_held(&c->gc_lock);

	ret = bch2_trans_commit_run_triggers(trans);
	if (ret)
		goto out_reset;

	trans_for_each_update(trans, i) {
		struct printbuf buf = PRINTBUF;
		enum bkey_invalid_flags invalid_flags = 0;

		if (!(flags & BTREE_INSERT_JOURNAL_REPLAY))
			invalid_flags |= BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT;

		if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
					       i->bkey_type, invalid_flags, &buf)))
			ret = bch2_trans_commit_bkey_invalid(trans, invalid_flags, i, &buf);
		btree_insert_entry_checks(trans, i);
		printbuf_exit(&buf);

		if (ret)
			return ret;
	}

	if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
		ret = do_bch2_trans_commit_to_journal_replay(trans);
		goto out_reset;
	}

	if (!(flags & BTREE_INSERT_NOCHECK_RW) &&
	    unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) {
		ret = bch2_trans_commit_get_rw_cold(trans, flags);
		if (ret)
			goto out_reset;
	}

	if (c->btree_write_buffer.state.nr > c->btree_write_buffer.size / 2 &&
	    mutex_trylock(&c->btree_write_buffer.flush_lock)) {
		bch2_trans_begin(trans);
		bch2_trans_unlock(trans);

		ret = __bch2_btree_write_buffer_flush(trans,
					flags|BTREE_INSERT_NOCHECK_RW, true);
		if (!ret) {
			trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
		}
		goto out;
	}

	EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));

	trans->journal_u64s = trans->extra_journal_entries.nr;
	trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
	if (trans->journal_transaction_names)
		trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);

	trans_for_each_update(trans, i) {
		EBUG_ON(!i->path->should_be_locked);

		ret = bch2_btree_path_upgrade(trans, i->path, i->level + 1);
		if (unlikely(ret))
			goto out;

		EBUG_ON(!btree_node_intent_locked(i->path, i->level));

		if (i->key_cache_already_flushed)
			continue;

		if (i->flags & BTREE_UPDATE_NOJOURNAL)
			continue;

		/* we're going to journal the key being updated: */
		trans->journal_u64s += jset_u64s(i->k->k.u64s);

		/* and we're also going to log the overwrite: */
		if (trans->journal_transaction_names)
			trans->journal_u64s += jset_u64s(i->old_k.u64s);
	}

	trans_for_each_wb_update(trans, wb)
		trans->journal_u64s += jset_u64s(wb->k.k.u64s);

	if (trans->extra_journal_res) {
		ret = bch2_disk_reservation_add(c, trans->disk_res,
				trans->extra_journal_res,
				(flags & BTREE_INSERT_NOFAIL)
				? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			goto err;
	}
retry:
	bch2_trans_verify_not_in_restart(trans);
	memset(&trans->journal_res, 0, sizeof(trans->journal_res));

	ret = do_bch2_trans_commit(trans, flags, &i, _RET_IP_);

	/* make sure we didn't drop or screw up locks: */
	bch2_trans_verify_locks(trans);

	if (ret)
		goto err;

	trace_and_count(c, transaction_commit, trans, _RET_IP_);
out:
	if (likely(!(flags & BTREE_INSERT_NOCHECK_RW)))
		bch2_write_ref_put(c, BCH_WRITE_REF_trans);
out_reset:
	if (!ret)
		bch2_trans_downgrade(trans);
	bch2_trans_reset_updates(trans);

	return ret;
err:
	ret = bch2_trans_commit_error(trans, flags, i, ret, _RET_IP_);
	if (ret)
		goto out;

	goto retry;
}