/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)

static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					       const struct btree *b,
					       unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
	return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
	unsigned long *v = trans_paths_nr(paths);

	return v - BITS_TO_LONGS(*v);
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
	for (_idx = _start;						\
	     (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr;	\
	     _idx++)

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
	/*
	 * Open coded find_next_bit(), because
	 *  - this is a fast path, we can't afford the function call
	 *  - and we know that nr_paths is a multiple of BITS_PER_LONG
	 */
	while (*idx < trans->nr_paths) {
		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
		if (v) {
			*idx += __ffs(v);
			return trans->paths + *idx;
		}

		*idx += BITS_PER_LONG;
		*idx &= ~(BITS_PER_LONG - 1);
		w++;
	}

	return NULL;
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned by
 * another thread, for bch2_btree_trans_to_text().
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = __trans_next_path((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path(_trans, _path, _idx)			\
	trans_for_each_path_from(_trans, _path, _idx, 1)
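/*
 * Example (illustrative sketch, not part of the API): walking every allocated
 * path in a transaction. nr_paths_on_btree() is a hypothetical helper, shown
 * only to demonstrate the iteration pattern:
 *
 *	static unsigned nr_paths_on_btree(struct btree_trans *trans,
 *					  enum btree_id btree)
 *	{
 *		struct btree_path *path;
 *		unsigned idx, nr = 0;
 *
 *		trans_for_each_path(trans, path, idx)
 *			nr += path->btree_id == btree;
 *		return nr;
 *	}
 */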
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_idx_inorder(_trans, _iter)			\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

struct trans_for_each_path_inorder_iter {
	btree_path_idx_t	sorted_idx;
	btree_path_idx_t	path_idx;
};

#define trans_for_each_path_inorder(_trans, _path, _iter)		\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _path = (_trans)->paths + _iter.path_idx,			\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
	     --_i)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned *idx)
{
	struct btree_path *path;

	while ((path = __trans_next_path(trans, idx)) &&
	       !__path_has_node(path, b))
		(*idx)++;

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)		\
	for (_iter = 1;							\
	     (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
	     _iter++)
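/*
 * Example (illustrative, hypothetical helper): test whether the transaction
 * holds any path pointing into a given btree node, using the node-filtered
 * iterator above:
 *
 *	static bool trans_has_path_with_node(struct btree_trans *trans,
 *					     struct btree *b)
 *	{
 *		struct btree_path *path;
 *		unsigned idx;
 *
 *		trans_for_each_path_with_node(trans, b, path, idx)
 *			return true;
 *		return false;
 *	}
 */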
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
					    bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent,
			 unsigned long ip)
{
	if (trans->paths[path].ref > 1 ||
	    trans->paths[path].preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	trans->paths[path].should_be_locked = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
			  struct bpos, bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			btree_path_idx_t path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	return !bpos_eq(new_pos, trans->paths[path].pos)
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
		: path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
					      btree_path_idx_t,
					      unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
							btree_path_idx_t path,
							unsigned flags)
{
	bch2_trans_verify_not_unlocked(trans);

	if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
					    unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path,
							      struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					    struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count
		? -BCH_ERR_transaction_restart_nested
		: 0;
}
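/*
 * Illustrative sketch of the restart-count pattern this helper supports
 * (hypothetical caller, not from the source): snapshot trans->restart_count
 * before doing work, then convert a restart that was handled underneath us
 * into -BCH_ERR_transaction_restart_nested:
 *
 *	u32 restart_count = trans->restart_count;
 *	int ret = do_something(trans);		// hypothetical operation
 *
 *	ret = ret ?: trans_was_restarted(trans, restart_count);
 */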
void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans)
{
	if (!trans->locked)
		bch2_trans_unlocked_error(trans);
}

__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = _THIS_IP_;
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);
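/*
 * Illustrative sketch (assumes an already-initialized iterator; restart
 * handling elided): the peek/advance pair that the for_each_btree_key
 * macros below are built from:
 *
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	for (k = bch2_btree_iter_peek(&iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     bch2_btree_iter_advance(&iter)) {
 *		// use k
 *	}
 */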
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode = iter->pos.inode = new_pos.inode;
	iter->k.p.offset = iter->pos.offset = new_pos.offset;
	iter->k.p.snapshot = iter->pos.snapshot = new_pos.snapshot;
	iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_trans *trans = iter->trans;

	if (unlikely(iter->update_path))
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_intent);
	iter->update_path = 0;

	if (!(iter->flags & BTREE_ITER_all_snapshots))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_is_extents;

	if (!(flags & BTREE_ITER_snapshot_field) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_all_snapshots;

	if (!(flags & BTREE_ITER_all_snapshots) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_filter_snapshots;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_with_journal;

	return flags;
}

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_cached;
		flags &= ~BTREE_ITER_with_key_cache;
	} else if (!(flags & BTREE_ITER_cached))
		flags |= BTREE_ITER_with_key_cache;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       unsigned locks_want,
					       unsigned depth,
					       unsigned flags,
					       unsigned long ip)
{
	iter->trans = trans;
	iter->update_path = 0;
	iter->key_cache_path = 0;
	iter->btree_id = btree_id;
	iter->min_depth = 0;
	iter->flags = flags;
	iter->snapshot = pos.snapshot;
	iter->pos = pos;
	iter->k = POS_KEY(pos);
	iter->journal_idx = 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
				   enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
					struct btree_iter *iter,
					unsigned btree_id, struct bpos pos,
					unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

void bch2_set_btree_iter_dontneed(struct btree_iter *);
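/*
 * Example (illustrative; error/restart handling elided): the basic iterator
 * lifecycle — init, peek, exit. BTREE_ID_inodes and the position are
 * arbitrary choices for the sketch:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret) {
 *		// use k
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */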
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = round_up(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
						   struct btree_iter *iter,
						   unsigned btree_id, struct bpos pos,
						   unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
						 struct btree_iter *iter,
						 unsigned btree_id, struct bpos pos,
						 unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,		\
				_btree_id, _pos, _flags, KEY_TYPE_##_type))

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
					    unsigned btree_id, struct bpos pos,
					    unsigned flags, unsigned type,
					    unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	ret = bkey_err(k);
	if (!ret) {
		unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);

		memcpy(val, k.v, b);
		if (unlikely(b < val_size))
			memset((void *) val + b, 0, val_size - b);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)
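/*
 * Example (illustrative; BTREE_ID_xattrs and the xattr type are just
 * plausible stand-ins): looking up a key that must be of a given type,
 * keeping the iterator on success:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c_xattr x =
 *		bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_xattrs,
 *					 pos, 0, xattr);
 *	int ret = bkey_err(x);
 *
 *	if (!ret) {
 *		// use x.v; the iterator is held until we exit it
 *		bch2_trans_iter_exit(trans, &iter);
 *	}
 */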
void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b, _do)	\
({									\
	bch2_trans_begin((_trans));					\
									\
	struct btree_iter _iter;					\
	bch2_trans_node_iter_init((_trans), &_iter, (_btree_id),	\
				  _start, _locks_want, _depth, _flags);	\
	int _ret3 = 0;							\
	do {								\
		_ret3 = lockrestart_do((_trans), ({			\
			struct btree *_b = bch2_btree_iter_peek_node(&_iter);\
			if (!_b)					\
				break;					\
									\
			PTR_ERR_OR_ZERO(_b) ?: (_do);			\
		})) ?:							\
		lockrestart_do((_trans),				\
			PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter)));\
	} while (!_ret3);						\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _do)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b, _do)

static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_slots))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}

/*
 * goto instead of loop, so that when used inside for_each_btree_key()
 * break/continue work correctly
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})

/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 * - We don't call bch2_trans_begin() unless we had a transaction restart
 * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *   transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _restart_count);		\
})
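/*
 * Example (illustrative): wrapping a fallible lookup in lockrestart_do() so
 * transaction restarts are retried transparently. Because it's a statement
 * expression, the last expression is the macro's return value:
 *
 *	ret = lockrestart_do(trans, ({
 *		struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
 *		bkey_err(k);
 *	}));
 */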
#define for_each_btree_key_upto_continue(_trans, _iter,			\
					 _end, _flags, _k, _do)		\
({									\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_upto_type(&(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do)	\
	for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _do)		\
({									\
	bch2_trans_begin((_trans));					\
									\
	struct btree_iter _iter;					\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\
})

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _do)			\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start,	\
				SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(&(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
					  _start, _iter_flags, _k,	\
					  _disk_res, _journal_seq, _commit_flags,\
					  _do)				\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				       _start, _end, _iter_flags, _k,	\
				       _disk_res, _journal_seq, _commit_flags,\
				       _do)				\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
					  _start, _end, _flags, _k, _ret)\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;								\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
				     _start, _flags, _k, _ret)		\
	for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\
					  SPOS_MAX, _flags, _k, _ret)

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
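/*
 * Example (illustrative; BTREE_ID_inodes and the loop body are placeholders):
 * scanning a range of keys with automatic restart handling. The iterator and
 * key names are declared by the macro; _do evaluates to an int, and a nonzero
 * value terminates the loop and is returned:
 *
 *	ret = for_each_btree_key(trans, iter, BTREE_ID_inodes,
 *				 POS_MIN, 0, k, ({
 *		pr_info("key at %llu:%llu", k.k->p.inode, k.k->p.offset);
 *		0;
 *	}));
 */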
/*
 * This should not be used in a fastpath, without first trying _do in
 * nonblocking mode - it will cause excessive transaction restarts and
 * potentially livelocking:
 */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	(_do) ?: bch2_trans_relock(_trans);				\
})

#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})

#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})

#define bch2_trans_run(_c, _do)						\
({									\
	struct btree_trans *trans = bch2_trans_get(_c);			\
	int _ret = (_do);						\
	bch2_trans_put(trans);						\
	_ret;								\
})

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

bool bch2_current_has_btree_trans(struct bch_fs *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})

void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */