/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)

#include <linux/tracepoint.h>

#define TRACE_BPOS_entries(name)				\
	__field(u64,		name##_inode	)		\
	__field(u64,		name##_offset	)		\
	__field(u32,		name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
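
/*
 * Illustrative expansion, not part of the original header: for a field set
 * declared with TRACE_BPOS_entries(pos) and filled with
 * TRACE_BPOS_assign(pos, path->pos), the preprocessor produces roughly:
 *
 *	__field(u64, pos_inode)
 *	__field(u64, pos_offset)
 *	__field(u32, pos_snapshot)
 *
 *	__entry->pos_inode	= path->pos.inode;
 *	__entry->pos_offset	= path->pos.offset;
 *	__entry->pos_snapshot	= path->pos.snapshot;
 */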

DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);

DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
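
/*
 * Editorial note, not part of the original header: DECLARE_EVENT_CLASS()
 * defines the record layout and formatting once; each DEFINE_EVENT()
 * below stamps out a named instance sharing it.  The tracepoint machinery
 * generates a trace_<name>() call for every instance, so e.g. the
 * journal_entry_full event defined further down would be fired from C
 * code along these lines (hypothetical call site):
 *
 *	struct printbuf buf = PRINTBUF;
 *	bch2_journal_debug_to_text(&buf, j);
 *	trace_journal_entry_full(c, buf.buf);
 *	printbuf_exit(&buf);
 */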

DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);

DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);

DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);

DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs, 6			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
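
/*
 * Editorial note, not part of the original header: because TRACE_SYSTEM is
 * "bcachefs", every event in this file appears under
 * /sys/kernel/tracing/events/bcachefs/ (the path may vary with the tracefs
 * mount point).  A typical session:
 *
 *	# echo 1 > /sys/kernel/tracing/events/bcachefs/journal_full/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 */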

/* disk_accounting.c */

TRACE_EVENT(accounting_mem_insert,
	TP_PROTO(struct bch_fs *c, const char *acc),
	TP_ARGS(c, acc),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	new_nr			)
		__string(acc,		acc			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->new_nr		= c->accounting.k.nr;
		__assign_str(acc);
	),

	TP_printk("%d,%d entries %u added %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->new_nr,
		  __get_str(acc))
);

/* fs.c: */
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),

	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	wait			)
	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->wait	= wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);

/* fs-io.c: */
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),

	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(	ino_t,	parent			)
		__field(	int,	datasync		)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev		= dentry->d_sb->s_dev;
		__entry->ino		= d_inode(dentry)->i_ino;
		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
		__entry->datasync	= datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);

/* super-io.c: */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned long,	ip			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);

/* io.c: */

DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		ret, 32			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Journal */

DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);

TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);

/* bset.c: */

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);

/* Btree cache: */

TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type	)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(size_t,		required		)
		__array(char,		ret, 32			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);

DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
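
/*
 * Editorial note, not part of the original header: the two events below
 * fire when relocking or upgrading a btree path fails.  They snapshot the
 * SIX lock state of the node at the given level: "held r:i" is the
 * read/intent count held by this transaction
 * (bch2_btree_node_lock_counts()), "lock count r:i" is the node's total
 * (six_lock_counts()), and "iter seq" differing from "lock seq" typically
 * means the node changed under the path since it was last locked.
 */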

TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		__field(u8,		level			)
		__field(u8,		path_idx		)
		TRACE_BPOS_entries(pos)
		__array(char,		node, 24		)
		__field(u8,		self_read_count		)
		__field(u8,		self_intent_count	)
		__field(u8,		read_count		)
		__field(u8,		intent_count		)
		__field(u32,		iter_lock_seq		)
		__field(u32,		node_lock_seq		)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];

		if (IS_ERR(b)) {
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		} else {
			c = six_lock_counts(&path->l[level].b->c.lock);
			__entry->read_count	= c.n[SIX_LOCK_read];
			__entry->intent_count	= c.n[SIX_LOCK_intent];
			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
		}
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

TRACE_EVENT(btree_path_upgrade_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		__field(u8,		level			)
		__field(u8,		path_idx		)
		TRACE_BPOS_entries(pos)
		__field(u8,		locked			)
		__field(u8,		self_read_count		)
		__field(u8,		self_intent_count	)
		__field(u8,		read_count		)
		__field(u8,		intent_count		)
		__field(u32,		iter_lock_seq		)
		__field(u32,		node_lock_seq		)
	),

	TP_fast_assign(
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->locked			= btree_node_locked(path, level);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
		c = six_lock_counts(&path->l[level].b->c.lock);
		__entry->read_count		= c.n[SIX_LOCK_read];
		__entry->intent_count		= c.n[SIX_LOCK_intent];
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locked,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Allocator */

DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

TRACE_EVENT(discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		seen			)
		__field(u64,		open			)
		__field(u64,		need_journal_commit	)
		__field(u64,		discarded		)
		__array(char,		err, 16			)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->seen			= seen;
		__entry->open			= open;
		__entry->need_journal_commit	= need_journal_commit;
		__entry->discarded		= discarded;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->discarded,
		  __entry->err)
);

TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);

/* Moving IO */

TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);

DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

TRACE_EVENT(move_data,
	TP_PROTO(struct bch_fs *c,
		 struct bch_move_stats *stats),
	TP_ARGS(c, stats),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		keys_moved		)
		__field(u64,		keys_raced		)
		__field(u64,		sectors_seen		)
		__field(u64,		sectors_moved		)
		__field(u64,		sectors_raced		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->keys_moved	= atomic64_read(&stats->keys_moved);
		__entry->keys_raced	= atomic64_read(&stats->keys_raced);
		__entry->sectors_seen	= atomic64_read(&stats->sectors_seen);
		__entry->sectors_moved	= atomic64_read(&stats->sectors_moved);
		__entry->sectors_raced	= atomic64_read(&stats->sectors_raced);
	),

	TP_printk("%d,%d keys moved %llu raced %llu "
		  "sectors seen %llu moved %llu raced %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->keys_moved,
		  __entry->keys_raced,
		  __entry->sectors_seen,
		  __entry->sectors_moved,
		  __entry->sectors_raced)
);

TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		member			)
		__field(u64,		bucket			)
		__field(u32,		sectors			)
		__field(u32,		bucket_size		)
		__field(int,		ret			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->member		= bucket->inode;
		__entry->bucket		= bucket->offset;
		__entry->sectors	= sectors;
		__entry->bucket_size	= bucket_size;
		__entry->ret		= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->ret)
);

TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 buckets,
		 u64 sectors_seen,
		 u64 sectors_moved),
	TP_ARGS(c, buckets, sectors_seen, sectors_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		buckets			)
		__field(u64,		sectors_seen		)
		__field(u64,		sectors_moved		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->buckets	= buckets;
		__entry->sectors_seen	= sectors_seen;
		__entry->sectors_moved	= sectors_moved;
	),

	TP_printk("%d,%d buckets %llu sectors seen %llu moved %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->buckets,
		  __entry->sectors_seen,
		  __entry->sectors_moved)
);

TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		wait_amount		)
		__field(u64,		until			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->wait_amount	= wait_amount;
		__entry->until		= until;
	),

	TP_printk("%d,%d waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount,
		  __entry->until)
);

/* btree transactions: */

DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
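
/*
 * Editorial note, not part of the original header: the trans_restart_*
 * events below mark points where a btree transaction had to bail out
 * (lock ordering, a split race, reallocation, ...) and return a
 * transaction-restart error so the caller's commit loop can retry from
 * the top.  Most of them reuse the transaction_event class above or the
 * transaction_restart_iter class defined a little further down.
 */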

DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		level			)
		__field(u16,		written			)
		__field(u16,		blocks			)
		__field(u16,		u64s_remaining		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);

TRACE_EVENT(trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)

		__field(unsigned long,	key_cache_nr_keys	)
		__field(unsigned long,	key_cache_nr_dirty	)
		__field(long,		must_wait		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->key_cache_nr_keys	= atomic_long_read(&trans->c->btree_key_cache.nr_keys);
		__entry->key_cache_nr_dirty	= atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
		__entry->must_wait		= __bch2_btree_key_cache_must_wait(trans->c);
	),

	TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->key_cache_nr_keys,
		  __entry->key_cache_nr_dirty,
		  __entry->must_wait)
);

TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(unsigned,	flags			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->flags		= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);

DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);

DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		__field(u8,		old_locks_want		)
		__field(u8,		new_locks_want		)
		__field(u8,		level			)
		__field(u32,		path_seq		)
		__field(u32,		node_seq		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->btree_id	= path->btree_id;
		__entry->old_locks_want	= old_locks_want;
		__entry->new_locks_want	= new_locks_want;
		__entry->level		= f->l;
		__entry->path_seq	= path->l[f->l].lock_seq;
		__entry->node_seq	= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);

DEFINE_EVENT(trans_str, trans_restart_relock,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);

TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(unsigned long,	bytes			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);

TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(enum btree_id,	btree_id		)
		TRACE_BPOS_entries(pos)
		__field(u32,		old_u64s		)
		__field(u32,		new_u64s		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);

DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(unsigned,	old_locks_want		)
		__field(unsigned,	new_locks_want		)
		__field(unsigned,	btree			)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->old_locks_want	= old_locks_want;
		__entry->new_locks_want	= path->locks_want;
		__entry->btree		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

TRACE_EVENT(key_cache_fill,
	TP_PROTO(struct btree_trans *trans, const char *key),
	TP_ARGS(trans, key),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__string(key,		key			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(key);
	),

	TP_printk("%s %s", __entry->trans_fn, __get_str(key))
);

TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr			)
		__field(size_t,		skipped			)
		__field(size_t,		fast			)
		__field(size_t,		size			)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);

TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath		)
		__field(size_t,		total			)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);

TRACE_EVENT(write_buffer_maybe_flush,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *key),
	TP_ARGS(trans, caller_ip, key),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(key,		key			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__assign_str(key);
	),

	TP_printk("%s %pS %s", __entry->trans_fn, (void *) __entry->caller_ip, __get_str(key))
);

DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32		)
		__array(char,		std_err, 32		)
		__array(char,		ip, 32			)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);

#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS

TRACE_EVENT(update_by_path,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
		 struct btree_insert_entry *i, bool overwrite),
	TP_ARGS(trans, path, i, overwrite),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	path_idx	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u8,			overwrite	)
		__field(btree_path_idx_t,	update_idx	)
		__field(btree_path_idx_t,	nr_updates	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->path_idx	= path - trans->paths;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->overwrite	= overwrite;
		__entry->update_idx	= i - trans->updates;
		__entry->nr_updates	= trans->nr_updates;
	),

	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
		  __entry->trans_fn,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->overwrite,
		  __entry->update_idx,
		  __entry->nr_updates)
);

TRACE_EVENT(btree_path_lock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_bkey_cached_common *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		__field(u8,		level			)
		__array(char,		node, 24		)
		__field(u32,		lock_seq		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->btree_id	= b->btree_id;
		__entry->level		= b->level;

		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->lock_seq	= six_lock_seq(&b->lock);
	),

	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->level,
		  __entry->node,
		  __entry->lock_seq)
);

DECLARE_EVENT_CLASS(btree_path_ev,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(u16,		idx			)
		__field(u8,		ref			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->ref		= path->ref;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
		  __entry->idx, __entry->ref,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

TRACE_EVENT(btree_path_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->locks_want	= path->locks_want;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
		  __entry->idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

TRACE_EVENT(btree_path_get,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->ref		= path->ref;
		__entry->preserve	= path->preserve;
		__entry->locks_want	= path->locks_want;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);
	),

	TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot)
);

DECLARE_EVENT_CLASS(btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			new_idx		)
		__field(u8,			btree_id	)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->new_idx	= new - trans->paths;
		__entry->btree_id	= path->btree_id;
		__entry->ref		= path->ref;
		__entry->preserve	= path->preserve;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->new_idx)
);

DEFINE_EVENT(btree_path_clone, btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

DECLARE_EVENT_CLASS(btree_path_traverse,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked )
		__field(u8,			btree_id	)
		__field(u8,			level		)
		TRACE_BPOS_entries(pos)
		__field(u8,			locks_want	)
		__field(u8,			nodes_locked	)
		__array(char,			node0, 24	)
		__array(char,			node1, 24	)
		__array(char,			node2, 24	)
		__array(char,			node3, 24	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));

		__entry->idx		= path - trans->paths;
		__entry->ref		= path->ref;
		__entry->preserve	= path->preserve;
		__entry->btree_id	= path->btree_id;
		__entry->level		= path->level;
		TRACE_BPOS_assign(pos, path->pos);

		__entry->locks_want	= path->locks_want;
		__entry->nodes_locked	= path->nodes_locked;
		struct btree *b = path->l[0].b;
		if (IS_ERR(b))
			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
		else
			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
		b = path->l[1].b;
		if (IS_ERR(b))
			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
		else
			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
		b = path->l[2].b;
		if (IS_ERR(b))
			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
		else
			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
		b = path->l[3].b;
		if (IS_ERR(b))
			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
		else
			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
	),

	TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
		  "locks %u %u %u %u node %s %s %s %s",
		  __entry->trans_fn,
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locks_want,
		  (__entry->nodes_locked >> 6) & 3,
		  (__entry->nodes_locked >> 4) & 3,
		  (__entry->nodes_locked >> 2) & 3,
		  (__entry->nodes_locked >> 0) & 3,
		  __entry->node3,
		  __entry->node2,
		  __entry->node1,
		  __entry->node0)
);
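
/*
 * Editorial note, not part of the original header: path->nodes_locked
 * packs the lock type held at each btree level into two bits per level;
 * the "locks %u %u %u %u" decode in the format string above prints those
 * two-bit values from level 3 down to level 0, matching the node3..node0
 * pointers (in current bcachefs: 0 = unlocked, 1 = read, 2 = intent,
 * 3 = write).
 */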

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

TRACE_EVENT(btree_path_set_pos,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path,
		 struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
		__field(u8,			locks_want	)
		__field(u8,			nodes_locked	)
		__array(char,			node0, 24	)
		__array(char,			node1, 24	)
		__array(char,			node2, 24	)
		__array(char,			node3, 24	)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->ref		= path->ref;
		__entry->preserve	= path->preserve;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);

		__entry->nodes_locked	= path->nodes_locked;
		struct btree *b = path->l[0].b;
		if (IS_ERR(b))
			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
		else
			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
		b = path->l[1].b;
		if (IS_ERR(b))
			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
		else
			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
		b = path->l[2].b;
		if (IS_ERR(b))
			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
		else
			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
		b = path->l[3].b;
		if (IS_ERR(b))
			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
		else
			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
	),

	TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
		  "locks %u %u %u %u node %s %s %s %s",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot,
		  (__entry->nodes_locked >> 6) & 3,
		  (__entry->nodes_locked >> 4) & 3,
		  (__entry->nodes_locked >> 2) & 3,
		  (__entry->nodes_locked >> 0) & 3,
		  __entry->node3,
		  __entry->node2,
		  __entry->node1,
		  __entry->node0)
);

TRACE_EVENT(btree_path_free,
	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
	TP_ARGS(trans, path, dup),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked)
		__field(s8,			dup		)
		__field(u8,			dup_locked	)
	),

	TP_fast_assign(
		__entry->idx			= path;
		__entry->preserve		= trans->paths[path].preserve;
		__entry->should_be_locked	= trans->paths[path].should_be_locked;
		__entry->dup			= dup ? dup - trans->paths : -1;
		__entry->dup_locked		= dup ? btree_node_locked(dup, dup->level) : 0;
	),

	TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
		  __entry->preserve ? 'P' : ' ',
		  __entry->should_be_locked ? 'S' : ' ',
		  __entry->dup,
		  __entry->dup_locked)
);

TRACE_EVENT(btree_path_free_trans_begin,
	TP_PROTO(btree_path_idx_t path),
	TP_ARGS(path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
	),

	TP_fast_assign(
		__entry->idx		= path;
	),

	TP_printk(" path %3u", __entry->idx)
);
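
/*
 * Editorial note, not part of the original header: when
 * CONFIG_BCACHEFS_PATH_TRACEPOINTS is disabled, the #else branch below
 * replaces each path tracepoint with an empty static inline, so call
 * sites compile unchanged and the calls vanish at compile time, e.g.:
 *
 *	static inline void trace_btree_path_alloc(struct btree_trans *trans,
 *						  struct btree_path *path) {}
 */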

#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
#ifndef _TRACE_BCACHEFS_H

static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
					struct btree_insert_entry *i, bool overwrite) {}
static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}

#endif
#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */

#define _TRACE_BCACHEFS_H
#endif /* _TRACE_BCACHEFS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../fs/bcachefs

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>
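
/*
 * Editorial note, not part of the original header: <trace/define_trace.h>
 * re-includes this file with TRACE_HEADER_MULTI_READ defined, expanding
 * the TRACE_EVENT()/DEFINE_EVENT() macros a second time into the actual
 * trace_*() implementations; TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE
 * point it at ../../fs/bcachefs/trace.h (relative to include/trace/).
 */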