/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHEFS_H

#include <linux/tracepoint.h>

#define TRACE_BPOS_entries(name)			\
	__field(u64, name##_inode)			\
	__field(u64, name##_offset)			\
	__field(u32, name##_snapshot)

#define TRACE_BPOS_assign(dst, src)			\
	__entry->dst##_inode	= (src).inode;		\
	__entry->dst##_offset	= (src).offset;		\
	__entry->dst##_snapshot	= (src).snapshot
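
/*
 * A rough sketch of what these helpers expand to, for a field named "pos"
 * (the authoritative expansion lives in the __field()/__entry machinery
 * of <trace/trace_events.h>; "path->pos" below is just an example source):
 *
 *	TRACE_BPOS_entries(pos) declares three event fields:
 *		__field(u64, pos_inode)
 *		__field(u64, pos_offset)
 *		__field(u32, pos_snapshot)
 *
 *	TRACE_BPOS_assign(pos, path->pos) fills them in TP_fast_assign():
 *		__entry->pos_inode	= path->pos.inode;
 *		__entry->pos_offset	= path->pos.offset;
 *		__entry->pos_snapshot	= path->pos.snapshot;
 */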

DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k),

	TP_STRUCT__entry(
		__string(k, k)
	),

	TP_fast_assign(
		__assign_str(k, k);
	),

	TP_printk("%s", __get_str(k))
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u8, level)
		__field(u8, btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t, dev)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
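
/*
 * The DECLARE_EVENT_CLASS() definitions above are templates, not events:
 * each DEFINE_EVENT(class, name, ...) later in this file stamps out an
 * event sharing the class's entry layout, fast-assign and printk format.
 * Call sites then use the generated trace_<name>() function; as a sketch
 * (the actual call sites live in the bcachefs .c files):
 *
 *	trace_read_bounce(bio);		// an instance of the "bio" class
 */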

/* super-io.c: */

TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, ip)
	),

	TP_fast_assign(
		__entry->dev	= c->dev;
		__entry->ip	= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);

/* io.c: */

DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__array(char, ret, 32)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);
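
/*
 * Note the two string-capture styles in this file: the bkey class above
 * uses a dynamically sized __string()/__assign_str() pair, while error
 * names from bch2_err_str() are copied into fixed 32-byte __array()
 * buffers with strscpy(), truncating long names -- presumably trading
 * completeness for a fixed, cheap record size in TP_fast_assign().
 */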

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Journal */

DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, journal_entry_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 prereserved, u64 prereserved_total,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		prereserved, prereserved_total,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(bool, direct)
		__field(bool, kicked)
		__field(u64, min_nr)
		__field(u64, min_key_cache)
		__field(u64, prereserved)
		__field(u64, prereserved_total)
		__field(u64, btree_cache_dirty)
		__field(u64, btree_cache_total)
		__field(u64, btree_key_cache_dirty)
		__field(u64, btree_key_cache_total)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->prereserved		= prereserved;
		__entry->prereserved_total	= prereserved_total;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->prereserved,
		  __entry->prereserved_total,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);

TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, nr_flushed)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);

/* bset.c: */

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);

/* Btree cache: */

TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long, nr_to_scan)
		__field(long, can_free)
		__field(long, ret)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type, type)
		__field(unsigned, bytes)
		__field(unsigned, sectors)
	),

	TP_fast_assign(
		__entry->type		= btree_node_type(b);
		__entry->bytes		= bytes;
		__entry->sectors	= sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(size_t, required)
		__array(char, ret, 32)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);

DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);
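
/*
 * The two path-lock failure events below capture lock state from two
 * angles: self_read_count/self_intent_count count how many times this
 * transaction's own paths hold the node's six lock (via
 * bch2_btree_node_lock_counts()), while read_count/intent_count are the
 * node's total hold counts (via six_lock_counts()). Comparing the two in
 * a trace helps distinguish self-conflicts from contention with other
 * transactions when a relock or upgrade fails.
 */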

TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		__field(u8, level)
		TRACE_BPOS_entries(pos)
		__array(char, node, 24)
		__field(u8, self_read_count)
		__field(u8, self_intent_count)
		__field(u8, read_count)
		__field(u8, intent_count)
		__field(u32, iter_lock_seq)
		__field(u32, node_lock_seq)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->btree_id	= path->btree_id;
		__entry->level		= path->level;
		TRACE_BPOS_assign(pos, path->pos);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];

		if (IS_ERR(b)) {
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		} else {
			c = six_lock_counts(&path->l[level].b->c.lock);
			__entry->read_count	= c.n[SIX_LOCK_read];
			__entry->intent_count	= c.n[SIX_LOCK_intent];
			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		}
		__entry->iter_lock_seq	= path->l[level].lock_seq;
		__entry->node_lock_seq	= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

TRACE_EVENT(btree_path_upgrade_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		__field(u8, level)
		TRACE_BPOS_entries(pos)
		__field(u8, locked)
		__field(u8, self_read_count)
		__field(u8, self_intent_count)
		__field(u8, read_count)
		__field(u8, intent_count)
		__field(u32, iter_lock_seq)
		__field(u32, node_lock_seq)
	),

	TP_fast_assign(
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->btree_id	= path->btree_id;
		__entry->level		= level;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->locked		= btree_node_locked(path, level);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
		c = six_lock_counts(&path->l[level].b->c.lock);
		__entry->read_count	= c.n[SIX_LOCK_read];
		__entry->intent_count	= c.n[SIX_LOCK_intent];
		__entry->iter_lock_seq	= path->l[level].lock_seq;
		__entry->node_lock_seq	= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locked,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Allocator */

DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err),

	TP_STRUCT__entry(
		__field(u8, dev)
		__array(char, reserve, 16)
		__field(u64, bucket)
		__field(u64, free)
		__field(u64, avail)
		__field(u64, copygc_wait_amount)
		__field(s64, copygc_waiting_for)
		__field(u64, seen)
		__field(u64, open)
		__field(u64, need_journal_commit)
		__field(u64, nouse)
		__field(bool, nonblocking)
		__field(u64, nocow)
		__array(char, err, 32)
	),

	TP_fast_assign(
		__entry->dev		= ca->dev_idx;
		strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
		__entry->bucket		= bucket;
		__entry->free		= free;
		__entry->avail		= avail;
		__entry->copygc_wait_amount	= copygc_wait_amount;
		__entry->copygc_waiting_for	= copygc_waiting_for;
		__entry->seen		= s->buckets_seen;
		__entry->open		= s->skipped_open;
		__entry->need_journal_commit	= s->skipped_need_journal_commit;
		__entry->nouse		= s->skipped_nouse;
		__entry->nonblocking	= nonblocking;
		__entry->nocow		= s->skipped_nocow;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
		  __entry->reserve,
		  __entry->dev,
		  __entry->bucket,
		  __entry->free,
		  __entry->avail,
		  __entry->copygc_wait_amount,
		  __entry->copygc_waiting_for,
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->nouse,
		  __entry->nocow,
		  __entry->nonblocking,
		  __entry->err)
);
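
/*
 * DEFINE_EVENT() requires the class's TP_PROTO()/TP_ARGS() to be repeated
 * verbatim, hence the duplicated parameter blocks for bucket_alloc and
 * bucket_alloc_fail below. A sketch of a call site, with hypothetical
 * argument names:
 *
 *	trace_bucket_alloc_fail(ca, "btree", bucket, free, avail,
 *				copygc_wait_amount, copygc_waiting_for,
 *				&s, nonblocking, bch2_err_str(ret));
 */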

DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);

TRACE_EVENT(discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, seen)
		__field(u64, open)
		__field(u64, need_journal_commit)
		__field(u64, discarded)
		__array(char, err, 16)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->seen			= seen;
		__entry->open			= open;
		__entry->need_journal_commit	= need_journal_commit;
		__entry->discarded		= discarded;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->discarded,
		  __entry->err)
);

TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, dev_idx)
		__field(u32, sectors)
		__field(u64, bucket)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);

/* Moving IO */

TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, dev_idx)
		__field(u64, bucket)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);

DEFINE_EVENT(bkey, move_extent,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

DEFINE_EVENT(bkey, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

TRACE_EVENT(move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *msg),
	TP_ARGS(c, msg),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(msg, msg)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__assign_str(msg, msg);
	),

	TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
);

DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

TRACE_EVENT(move_data,
	TP_PROTO(struct bch_fs *c, u64 sectors_moved,
		 u64 keys_moved),
	TP_ARGS(c, sectors_moved, keys_moved),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, sectors_moved)
		__field(u64, keys_moved)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->sectors_moved	= sectors_moved;
		__entry->keys_moved	= keys_moved;
	),

	TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->sectors_moved, __entry->keys_moved)
);

TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 u64 fragmentation, int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, member)
		__field(u64, bucket)
		__field(u32, sectors)
		__field(u32, bucket_size)
		__field(u64, fragmentation)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->member		= bucket->inode;
		__entry->bucket		= bucket->offset;
		__entry->sectors	= sectors;
		__entry->bucket_size	= bucket_size;
		__entry->fragmentation	= fragmentation;
		__entry->ret		= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->fragmentation, __entry->ret)
);

TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 sectors_moved, u64 sectors_not_moved,
		 u64 buckets_moved, u64 buckets_not_moved),
	TP_ARGS(c,
		sectors_moved, sectors_not_moved,
		buckets_moved, buckets_not_moved),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, sectors_moved)
		__field(u64, sectors_not_moved)
		__field(u64, buckets_moved)
		__field(u64, buckets_not_moved)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->sectors_moved		= sectors_moved;
		__entry->sectors_not_moved	= sectors_not_moved;
		__entry->buckets_moved		= buckets_moved;
		__entry->buckets_not_moved	= buckets_not_moved;
	),

	TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->sectors_moved, __entry->sectors_not_moved,
		  __entry->buckets_moved, __entry->buckets_not_moved)
);

TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, wait_amount)
		__field(u64, until)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->wait_amount	= wait_amount;
		__entry->until		= until;
	),

	TP_printk("%d,%d waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount, __entry->until)
);

/* btree transactions: */

DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
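
/*
 * transaction_event is the generic template for transaction-restart
 * tracepoints: just the transaction entry point (trans->fn, truncated to
 * 32 bytes) plus the caller IP. Restart reasons that need no extra
 * context are defined against it, e.g.:
 *
 *	DEFINE_EVENT(transaction_event, trans_restart_injected, ...);
 *
 * Restarts that need per-path context (btree id, position) use the
 * transaction_restart_iter class further down instead.
 */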

DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, level)
		__field(u16, written)
		__field(u16, blocks)
		__field(u16, u64s_remaining)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch_btree_keys_u64s_remaining(trans->c, b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);

DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->flags		= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);

DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		__field(u8, old_locks_want)
		__field(u8, new_locks_want)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->btree_id	= path->btree_id;
		__entry->old_locks_want	= old_locks_want;
		__entry->new_locks_want	= new_locks_want;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);

TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(unsigned long, bytes)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);

TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(enum btree_id, btree_id)
		TRACE_BPOS_entries(pos)
		__field(u32, old_u64s)
		__field(u32, new_u64s)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_ids[__entry->btree_id],
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);

DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t, nr)
		__field(size_t, skipped)
		__field(size_t, fast)
		__field(size_t, size)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);

TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t size),
	TP_ARGS(trans, nr, size),

	TP_STRUCT__entry(
		__field(size_t, nr)
		__field(size_t, size)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu", __entry->nr, __entry->size)
);

#endif /* _TRACE_BCACHEFS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../fs/bcachefs

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>