/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHEFS_H

#include <linux/tracepoint.h>

/*
 * Helpers for embedding a struct bpos (inode:offset:snapshot) into a trace
 * entry under a field-name prefix, so several events can share the pattern.
 */
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot

/* Event class: a single btree position */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);

/* Event class: filesystem device number plus a preformatted string */
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);

/* Event class: transaction identity, caller IP, and a preformatted string */
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);

/* As trans_str, but without recording the caller's instruction pointer */
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);

/* Event class: a btree node, for contexts with no transaction available */
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

/* Event class: a btree node plus the transaction operating on it */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

/* Event class: filesystem device number only */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev))
);

/* Event class: transaction identity only */
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);

/* Event class: block-layer-style bio tracing (dev, sector, length, rwbs) */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs, 6			)
	),

	TP_fast_assign(
		/* bio may not yet have a device attached; record 0 then */
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

/* fs.c: */

/* sync_fs() entry: records whether this is a waiting (synchronous) sync */
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),

	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	wait			)

	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->wait	= wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);

/* fs-io.c: */

/* fsync() entry: inode, its parent, and the datasync flag */
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),

	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(	ino_t,	parent			)
		__field(	int,	datasync		)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev		= dentry->d_sb->s_dev;
		__entry->ino		= d_inode(dentry)->i_ino;
		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
		__entry->datasync	= datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);

/* super-io.c: */

/* A superblock write, with the caller that triggered it */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned long,	ip			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);

/* io.c: */

DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* A read was not promoted; records the error name from bch2_err_str() */
TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		ret, 32			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Journal */

DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Snapshot of reclaim targets and cache occupancy at reclaim start */
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);

/* Number of entries flushed when journal reclaim completes */
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);

/* bset.c: */

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);

/* Btree cache: */

/* One pass of the btree cache shrinker */
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A btree node write: node type plus size in bytes and sectors */
TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type , __entry->bytes,
__entry->sectors) 484 ); 485 486 DEFINE_EVENT(btree_node, btree_node_alloc, 487 TP_PROTO(struct btree_trans *trans, struct btree *b), 488 TP_ARGS(trans, b) 489 ); 490 491 DEFINE_EVENT(btree_node, btree_node_free, 492 TP_PROTO(struct btree_trans *trans, struct btree *b), 493 TP_ARGS(trans, b) 494 ); 495 496 TRACE_EVENT(btree_reserve_get_fail, 497 TP_PROTO(const char *trans_fn, 498 unsigned long caller_ip, 499 size_t required, 500 int ret), 501 TP_ARGS(trans_fn, caller_ip, required, ret), 502 503 TP_STRUCT__entry( 504 __array(char, trans_fn, 32 ) 505 __field(unsigned long, caller_ip ) 506 __field(size_t, required ) 507 __array(char, ret, 32 ) 508 ), 509 510 TP_fast_assign( 511 strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn)); 512 __entry->caller_ip = caller_ip; 513 __entry->required = required; 514 strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret)); 515 ), 516 517 TP_printk("%s %pS required %zu ret %s", 518 __entry->trans_fn, 519 (void *) __entry->caller_ip, 520 __entry->required, 521 __entry->ret) 522 ); 523 524 DEFINE_EVENT(btree_node, btree_node_compact, 525 TP_PROTO(struct btree_trans *trans, struct btree *b), 526 TP_ARGS(trans, b) 527 ); 528 529 DEFINE_EVENT(btree_node, btree_node_merge, 530 TP_PROTO(struct btree_trans *trans, struct btree *b), 531 TP_ARGS(trans, b) 532 ); 533 534 DEFINE_EVENT(btree_node, btree_node_split, 535 TP_PROTO(struct btree_trans *trans, struct btree *b), 536 TP_ARGS(trans, b) 537 ); 538 539 DEFINE_EVENT(btree_node, btree_node_rewrite, 540 TP_PROTO(struct btree_trans *trans, struct btree *b), 541 TP_ARGS(trans, b) 542 ); 543 544 DEFINE_EVENT(btree_node, btree_node_set_root, 545 TP_PROTO(struct btree_trans *trans, struct btree *b), 546 TP_ARGS(trans, b) 547 ); 548 549 TRACE_EVENT(btree_path_relock_fail, 550 TP_PROTO(struct btree_trans *trans, 551 unsigned long caller_ip, 552 struct btree_path *path, 553 unsigned level), 554 TP_ARGS(trans, caller_ip, path, level), 555 556 TP_STRUCT__entry( 557 __array(char, 
trans_fn, 32 ) 558 __field(unsigned long, caller_ip ) 559 __field(u8, btree_id ) 560 __field(u8, level ) 561 TRACE_BPOS_entries(pos) 562 __array(char, node, 24 ) 563 __field(u8, self_read_count ) 564 __field(u8, self_intent_count) 565 __field(u8, read_count ) 566 __field(u8, intent_count ) 567 __field(u32, iter_lock_seq ) 568 __field(u32, node_lock_seq ) 569 ), 570 571 TP_fast_assign( 572 struct btree *b = btree_path_node(path, level); 573 struct six_lock_count c; 574 575 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); 576 __entry->caller_ip = caller_ip; 577 __entry->btree_id = path->btree_id; 578 __entry->level = path->level; 579 TRACE_BPOS_assign(pos, path->pos); 580 581 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level); 582 __entry->self_read_count = c.n[SIX_LOCK_read]; 583 __entry->self_intent_count = c.n[SIX_LOCK_intent]; 584 585 if (IS_ERR(b)) { 586 strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node)); 587 } else { 588 c = six_lock_counts(&path->l[level].b->c.lock); 589 __entry->read_count = c.n[SIX_LOCK_read]; 590 __entry->intent_count = c.n[SIX_LOCK_intent]; 591 scnprintf(__entry->node, sizeof(__entry->node), "%px", b); 592 } 593 __entry->iter_lock_seq = path->l[level].lock_seq; 594 __entry->node_lock_seq = is_btree_node(path, level) 595 ? 
six_lock_seq(&path->l[level].b->c.lock) 596 : 0; 597 ), 598 599 TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u", 600 __entry->trans_fn, 601 (void *) __entry->caller_ip, 602 bch2_btree_id_str(__entry->btree_id), 603 __entry->pos_inode, 604 __entry->pos_offset, 605 __entry->pos_snapshot, 606 __entry->level, 607 __entry->node, 608 __entry->self_read_count, 609 __entry->self_intent_count, 610 __entry->read_count, 611 __entry->intent_count, 612 __entry->iter_lock_seq, 613 __entry->node_lock_seq) 614 ); 615 616 TRACE_EVENT(btree_path_upgrade_fail, 617 TP_PROTO(struct btree_trans *trans, 618 unsigned long caller_ip, 619 struct btree_path *path, 620 unsigned level), 621 TP_ARGS(trans, caller_ip, path, level), 622 623 TP_STRUCT__entry( 624 __array(char, trans_fn, 32 ) 625 __field(unsigned long, caller_ip ) 626 __field(u8, btree_id ) 627 __field(u8, level ) 628 TRACE_BPOS_entries(pos) 629 __field(u8, locked ) 630 __field(u8, self_read_count ) 631 __field(u8, self_intent_count) 632 __field(u8, read_count ) 633 __field(u8, intent_count ) 634 __field(u32, iter_lock_seq ) 635 __field(u32, node_lock_seq ) 636 ), 637 638 TP_fast_assign( 639 struct six_lock_count c; 640 641 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); 642 __entry->caller_ip = caller_ip; 643 __entry->btree_id = path->btree_id; 644 __entry->level = level; 645 TRACE_BPOS_assign(pos, path->pos); 646 __entry->locked = btree_node_locked(path, level); 647 648 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level), 649 __entry->self_read_count = c.n[SIX_LOCK_read]; 650 __entry->self_intent_count = c.n[SIX_LOCK_intent]; 651 c = six_lock_counts(&path->l[level].b->c.lock); 652 __entry->read_count = c.n[SIX_LOCK_read]; 653 __entry->intent_count = c.n[SIX_LOCK_intent]; 654 __entry->iter_lock_seq = path->l[level].lock_seq; 655 __entry->node_lock_seq = is_btree_node(path, level) 656 ? 
six_lock_seq(&path->l[level].b->c.lock) 657 : 0; 658 ), 659 660 TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u", 661 __entry->trans_fn, 662 (void *) __entry->caller_ip, 663 bch2_btree_id_str(__entry->btree_id), 664 __entry->pos_inode, 665 __entry->pos_offset, 666 __entry->pos_snapshot, 667 __entry->level, 668 __entry->locked, 669 __entry->self_read_count, 670 __entry->self_intent_count, 671 __entry->read_count, 672 __entry->intent_count, 673 __entry->iter_lock_seq, 674 __entry->node_lock_seq) 675 ); 676 677 /* Garbage collection */ 678 679 DEFINE_EVENT(bch_fs, gc_gens_start, 680 TP_PROTO(struct bch_fs *c), 681 TP_ARGS(c) 682 ); 683 684 DEFINE_EVENT(bch_fs, gc_gens_end, 685 TP_PROTO(struct bch_fs *c), 686 TP_ARGS(c) 687 ); 688 689 /* Allocator */ 690 691 DEFINE_EVENT(fs_str, bucket_alloc, 692 TP_PROTO(struct bch_fs *c, const char *str), 693 TP_ARGS(c, str) 694 ); 695 696 DEFINE_EVENT(fs_str, bucket_alloc_fail, 697 TP_PROTO(struct bch_fs *c, const char *str), 698 TP_ARGS(c, str) 699 ); 700 701 TRACE_EVENT(discard_buckets, 702 TP_PROTO(struct bch_fs *c, u64 seen, u64 open, 703 u64 need_journal_commit, u64 discarded, const char *err), 704 TP_ARGS(c, seen, open, need_journal_commit, discarded, err), 705 706 TP_STRUCT__entry( 707 __field(dev_t, dev ) 708 __field(u64, seen ) 709 __field(u64, open ) 710 __field(u64, need_journal_commit ) 711 __field(u64, discarded ) 712 __array(char, err, 16 ) 713 ), 714 715 TP_fast_assign( 716 __entry->dev = c->dev; 717 __entry->seen = seen; 718 __entry->open = open; 719 __entry->need_journal_commit = need_journal_commit; 720 __entry->discarded = discarded; 721 strscpy(__entry->err, err, sizeof(__entry->err)); 722 ), 723 724 TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s", 725 MAJOR(__entry->dev), MINOR(__entry->dev), 726 __entry->seen, 727 __entry->open, 728 __entry->need_journal_commit, 729 __entry->discarded, 730 __entry->err) 731 ); 
732 733 TRACE_EVENT(bucket_invalidate, 734 TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors), 735 TP_ARGS(c, dev, bucket, sectors), 736 737 TP_STRUCT__entry( 738 __field(dev_t, dev ) 739 __field(u32, dev_idx ) 740 __field(u32, sectors ) 741 __field(u64, bucket ) 742 ), 743 744 TP_fast_assign( 745 __entry->dev = c->dev; 746 __entry->dev_idx = dev; 747 __entry->sectors = sectors; 748 __entry->bucket = bucket; 749 ), 750 751 TP_printk("%d:%d invalidated %u:%llu cached sectors %u", 752 MAJOR(__entry->dev), MINOR(__entry->dev), 753 __entry->dev_idx, __entry->bucket, 754 __entry->sectors) 755 ); 756 757 /* Moving IO */ 758 759 TRACE_EVENT(bucket_evacuate, 760 TP_PROTO(struct bch_fs *c, struct bpos *bucket), 761 TP_ARGS(c, bucket), 762 763 TP_STRUCT__entry( 764 __field(dev_t, dev ) 765 __field(u32, dev_idx ) 766 __field(u64, bucket ) 767 ), 768 769 TP_fast_assign( 770 __entry->dev = c->dev; 771 __entry->dev_idx = bucket->inode; 772 __entry->bucket = bucket->offset; 773 ), 774 775 TP_printk("%d:%d %u:%llu", 776 MAJOR(__entry->dev), MINOR(__entry->dev), 777 __entry->dev_idx, __entry->bucket) 778 ); 779 780 DEFINE_EVENT(fs_str, move_extent, 781 TP_PROTO(struct bch_fs *c, const char *str), 782 TP_ARGS(c, str) 783 ); 784 785 DEFINE_EVENT(fs_str, move_extent_read, 786 TP_PROTO(struct bch_fs *c, const char *str), 787 TP_ARGS(c, str) 788 ); 789 790 DEFINE_EVENT(fs_str, move_extent_write, 791 TP_PROTO(struct bch_fs *c, const char *str), 792 TP_ARGS(c, str) 793 ); 794 795 DEFINE_EVENT(fs_str, move_extent_finish, 796 TP_PROTO(struct bch_fs *c, const char *str), 797 TP_ARGS(c, str) 798 ); 799 800 DEFINE_EVENT(fs_str, move_extent_fail, 801 TP_PROTO(struct bch_fs *c, const char *str), 802 TP_ARGS(c, str) 803 ); 804 805 DEFINE_EVENT(fs_str, move_extent_start_fail, 806 TP_PROTO(struct bch_fs *c, const char *str), 807 TP_ARGS(c, str) 808 ); 809 810 TRACE_EVENT(move_data, 811 TP_PROTO(struct bch_fs *c, 812 struct bch_move_stats *stats), 813 TP_ARGS(c, stats), 814 815 
TP_STRUCT__entry( 816 __field(dev_t, dev ) 817 __field(u64, keys_moved ) 818 __field(u64, keys_raced ) 819 __field(u64, sectors_seen ) 820 __field(u64, sectors_moved ) 821 __field(u64, sectors_raced ) 822 ), 823 824 TP_fast_assign( 825 __entry->dev = c->dev; 826 __entry->keys_moved = atomic64_read(&stats->keys_moved); 827 __entry->keys_raced = atomic64_read(&stats->keys_raced); 828 __entry->sectors_seen = atomic64_read(&stats->sectors_seen); 829 __entry->sectors_moved = atomic64_read(&stats->sectors_moved); 830 __entry->sectors_raced = atomic64_read(&stats->sectors_raced); 831 ), 832 833 TP_printk("%d,%d keys moved %llu raced %llu" 834 "sectors seen %llu moved %llu raced %llu", 835 MAJOR(__entry->dev), MINOR(__entry->dev), 836 __entry->keys_moved, 837 __entry->keys_raced, 838 __entry->sectors_seen, 839 __entry->sectors_moved, 840 __entry->sectors_raced) 841 ); 842 843 TRACE_EVENT(evacuate_bucket, 844 TP_PROTO(struct bch_fs *c, struct bpos *bucket, 845 unsigned sectors, unsigned bucket_size, 846 u64 fragmentation, int ret), 847 TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret), 848 849 TP_STRUCT__entry( 850 __field(dev_t, dev ) 851 __field(u64, member ) 852 __field(u64, bucket ) 853 __field(u32, sectors ) 854 __field(u32, bucket_size ) 855 __field(u64, fragmentation ) 856 __field(int, ret ) 857 ), 858 859 TP_fast_assign( 860 __entry->dev = c->dev; 861 __entry->member = bucket->inode; 862 __entry->bucket = bucket->offset; 863 __entry->sectors = sectors; 864 __entry->bucket_size = bucket_size; 865 __entry->fragmentation = fragmentation; 866 __entry->ret = ret; 867 ), 868 869 TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i", 870 MAJOR(__entry->dev), MINOR(__entry->dev), 871 __entry->member, __entry->bucket, 872 __entry->sectors, __entry->bucket_size, 873 __entry->fragmentation, __entry->ret) 874 ); 875 876 TRACE_EVENT(copygc, 877 TP_PROTO(struct bch_fs *c, 878 u64 sectors_moved, u64 sectors_not_moved, 879 u64 buckets_moved, u64 
buckets_not_moved), 880 TP_ARGS(c, 881 sectors_moved, sectors_not_moved, 882 buckets_moved, buckets_not_moved), 883 884 TP_STRUCT__entry( 885 __field(dev_t, dev ) 886 __field(u64, sectors_moved ) 887 __field(u64, sectors_not_moved ) 888 __field(u64, buckets_moved ) 889 __field(u64, buckets_not_moved ) 890 ), 891 892 TP_fast_assign( 893 __entry->dev = c->dev; 894 __entry->sectors_moved = sectors_moved; 895 __entry->sectors_not_moved = sectors_not_moved; 896 __entry->buckets_moved = buckets_moved; 897 __entry->buckets_not_moved = buckets_moved; 898 ), 899 900 TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu", 901 MAJOR(__entry->dev), MINOR(__entry->dev), 902 __entry->sectors_moved, __entry->sectors_not_moved, 903 __entry->buckets_moved, __entry->buckets_not_moved) 904 ); 905 906 TRACE_EVENT(copygc_wait, 907 TP_PROTO(struct bch_fs *c, 908 u64 wait_amount, u64 until), 909 TP_ARGS(c, wait_amount, until), 910 911 TP_STRUCT__entry( 912 __field(dev_t, dev ) 913 __field(u64, wait_amount ) 914 __field(u64, until ) 915 ), 916 917 TP_fast_assign( 918 __entry->dev = c->dev; 919 __entry->wait_amount = wait_amount; 920 __entry->until = until; 921 ), 922 923 TP_printk("%d,%u waiting for %llu sectors until %llu", 924 MAJOR(__entry->dev), MINOR(__entry->dev), 925 __entry->wait_amount, __entry->until) 926 ); 927 928 /* btree transactions: */ 929 930 DECLARE_EVENT_CLASS(transaction_event, 931 TP_PROTO(struct btree_trans *trans, 932 unsigned long caller_ip), 933 TP_ARGS(trans, caller_ip), 934 935 TP_STRUCT__entry( 936 __array(char, trans_fn, 32 ) 937 __field(unsigned long, caller_ip ) 938 ), 939 940 TP_fast_assign( 941 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); 942 __entry->caller_ip = caller_ip; 943 ), 944 945 TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip) 946 ); 947 948 DEFINE_EVENT(transaction_event, transaction_commit, 949 TP_PROTO(struct btree_trans *trans, 950 unsigned long caller_ip), 951 TP_ARGS(trans, 
		caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart because a node filled up while a split was in flight */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		level			)
		__field(u16,		written			)
		__field(u16,		blocks			)
		__field(u16,		u64s_remaining		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level			= b->c.level;
		__entry->written		= b->written;
		__entry->blocks			= btree_blocks(trans->c);
		__entry->u64s_remaining		= bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);

DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart because a journal pre-reservation could not be taken */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(unsigned,	flags			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->flags		= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);

DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);

/* Event class: a transaction restart attributed to one btree path */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart because a lock upgrade failed; @f describes the failing level */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		__field(u8,		old_locks_want		)
		__field(u8,		new_locks_want		)
		__field(u8,		level			)
		__field(u32,		path_seq		)
		__field(u32,		node_seq		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);

DEFINE_EVENT(trans_str, trans_restart_relock,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
unsigned long caller_ip, 1150 struct btree_path *path), 1151 TP_ARGS(trans, caller_ip, path) 1152 ); 1153 1154 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill, 1155 TP_PROTO(struct btree_trans *trans, 1156 unsigned long caller_ip, 1157 struct btree_path *path), 1158 TP_ARGS(trans, caller_ip, path) 1159 ); 1160 1161 DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade, 1162 TP_PROTO(struct btree_trans *trans, 1163 unsigned long caller_ip), 1164 TP_ARGS(trans, caller_ip) 1165 ); 1166 1167 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill, 1168 TP_PROTO(struct btree_trans *trans, 1169 unsigned long caller_ip, 1170 struct btree_path *path), 1171 TP_ARGS(trans, caller_ip, path) 1172 ); 1173 1174 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path, 1175 TP_PROTO(struct btree_trans *trans, 1176 unsigned long caller_ip, 1177 struct btree_path *path), 1178 TP_ARGS(trans, caller_ip, path) 1179 ); 1180 1181 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent, 1182 TP_PROTO(struct btree_trans *trans, 1183 unsigned long caller_ip, 1184 struct btree_path *path), 1185 TP_ARGS(trans, caller_ip, path) 1186 ); 1187 1188 DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse, 1189 TP_PROTO(struct btree_trans *trans, 1190 unsigned long caller_ip, 1191 struct btree_path *path), 1192 TP_ARGS(trans, caller_ip, path) 1193 ); 1194 1195 DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure, 1196 TP_PROTO(struct btree_trans *trans, 1197 unsigned long caller_ip, 1198 struct btree_path *path), 1199 TP_ARGS(trans, caller_ip, path) 1200 ); 1201 1202 DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock, 1203 TP_PROTO(struct btree_trans *trans, 1204 const char *cycle), 1205 TP_ARGS(trans, cycle) 1206 ); 1207 1208 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit, 1209 TP_PROTO(struct btree_trans *trans, 1210 unsigned long caller_ip), 1211 
TP_ARGS(trans, caller_ip) 1212 ); 1213 1214 TRACE_EVENT(trans_restart_would_deadlock_write, 1215 TP_PROTO(struct btree_trans *trans), 1216 TP_ARGS(trans), 1217 1218 TP_STRUCT__entry( 1219 __array(char, trans_fn, 32 ) 1220 ), 1221 1222 TP_fast_assign( 1223 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); 1224 ), 1225 1226 TP_printk("%s", __entry->trans_fn) 1227 ); 1228 1229 TRACE_EVENT(trans_restart_mem_realloced, 1230 TP_PROTO(struct btree_trans *trans, 1231 unsigned long caller_ip, 1232 unsigned long bytes), 1233 TP_ARGS(trans, caller_ip, bytes), 1234 1235 TP_STRUCT__entry( 1236 __array(char, trans_fn, 32 ) 1237 __field(unsigned long, caller_ip ) 1238 __field(unsigned long, bytes ) 1239 ), 1240 1241 TP_fast_assign( 1242 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); 1243 __entry->caller_ip = caller_ip; 1244 __entry->bytes = bytes; 1245 ), 1246 1247 TP_printk("%s %pS bytes %lu", 1248 __entry->trans_fn, 1249 (void *) __entry->caller_ip, 1250 __entry->bytes) 1251 ); 1252 1253 TRACE_EVENT(trans_restart_key_cache_key_realloced, 1254 TP_PROTO(struct btree_trans *trans, 1255 unsigned long caller_ip, 1256 struct btree_path *path, 1257 unsigned old_u64s, 1258 unsigned new_u64s), 1259 TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s), 1260 1261 TP_STRUCT__entry( 1262 __array(char, trans_fn, 32 ) 1263 __field(unsigned long, caller_ip ) 1264 __field(enum btree_id, btree_id ) 1265 TRACE_BPOS_entries(pos) 1266 __field(u32, old_u64s ) 1267 __field(u32, new_u64s ) 1268 ), 1269 1270 TP_fast_assign( 1271 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); 1272 __entry->caller_ip = caller_ip; 1273 1274 __entry->btree_id = path->btree_id; 1275 TRACE_BPOS_assign(pos, path->pos); 1276 __entry->old_u64s = old_u64s; 1277 __entry->new_u64s = new_u64s; 1278 ), 1279 1280 TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u", 1281 __entry->trans_fn, 1282 (void *) __entry->caller_ip, 1283 bch2_btree_id_str(__entry->btree_id), 
1284 __entry->pos_inode, 1285 __entry->pos_offset, 1286 __entry->pos_snapshot, 1287 __entry->old_u64s, 1288 __entry->new_u64s) 1289 ); 1290 1291 TRACE_EVENT(path_downgrade, 1292 TP_PROTO(struct btree_trans *trans, 1293 unsigned long caller_ip, 1294 struct btree_path *path, 1295 unsigned old_locks_want), 1296 TP_ARGS(trans, caller_ip, path, old_locks_want), 1297 1298 TP_STRUCT__entry( 1299 __array(char, trans_fn, 32 ) 1300 __field(unsigned long, caller_ip ) 1301 __field(unsigned, old_locks_want ) 1302 __field(unsigned, new_locks_want ) 1303 __field(unsigned, btree ) 1304 TRACE_BPOS_entries(pos) 1305 ), 1306 1307 TP_fast_assign( 1308 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); 1309 __entry->caller_ip = caller_ip; 1310 __entry->old_locks_want = old_locks_want; 1311 __entry->new_locks_want = path->locks_want; 1312 __entry->btree = path->btree_id; 1313 TRACE_BPOS_assign(pos, path->pos); 1314 ), 1315 1316 TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u", 1317 __entry->trans_fn, 1318 (void *) __entry->caller_ip, 1319 __entry->old_locks_want, 1320 __entry->new_locks_want, 1321 bch2_btree_id_str(__entry->btree), 1322 __entry->pos_inode, 1323 __entry->pos_offset, 1324 __entry->pos_snapshot) 1325 ); 1326 1327 DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush, 1328 TP_PROTO(struct btree_trans *trans, 1329 unsigned long caller_ip), 1330 TP_ARGS(trans, caller_ip) 1331 ); 1332 1333 TRACE_EVENT(write_buffer_flush, 1334 TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size), 1335 TP_ARGS(trans, nr, skipped, fast, size), 1336 1337 TP_STRUCT__entry( 1338 __field(size_t, nr ) 1339 __field(size_t, skipped ) 1340 __field(size_t, fast ) 1341 __field(size_t, size ) 1342 ), 1343 1344 TP_fast_assign( 1345 __entry->nr = nr; 1346 __entry->skipped = skipped; 1347 __entry->fast = fast; 1348 __entry->size = size; 1349 ), 1350 1351 TP_printk("%zu/%zu skipped %zu fast %zu", 1352 __entry->nr, __entry->size, 
__entry->skipped, __entry->fast) 1353 ); 1354 1355 TRACE_EVENT(write_buffer_flush_sync, 1356 TP_PROTO(struct btree_trans *trans, unsigned long caller_ip), 1357 TP_ARGS(trans, caller_ip), 1358 1359 TP_STRUCT__entry( 1360 __array(char, trans_fn, 32 ) 1361 __field(unsigned long, caller_ip ) 1362 ), 1363 1364 TP_fast_assign( 1365 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); 1366 __entry->caller_ip = caller_ip; 1367 ), 1368 1369 TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip) 1370 ); 1371 1372 TRACE_EVENT(write_buffer_flush_slowpath, 1373 TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total), 1374 TP_ARGS(trans, slowpath, total), 1375 1376 TP_STRUCT__entry( 1377 __field(size_t, slowpath ) 1378 __field(size_t, total ) 1379 ), 1380 1381 TP_fast_assign( 1382 __entry->slowpath = slowpath; 1383 __entry->total = total; 1384 ), 1385 1386 TP_printk("%zu/%zu", __entry->slowpath, __entry->total) 1387 ); 1388 1389 DEFINE_EVENT(fs_str, rebalance_extent, 1390 TP_PROTO(struct bch_fs *c, const char *str), 1391 TP_ARGS(c, str) 1392 ); 1393 1394 DEFINE_EVENT(fs_str, data_update, 1395 TP_PROTO(struct bch_fs *c, const char *str), 1396 TP_ARGS(c, str) 1397 ); 1398 1399 TRACE_EVENT(error_downcast, 1400 TP_PROTO(int bch_err, int std_err, unsigned long ip), 1401 TP_ARGS(bch_err, std_err, ip), 1402 1403 TP_STRUCT__entry( 1404 __array(char, bch_err, 32 ) 1405 __array(char, std_err, 32 ) 1406 __array(char, ip, 32 ) 1407 ), 1408 1409 TP_fast_assign( 1410 strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err)); 1411 strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err)); 1412 snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip); 1413 ), 1414 1415 TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip) 1416 ); 1417 1418 #endif /* _TRACE_BCACHEFS_H */ 1419 1420 /* This part must be outside protection */ 1421 #undef TRACE_INCLUDE_PATH 1422 #define TRACE_INCLUDE_PATH 
../../fs/bcachefs 1423 1424 #undef TRACE_INCLUDE_FILE 1425 #define TRACE_INCLUDE_FILE trace 1426 1427 #include <trace/define_trace.h> 1428