1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6
7 #include <linux/tracepoint.h>
8
/*
 * Declare the trace-entry fields for a struct bpos, prefixed with 'name'
 * (e.g. TRACE_BPOS_entries(pos) declares pos_inode/pos_offset/pos_snapshot).
 */
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)
13
/* Copy a struct bpos 'src' into the entry fields declared by TRACE_BPOS_entries. */
#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
18
/* Event class: log a single btree position as inode:offset:snapshot. */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
33
/*
 * Event class: filesystem device number plus a caller-preformatted string.
 * NOTE(review): single-argument __assign_str() is the newer kernel form —
 * confirm the tree's tracepoint API version matches.
 */
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
50
/*
 * Event class: like fs_str, but also records the transaction's function name
 * and the caller's instruction pointer.
 */
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
73
/* Event class: trans_str without the caller instruction pointer. */
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);
94
/*
 * Event class: identify a btree node (level, btree id, key position) without
 * a transaction context.
 */
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
119
/* Event class: btree node identification plus the transaction function name. */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
146
/* Event class: just the filesystem's device number. */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
161
/* Event class: device number plus transaction function name. */
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);
178
/*
 * Event class: block-layer style bio logging — device, start sector,
 * length in sectors, and the rwbs op/flags string.
 */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		/* bio may not yet have a device attached */
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
201
202 /* disk_accounting.c */
203
/* An accounting key was inserted into the in-memory accounting table. */
TRACE_EVENT(accounting_mem_insert,
	TP_PROTO(struct bch_fs *c, const char *acc),
	TP_ARGS(c, acc),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	new_nr			)
		__string(acc,		acc			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		/* table size after the insert */
		__entry->new_nr		= c->accounting.k.nr;
		__assign_str(acc);
	),

	TP_printk("%d,%d entries %u added %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->new_nr,
		  __get_str(acc))
);
225
226 /* fs.c: */
/* sync_fs() was called on the superblock; 'wait' is the sync_fs wait flag. */
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),

	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	wait			)

	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->wait	= wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);
247
248 /* fs-io.c: */
/* fsync()/fdatasync() on a file: records inode, parent inode and datasync flag. */
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),

	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(	ino_t,	parent			)
		__field(	int,	datasync		)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev		= dentry->d_sb->s_dev;
		__entry->ino		= d_inode(dentry)->i_ino;
		/* NOTE(review): d_parent is read without d_lock here — racy parent
		 * is acceptable for tracing purposes */
		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
		__entry->datasync	= datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);
275
276 /* super-io.c: */
/* A superblock write was issued; 'ip' identifies the call site. */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned long,	ip			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
295
296 /* io.c: */
297
/* A cached read triggered a promote (rewrite to a faster device). */
DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
302
/* A promote was considered but not started; logs the error as a string. */
TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		ret, 32			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);
321
/* Read used a bounce buffer. */
DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read was split into multiple bios. */
DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read is being retried. */
DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read raced with extent reuse. */
DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
341
342 /* Journal */
343
/* Journal has no space for a new entry. */
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Current journal entry is full. */
DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Journal entry was closed. */
DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Journal write submitted. */
DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
363
/*
 * Journal reclaim pass starting: snapshot of btree cache and key cache
 * dirty/total counts and the reclaim thresholds.
 */
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
408
/* Journal reclaim pass finished; nr_flushed = keys flushed this pass. */
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);
427
428 /* bset.c: */
429
/* A bpos could not be packed into the node's packed-bkey format. */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
434
435 /* Btree cache: */
436
/* Btree cache shrinker scan: requested, freeable, and actually freed counts. */
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);
456
/* A btree node was reaped from the cache. */
DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* Failed to take the cannibalize lock. */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* Took the cannibalize lock. */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* A cached node was cannibalized to satisfy an allocation. */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* Released the cannibalize lock. */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);
481
482 /* Btree */
483
/* A btree node read was issued. */
DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
488
/* A btree node write was issued: node type, bytes and sectors written. */
TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type , __entry->bytes, __entry->sectors)
);
508
/* A btree node was allocated. */
DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A btree node was freed. */
DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
518
/* Failed to get a btree node reserve; logs required count and error string. */
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(size_t,		required		)
		__array(char,		ret, 32			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
546
/* A btree node was compacted. */
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* Two btree nodes were merged. */
DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A btree node was split. */
DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A btree node was rewritten. */
DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A new btree root was set. */
DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
571
/*
 * Relocking a btree path at a given level failed: dumps the path identity,
 * our own vs total lock hold counts, and the iterator/node lock sequence
 * numbers that mismatched.
 */
TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		__field(u8,		level			)
		__field(u8,		path_idx)
		TRACE_BPOS_entries(pos)
		__array(char,		node, 24		)
		__field(u8,		self_read_count		)
		__field(u8,		self_intent_count)
		__field(u8,		read_count		)
		__field(u8,		intent_count	)
		__field(u32,		iter_lock_seq		)
		__field(u32,		node_lock_seq		)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);

		/* counts held by this transaction */
		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];

		if (IS_ERR(b)) {
			/* node pointer is an error code: log it as a string */
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		} else {
			/* NOTE(review): read_count/intent_count only assigned on
			 * this branch; on the error branch they keep whatever is
			 * in the ring buffer */
			c = six_lock_counts(&path->l[level].b->c.lock);
			__entry->read_count	= c.n[SIX_LOCK_read];
			__entry->intent_count	= c.n[SIX_LOCK_intent];
			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
		}
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);
641
642 TRACE_EVENT(btree_path_upgrade_fail,
643 TP_PROTO(struct btree_trans *trans,
644 unsigned long caller_ip,
645 struct btree_path *path,
646 unsigned level),
647 TP_ARGS(trans, caller_ip, path, level),
648
649 TP_STRUCT__entry(
650 __array(char, trans_fn, 32 )
651 __field(unsigned long, caller_ip )
652 __field(u8, btree_id )
653 __field(u8, level )
654 __field(u8, path_idx)
655 TRACE_BPOS_entries(pos)
656 __field(u8, locked )
657 __field(u8, self_read_count )
658 __field(u8, self_intent_count)
659 __field(u8, read_count )
660 __field(u8, intent_count )
661 __field(u32, iter_lock_seq )
662 __field(u32, node_lock_seq )
663 ),
664
665 TP_fast_assign(
666 struct six_lock_count c;
667
668 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
669 __entry->caller_ip = caller_ip;
670 __entry->btree_id = path->btree_id;
671 __entry->level = level;
672 __entry->path_idx = path - trans->paths;
673 TRACE_BPOS_assign(pos, path->pos);
674 __entry->locked = btree_node_locked(path, level);
675
676 c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
677 __entry->self_read_count = c.n[SIX_LOCK_read];
678 __entry->self_intent_count = c.n[SIX_LOCK_intent];
679 c = six_lock_counts(&path->l[level].b->c.lock);
680 __entry->read_count = c.n[SIX_LOCK_read];
681 __entry->intent_count = c.n[SIX_LOCK_intent];
682 __entry->iter_lock_seq = path->l[level].lock_seq;
683 __entry->node_lock_seq = is_btree_node(path, level)
684 ? six_lock_seq(&path->l[level].b->c.lock)
685 : 0;
686 ),
687
688 TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
689 __entry->trans_fn,
690 (void *) __entry->caller_ip,
691 __entry->path_idx,
692 bch2_btree_id_str(__entry->btree_id),
693 __entry->pos_inode,
694 __entry->pos_offset,
695 __entry->pos_snapshot,
696 __entry->level,
697 __entry->locked,
698 __entry->self_read_count,
699 __entry->self_intent_count,
700 __entry->read_count,
701 __entry->intent_count,
702 __entry->iter_lock_seq,
703 __entry->node_lock_seq)
704 );
705
706 /* Garbage collection */
707
/* Bucket generation GC started. */
DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Bucket generation GC finished. */
DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
717
718 /* Allocator */
719
/* A bucket was allocated. */
DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Bucket allocation failed. */
DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
729
730 DECLARE_EVENT_CLASS(discard_buckets_class,
731 TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
732 u64 need_journal_commit, u64 discarded, const char *err),
733 TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
734
735 TP_STRUCT__entry(
736 __field(dev_t, dev )
737 __field(u64, seen )
738 __field(u64, open )
739 __field(u64, need_journal_commit )
740 __field(u64, discarded )
741 __array(char, err, 16 )
742 ),
743
744 TP_fast_assign(
745 __entry->dev = c->dev;
746 __entry->seen = seen;
747 __entry->open = open;
748 __entry->need_journal_commit = need_journal_commit;
749 __entry->discarded = discarded;
750 strscpy(__entry->err, err, sizeof(__entry->err));
751 ),
752
753 TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
754 MAJOR(__entry->dev), MINOR(__entry->dev),
755 __entry->seen,
756 __entry->open,
757 __entry->need_journal_commit,
758 __entry->discarded,
759 __entry->err)
760 );
761
/* Normal discard pass completed. */
DEFINE_EVENT(discard_buckets_class, discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
);

/* Fast-path discard pass completed. */
DEFINE_EVENT(discard_buckets_class, discard_buckets_fast,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
);
773
/* A cached-data bucket was invalidated; 'sectors' = cached sectors dropped. */
TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);
797
798 /* Moving IO */
799
/* Evacuation of a bucket started; bucket is identified as member:offset. */
TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		/* bucket pos encodes member device in .inode, bucket nr in .offset */
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);
820
/* An extent move was started. */
DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Read phase of an extent move. */
DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Write phase of an extent move. */
DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Extent move completed. */
DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Extent move failed. */
DEFINE_EVENT(fs_str, move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* Extent move could not be started. */
DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
850
851 TRACE_EVENT(move_data,
852 TP_PROTO(struct bch_fs *c,
853 struct bch_move_stats *stats),
854 TP_ARGS(c, stats),
855
856 TP_STRUCT__entry(
857 __field(dev_t, dev )
858 __field(u64, keys_moved )
859 __field(u64, keys_raced )
860 __field(u64, sectors_seen )
861 __field(u64, sectors_moved )
862 __field(u64, sectors_raced )
863 ),
864
865 TP_fast_assign(
866 __entry->dev = c->dev;
867 __entry->keys_moved = atomic64_read(&stats->keys_moved);
868 __entry->keys_raced = atomic64_read(&stats->keys_raced);
869 __entry->sectors_seen = atomic64_read(&stats->sectors_seen);
870 __entry->sectors_moved = atomic64_read(&stats->sectors_moved);
871 __entry->sectors_raced = atomic64_read(&stats->sectors_raced);
872 ),
873
874 TP_printk("%d,%d keys moved %llu raced %llu"
875 "sectors seen %llu moved %llu raced %llu",
876 MAJOR(__entry->dev), MINOR(__entry->dev),
877 __entry->keys_moved,
878 __entry->keys_raced,
879 __entry->sectors_seen,
880 __entry->sectors_moved,
881 __entry->sectors_raced)
882 );
883
/* Result of evacuating one bucket: sectors moved out of bucket_size, and ret. */
TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		member			)
		__field(u64,		bucket			)
		__field(u32,		sectors			)
		__field(u32,		bucket_size		)
		__field(int,		ret			)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		/* bucket pos encodes member device in .inode, bucket nr in .offset */
		__entry->member			= bucket->inode;
		__entry->bucket			= bucket->offset;
		__entry->sectors		= sectors;
		__entry->bucket_size		= bucket_size;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->ret)
);
914
/* Copygc pass summary: buckets processed and sectors seen/moved. */
TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 buckets,
		 u64 sectors_seen,
		 u64 sectors_moved),
	TP_ARGS(c, buckets, sectors_seen, sectors_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		buckets			)
		__field(u64,		sectors_seen		)
		__field(u64,		sectors_moved		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->buckets		= buckets;
		__entry->sectors_seen		= sectors_seen;
		__entry->sectors_moved		= sectors_moved;
	),

	TP_printk("%d,%d buckets %llu sectors seen %llu moved %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->buckets,
		  __entry->sectors_seen,
		  __entry->sectors_moved)
);
942
/* Copygc is waiting: how many sectors must be freed and until when. */
TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		wait_amount		)
		__field(u64,		until			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->wait_amount	= wait_amount;
		__entry->until		= until;
	),

	/* "%d,%u" is inconsistent with the "%d,%d" used elsewhere, but harmless */
	TP_printk("%d,%u waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount, __entry->until)
);
964
965 /* btree transactions: */
966
/* Event class: transaction function name plus the caller instruction pointer. */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
984
/* A transaction committed. */
DEFINE_EVENT(transaction_event,	transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* A transaction restart was injected (fault injection). */
DEFINE_EVENT(transaction_event,	trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
996
/* Transaction restarted after racing with a btree node split. */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		level			)
		__field(u16,		written			)
		__field(u16,		blocks			)
		__field(u16,		u64s_remaining		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level			= b->c.level;
		__entry->written		= b->written;
		__entry->blocks			= btree_blocks(trans->c);
		__entry->u64s_remaining		= bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
1027
/* Transaction blocked waiting on journal reclaim; snapshots key cache state. */
TRACE_EVENT(trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)

		__field(unsigned long,	key_cache_nr_keys	)
		__field(unsigned long,	key_cache_nr_dirty	)
		__field(long,		must_wait		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->key_cache_nr_keys	= atomic_long_read(&trans->c->btree_key_cache.nr_keys);
		__entry->key_cache_nr_dirty	= atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
		__entry->must_wait		= __bch2_btree_key_cache_must_wait(trans->c);
	),

	TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->key_cache_nr_keys,
		  __entry->key_cache_nr_dirty,
		  __entry->must_wait)
);
1056
/* Transaction restarted while getting a journal pre-reservation. */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(unsigned,	flags			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
1079
/* Restart caused by fault injection. */
DEFINE_EVENT(transaction_event,	trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Full traversal of all paths was required. */
DEFINE_EVENT(transaction_event,	trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart after racing on a key cache entry. */
DEFINE_EVENT(transaction_event,	trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart because the transaction exceeded its iterator/path budget. */
DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);
1104
/* Event class: transaction restart attributed to a specific btree path. */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1133
/* Restart because a btree node was reused under us. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart because a btree node split under us. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1147
/*
 * Restart because a lock upgrade failed; 'f' describes which level failed
 * and the node (if any) whose lock sequence we lost.
 */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__field(u8,		btree_id		)
		__field(u8,		old_locks_want		)
		__field(u8,		new_locks_want		)
		__field(u8,		level			)
		__field(u32,		path_seq		)
		__field(u32,		node_seq		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		/* f->b may be an error pointer or NULL when no node was involved */
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);
1194
/* Restart because a relock failed; the failure details arrive pre-formatted in 'str'. */
DEFINE_EVENT(trans_str, trans_restart_relock,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str)
);

/* Restart while relocking to advance to the next btree node. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart relocking the parent node prior to a node fill. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart relocking after a node fill completed. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart upgrading a key cache lock. */
DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Restart relocking during a key cache fill. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart relocking a single path. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart relocking a path with intent locks. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart during path traversal. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart after a memory allocation failure. */
DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

/* Restart to avoid deadlock; 'cycle' is a pre-formatted description of the lock cycle. */
DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);

/* Restart because deadlock-cycle detection hit its recursion limit. */
DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1273
/*
 * Restart to avoid deadlock while taking write locks.
 * Only the transaction function name is recorded.
 */
TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
1288
/*
 * Restart because the transaction's preallocated memory was reallocated;
 * 'bytes' is the new size requested.
 */
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
1312
/*
 * Restart because a key cache entry was reallocated; records the old and
 * new key sizes (in u64s) along with the path's btree id and position.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
1350
/* Restart to flush the btree write buffer. */
DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1356
/*
 * A path's locks_want was lowered (lock downgrade).
 * old_locks_want comes from the caller; the new value is read back from
 * path->locks_want at assign time.
 */
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1392
/*
 * A key cache fill; 'key' is a pre-formatted string describing the key
 * being filled.
 */
TRACE_EVENT(key_cache_fill,
	TP_PROTO(struct btree_trans *trans, const char *key),
	TP_ARGS(trans, key),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32	)
		__string(key,		key		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(key);
	),

	TP_printk("%s %s", __entry->trans_fn, __get_str(key))
);
1409
/*
 * Write buffer flush statistics: nr entries flushed out of size total,
 * how many were skipped and how many took the fast path.
 * Note: 'trans' is accepted for call-site symmetry but not recorded.
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1431
/* A synchronous write buffer flush; records who requested it. */
TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
1448
/*
 * Write buffer flush slow path: 'slowpath' of 'total' entries needed
 * slow-path handling. 'trans' is accepted but not recorded.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
1465
1466 TRACE_EVENT(write_buffer_maybe_flush,
1467 TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *key),
1468 TP_ARGS(trans, caller_ip, key),
1469
1470 TP_STRUCT__entry(
1471 __array(char, trans_fn, 32 )
1472 __field(unsigned long, caller_ip )
1473 __string(key, key )
1474 ),
1475
1476 TP_fast_assign(
1477 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1478 __assign_str(key);
1479 ),
1480
1481 TP_printk("%s %pS %s", __entry->trans_fn, (void *) __entry->caller_ip, __get_str(key))
1482 );
1483
/* Rebalance processed an extent; details are pre-formatted in 'str'. */
DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

/* A data update (move/rewrite); details are pre-formatted in 'str'. */
DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1493
/*
 * A private bcachefs error code was downcast to a standard errno before
 * leaving the filesystem; records both codes (as strings) and the call site.
 */
TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32	)
		__array(char,		std_err, 32	)
		__array(char,		ip, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		/* symbolize the IP at assign time so the record is self-contained */
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
1512
1513 #ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
1514
/*
 * An update was queued against a path; records the path index (offset into
 * trans->paths), the update's index within trans->updates, and the current
 * number of updates.
 */
TRACE_EVENT(update_by_path,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
		 struct btree_insert_entry *i, bool overwrite),
	TP_ARGS(trans, path, i, overwrite),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	path_idx	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u8,			overwrite	)
		__field(btree_path_idx_t,	update_idx	)
		__field(btree_path_idx_t,	nr_updates	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->path_idx	= path - trans->paths;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->overwrite	= overwrite;
		__entry->update_idx	= i - trans->updates;
		__entry->nr_updates	= trans->nr_updates;
	),

	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
		  __entry->trans_fn,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->overwrite,
		  __entry->update_idx,
		  __entry->nr_updates)
);
1551
/*
 * A btree node (or key cache entry) was locked; records the node's raw
 * address (%px, intentionally unhashed for low-level debugging) and its
 * six-lock sequence number.
 */
TRACE_EVENT(btree_path_lock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_bkey_cached_common *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__array(char,			node, 24	)
		__field(u32,			lock_seq	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= b->btree_id;
		__entry->level			= b->level;

		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->lock_seq		= six_lock_seq(&b->lock);
	),

	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->level,
		  __entry->node,
		  __entry->lock_seq)
);
1585
/*
 * Common payload for per-path events: path index within trans->paths,
 * refcount, btree id, and position.
 */
DECLARE_EVENT_CLASS(btree_path_ev,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(u16,		idx		)
		__field(u8,		ref		)
		__field(u8,		btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->ref		= path->ref;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
		  __entry->idx, __entry->ref,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1611
/* Low-level path reference acquired. */
DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

/* Low-level path reference released. */
DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

/* Path marked as should_be_locked. */
DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
1626
/* A new btree path was allocated; records its slot index, locks_want,
 * btree id, and position. */
TRACE_EVENT(btree_path_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->locks_want	= path->locks_want;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
		  __entry->idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1653
/*
 * A path reference was taken; records the path's current position and the
 * position it is being requested at (old -> new).
 */
TRACE_EVENT(btree_path_get,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->ref		= path->ref;
		__entry->preserve	= path->preserve;
		__entry->locks_want	= path->locks_want;
		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);
	),

	TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot)
);
1691
/*
 * Common payload for clone-style events: source path index/state and the
 * new path's index ("%u -> %u" in the output).
 */
DECLARE_EVENT_CLASS(btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			new_idx		)
		__field(u8,			btree_id	)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx		= path - trans->paths;
		__entry->new_idx	= new - trans->paths;
		__entry->btree_id	= path->btree_id;
		__entry->ref		= path->ref;
		__entry->preserve	= path->preserve;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->new_idx)
);
1724
/* A path was cloned. */
DEFINE_EVENT(btree_path_clone, btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

/* A path's position was saved into a new path. */
DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);
1734
1735 DECLARE_EVENT_CLASS(btree_path_traverse,
1736 TP_PROTO(struct btree_trans *trans,
1737 struct btree_path *path),
1738 TP_ARGS(trans, path),
1739
1740 TP_STRUCT__entry(
1741 __array(char, trans_fn, 32 )
1742 __field(btree_path_idx_t, idx )
1743 __field(u8, ref )
1744 __field(u8, preserve )
1745 __field(u8, should_be_locked )
1746 __field(u8, btree_id )
1747 __field(u8, level )
1748 TRACE_BPOS_entries(pos)
1749 __field(u8, locks_want )
1750 __field(u8, nodes_locked )
1751 __array(char, node0, 24 )
1752 __array(char, node1, 24 )
1753 __array(char, node2, 24 )
1754 __array(char, node3, 24 )
1755 ),
1756
1757 TP_fast_assign(
1758 strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
1759
1760 __entry->idx = path - trans->paths;
1761 __entry->ref = path->ref;
1762 __entry->preserve = path->preserve;
1763 __entry->btree_id = path->btree_id;
1764 __entry->level = path->level;
1765 TRACE_BPOS_assign(pos, path->pos);
1766
1767 __entry->locks_want = path->locks_want;
1768 __entry->nodes_locked = path->nodes_locked;
1769 struct btree *b = path->l[0].b;
1770 if (IS_ERR(b))
1771 strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1772 else
1773 scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
1774 b = path->l[1].b;
1775 if (IS_ERR(b))
1776 strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1777 else
1778 scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
1779 b = path->l[2].b;
1780 if (IS_ERR(b))
1781 strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1782 else
1783 scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
1784 b = path->l[3].b;
1785 if (IS_ERR(b))
1786 strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1787 else
1788 scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
1789 ),
1790
1791 TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
1792 "locks %u %u %u %u node %s %s %s %s",
1793 __entry->trans_fn,
1794 __entry->idx,
1795 __entry->ref,
1796 __entry->preserve,
1797 bch2_btree_id_str(__entry->btree_id),
1798 __entry->pos_inode,
1799 __entry->pos_offset,
1800 __entry->pos_snapshot,
1801 __entry->level,
1802 __entry->locks_want,
1803 (__entry->nodes_locked >> 6) & 3,
1804 (__entry->nodes_locked >> 4) & 3,
1805 (__entry->nodes_locked >> 2) & 3,
1806 (__entry->nodes_locked >> 0) & 3,
1807 __entry->node3,
1808 __entry->node2,
1809 __entry->node1,
1810 __entry->node0)
1811 );
1812
/* Path traversal started. */
DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path)
);

/* Path traversal finished. */
DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
1823
1824 TRACE_EVENT(btree_path_set_pos,
1825 TP_PROTO(struct btree_trans *trans,
1826 struct btree_path *path,
1827 struct bpos *new_pos),
1828 TP_ARGS(trans, path, new_pos),
1829
1830 TP_STRUCT__entry(
1831 __field(btree_path_idx_t, idx )
1832 __field(u8, ref )
1833 __field(u8, preserve )
1834 __field(u8, btree_id )
1835 TRACE_BPOS_entries(old_pos)
1836 TRACE_BPOS_entries(new_pos)
1837 __field(u8, locks_want )
1838 __field(u8, nodes_locked )
1839 __array(char, node0, 24 )
1840 __array(char, node1, 24 )
1841 __array(char, node2, 24 )
1842 __array(char, node3, 24 )
1843 ),
1844
1845 TP_fast_assign(
1846 __entry->idx = path - trans->paths;
1847 __entry->ref = path->ref;
1848 __entry->preserve = path->preserve;
1849 __entry->btree_id = path->btree_id;
1850 TRACE_BPOS_assign(old_pos, path->pos);
1851 TRACE_BPOS_assign(new_pos, *new_pos);
1852
1853 __entry->nodes_locked = path->nodes_locked;
1854 struct btree *b = path->l[0].b;
1855 if (IS_ERR(b))
1856 strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1857 else
1858 scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
1859 b = path->l[1].b;
1860 if (IS_ERR(b))
1861 strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1862 else
1863 scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
1864 b = path->l[2].b;
1865 if (IS_ERR(b))
1866 strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1867 else
1868 scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
1869 b = path->l[3].b;
1870 if (IS_ERR(b))
1871 strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
1872 else
1873 scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
1874 ),
1875
1876 TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
1877 "locks %u %u %u %u node %s %s %s %s",
1878 __entry->idx,
1879 __entry->ref,
1880 __entry->preserve,
1881 bch2_btree_id_str(__entry->btree_id),
1882 __entry->old_pos_inode,
1883 __entry->old_pos_offset,
1884 __entry->old_pos_snapshot,
1885 __entry->new_pos_inode,
1886 __entry->new_pos_offset,
1887 __entry->new_pos_snapshot,
1888 (__entry->nodes_locked >> 6) & 3,
1889 (__entry->nodes_locked >> 4) & 3,
1890 (__entry->nodes_locked >> 2) & 3,
1891 (__entry->nodes_locked >> 0) & 3,
1892 __entry->node3,
1893 __entry->node2,
1894 __entry->node1,
1895 __entry->node0)
1896 );
1897
/*
 * A path was freed; records preserve/should_be_locked flags, the index of
 * a duplicate path if one exists (-1 otherwise), and whether the duplicate
 * holds a node lock at its level.
 */
TRACE_EVENT(btree_path_free,
	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
	TP_ARGS(trans, path, dup),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked)
		__field(s8,			dup		)
		__field(u8,			dup_locked	)
	),

	TP_fast_assign(
		__entry->idx		= path;
		__entry->preserve	= trans->paths[path].preserve;
		__entry->should_be_locked = trans->paths[path].should_be_locked;
		__entry->dup		= dup ? dup - trans->paths : -1;
		__entry->dup_locked	= dup ? btree_node_locked(dup, dup->level) : 0;
	),

	TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
		  __entry->preserve ? 'P' : ' ',
		  __entry->should_be_locked ? 'S' : ' ',
		  __entry->dup,
		  __entry->dup_locked)
);
1924
/* A path was freed during transaction begin; only the path index is recorded. */
TRACE_EVENT(btree_path_free_trans_begin,
	TP_PROTO(btree_path_idx_t path),
	TP_ARGS(path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
	),

	TP_fast_assign(
		__entry->idx		= path;
	),

	TP_printk(" path %3u", __entry->idx)
);
1939
1940 #else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
1941 #ifndef _TRACE_BCACHEFS_H
1942
/*
 * No-op stubs used when CONFIG_BCACHEFS_PATH_TRACEPOINTS is disabled, so
 * call sites compile unchanged. (Source lines were garbled by fused
 * anchor-text from extraction; reconstructed here.)
 */
static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
					struct btree_insert_entry *i, bool overwrite) {}
static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
1958
1959 #endif
1960 #endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
1961
1962 #define _TRACE_BCACHEFS_H
1963 #endif /* _TRACE_BCACHEFS_H */
1964
1965 /* This part must be outside protection */
1966 #undef TRACE_INCLUDE_PATH
1967 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1968
1969 #undef TRACE_INCLUDE_FILE
1970 #define TRACE_INCLUDE_FILE trace
1971
1972 #include <trace/define_trace.h>
1973