/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)

#include <linux/tracepoint.h>

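/*
 * Helpers for logging a struct bpos: TRACE_BPOS_entries() declares the
 * inode/offset/snapshot fields in a TP_STRUCT__entry() block, and
 * TRACE_BPOS_assign() fills them in from a struct bpos in TP_fast_assign().
 */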
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot

DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);

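/*
 * Generic string events: fs_str logs a preformatted string tagged with the
 * filesystem device; trans_str additionally records the transaction function
 * and caller IP. Each DEFINE_EVENT(fs_str, foo, ...) below generates a
 * trace_foo(c, str) call site.
 */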
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);

DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);

DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);

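/*
 * btree node event classes: log the btree ID, level and node position an
 * operation touched; the _nofs variant is for call sites that have a bch_fs
 * but no btree_trans.
 */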
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);

DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

/* errors */

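/*
 * error_throw fires when a bcachefs-private error code is first returned
 * ("thrown"); error_downcast fires when such a code is downcast to a
 * standard errno on its way out of the filesystem. The string fields hold
 * the symbolic names from bch2_err_str().
 */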
TRACE_EVENT(error_throw,
	TP_PROTO(struct bch_fs *c, int bch_err, unsigned long ip),
	TP_ARGS(c, bch_err, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(int,		err			)
		__array(char,		err_str, 32		)
		__array(char,		ip, 32			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->err		= bch_err;
		strscpy(__entry->err_str, bch2_err_str(bch_err), sizeof(__entry->err_str));
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%d,%d %s ret %s", MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ip, __entry->err_str)
);

TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32		)
		__array(char,		std_err, 32		)
		__array(char,		ip, 32			)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s ret %s -> %s", __entry->ip,
		  __entry->bch_err, __entry->std_err)
);

/* disk_accounting.c */

TRACE_EVENT(accounting_mem_insert,
	TP_PROTO(struct bch_fs *c, const char *acc),
	TP_ARGS(c, acc),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	new_nr			)
		__string(acc,		acc			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->new_nr		= c->accounting.k.nr;
		__assign_str(acc);
	),

	TP_printk("%d,%d entries %u added %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->new_nr,
		  __get_str(acc))
);

/* fs.c: */
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),

	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	wait			)
	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->wait	= wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);

/* fs-io.c: */
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),

	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(	ino_t,	parent			)
		__field(	int,	datasync		)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev		= dentry->d_sb->s_dev;
		__entry->ino		= d_inode(dentry)->i_ino;
		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
		__entry->datasync	= datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);

/* super-io.c: */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);

/* io.c: */

DEFINE_EVENT(bio, io_read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(io_read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__array(char,		ret, 32		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);

DEFINE_EVENT(bio, io_read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, io_read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, io_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, io_read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, io_read_fail_and_poison,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* ec.c */

TRACE_EVENT(stripe_create,
	TP_PROTO(struct bch_fs *c, u64 idx, int ret),
	TP_ARGS(c, idx, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		idx			)
		__field(int,		ret			)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->idx			= idx;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d idx %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->idx,
		  __entry->ret)
);

/* Journal */

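/*
 * journal_full fires when no space can be reserved for a new journal entry;
 * journal_entry_full/journal_entry_close log the state of the current
 * journal entry as a preformatted string.
 */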
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu btree key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);

TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);

/* bset.c: */

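/* Fires when a bpos cannot be packed into a node's packed bkey format. */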
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);

/* Btree cache: */

TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* Btree */

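/* Btree node lifecycle: reads, writes, allocation, and topology changes. */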
DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);

DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

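/*
 * Lock contention diagnostics: on a failed relock or upgrade, log who holds
 * the node's six lock (our own read/intent counts plus the totals) and the
 * iterator vs node lock sequence numbers, to show what raced with us.
 */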
TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__field(u8,			path_idx	)
		TRACE_BPOS_entries(pos)
		__array(char,			node, 24	)
		__field(u8,			self_read_count	)
		__field(u8,			self_intent_count)
		__field(u8,			read_count	)
		__field(u8,			intent_count	)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];

		if (IS_ERR(b)) {
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		} else {
			c = six_lock_counts(&path->l[level].b->c.lock);
			__entry->read_count	= c.n[SIX_LOCK_read];
			__entry->intent_count	= c.n[SIX_LOCK_intent];
			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
		}
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

TRACE_EVENT(btree_path_upgrade_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__field(u8,			path_idx	)
		TRACE_BPOS_entries(pos)
		__field(u8,			locked		)
		__field(u8,			self_read_count	)
		__field(u8,			self_intent_count)
		__field(u8,			read_count	)
		__field(u8,			intent_count	)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->locked			= btree_node_locked(path, level);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
		c = six_lock_counts(&path->l[level].b->c.lock);
		__entry->read_count		= c.n[SIX_LOCK_read];
		__entry->intent_count		= c.n[SIX_LOCK_intent];
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locked,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Allocator */

DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DECLARE_EVENT_CLASS(discard_buckets_class,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		seen			)
		__field(u64,		open			)
		__field(u64,		need_journal_commit	)
		__field(u64,		discarded		)
		__array(char,		err,	16		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->seen			= seen;
		__entry->open			= open;
		__entry->need_journal_commit	= need_journal_commit;
		__entry->discarded		= discarded;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->discarded,
		  __entry->err)
);

DEFINE_EVENT(discard_buckets_class, discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
);

DEFINE_EVENT(discard_buckets_class, discard_buckets_fast,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err)
);

TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d,%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);

/* Moving IO */

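/* Data move path: each event logs the extent being moved as a string. */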
DEFINE_EVENT(fs_str, io_move,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_write_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

TRACE_EVENT(move_data,
	TP_PROTO(struct bch_fs *c,
		 struct bch_move_stats *stats),
	TP_ARGS(c, stats),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(u64,		keys_moved	)
		__field(u64,		keys_raced	)
		__field(u64,		sectors_seen	)
		__field(u64,		sectors_moved	)
		__field(u64,		sectors_raced	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->keys_moved	= atomic64_read(&stats->keys_moved);
		__entry->keys_raced	= atomic64_read(&stats->keys_raced);
		__entry->sectors_seen	= atomic64_read(&stats->sectors_seen);
		__entry->sectors_moved	= atomic64_read(&stats->sectors_moved);
		__entry->sectors_raced	= atomic64_read(&stats->sectors_raced);
	),

	TP_printk("%d,%d keys moved %llu raced %llu "
		  "sectors seen %llu moved %llu raced %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->keys_moved,
		  __entry->keys_raced,
		  __entry->sectors_seen,
		  __entry->sectors_moved,
		  __entry->sectors_raced)
);

TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 buckets,
		 u64 sectors_seen,
		 u64 sectors_moved),
	TP_ARGS(c, buckets, sectors_seen, sectors_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		buckets			)
		__field(u64,		sectors_seen		)
		__field(u64,		sectors_moved		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->buckets		= buckets;
		__entry->sectors_seen		= sectors_seen;
		__entry->sectors_moved		= sectors_moved;
	),

	TP_printk("%d,%d buckets %llu sectors seen %llu moved %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->buckets,
		  __entry->sectors_seen,
		  __entry->sectors_moved)
);

TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		wait_amount		)
		__field(u64,		until			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->wait_amount	= wait_amount;
		__entry->until		= until;
	),

	TP_printk("%d,%d waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount, __entry->until)
);

/* btree transactions: */

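/*
 * Transaction restart events: each trans_restart_* tracepoint fires when a
 * btree_trans operation must drop its locks and retry from the top; the
 * event name records why.
 */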
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

DEFINE_EVENT(transaction_event,	transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);

TRACE_EVENT(trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)

		__field(unsigned long,		key_cache_nr_keys	)
		__field(unsigned long,		key_cache_nr_dirty	)
		__field(long,			must_wait		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->key_cache_nr_keys	= atomic_long_read(&trans->c->btree_key_cache.nr_keys);
		__entry->key_cache_nr_dirty	= atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
		__entry->must_wait		= __bch2_btree_key_cache_must_wait(trans->c);
	),

	TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->key_cache_nr_keys,
		  __entry->key_cache_nr_dirty,
		  __entry->must_wait)
);

#if 0
/* todo: bring back dynamic fault injection */
DEFINE_EVENT(transaction_event,	trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
#endif

DEFINE_EVENT(transaction_event,	trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);

DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(fs_str, trans_restart_upgrade,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(trans_str,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);

DEFINE_EVENT(transaction_event,	trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);

TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);

DEFINE_EVENT(transaction_event,	trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

TRACE_EVENT(key_cache_fill,
	TP_PROTO(struct btree_trans *trans, const char *key),
	TP_ARGS(trans, key),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32	)
		__string(key,		key			)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(key);
	),

	TP_printk("%s %s", __entry->trans_fn, __get_str(key))
);

TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);

TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);

TRACE_EVENT(write_buffer_maybe_flush,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *key),
	TP_ARGS(trans, caller_ip, key),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__string(key,			key		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(key);
	),

	TP_printk("%s %pS %s", __entry->trans_fn, (void *) __entry->caller_ip, __get_str(key))
);

DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_pred,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_created_rebalance,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, io_move_evacuate_bucket,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, extent_trim_atomic,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, btree_iter_peek_slot,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, __btree_iter_peek,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, btree_iter_peek_max,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, btree_iter_peek_prev_min,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS

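/*
 * Very high frequency btree_path tracepoints, compiled in only with
 * CONFIG_BCACHEFS_PATH_TRACEPOINTS; the #else branch at the end of this
 * section stubs them out so call sites need no ifdefs.
 */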
TRACE_EVENT(update_by_path,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
		 struct btree_insert_entry *i, bool overwrite),
	TP_ARGS(trans, path, i, overwrite),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	path_idx	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u8,			overwrite	)
		__field(btree_path_idx_t,	update_idx	)
		__field(btree_path_idx_t,	nr_updates	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->path_idx		= path - trans->paths;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->overwrite		= overwrite;
		__entry->update_idx		= i - trans->updates;
		__entry->nr_updates		= trans->nr_updates;
	),

	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
		  __entry->trans_fn,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->overwrite,
		  __entry->update_idx,
		  __entry->nr_updates)
);

TRACE_EVENT(btree_path_lock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_bkey_cached_common *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__array(char,			node, 24	)
		__field(u32,			lock_seq	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= b->btree_id;
		__entry->level			= b->level;

		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->lock_seq		= six_lock_seq(&b->lock);
	),

	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->level,
		  __entry->node,
		  __entry->lock_seq)
);

DECLARE_EVENT_CLASS(btree_path_ev,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(u16,			idx		)
		__field(u8,			ref		)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
		  __entry->idx, __entry->ref,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

TRACE_EVENT(btree_path_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->locks_want		= path->locks_want;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
		  __entry->idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

TRACE_EVENT(btree_path_get,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		__entry->locks_want		= path->locks_want;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);
	),

	TP_printk("    path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot)
);

DECLARE_EVENT_CLASS(btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			new_idx		)
		__field(u8,			btree_id	)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->new_idx		= new - trans->paths;
		__entry->btree_id		= path->btree_id;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("  path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->new_idx)
);

DEFINE_EVENT(btree_path_clone, btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

DECLARE_EVENT_CLASS(btree_path_traverse,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked )
		__field(u8,			btree_id	)
		__field(u8,			level		)
		TRACE_BPOS_entries(pos)
		__field(u8,			locks_want	)
		__field(u8,			nodes_locked	)
		__array(char,			node0, 24	)
		__array(char,			node1, 24	)
		__array(char,			node2, 24	)
		__array(char,			node3, 24	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));

		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		__entry->btree_id		= path->btree_id;
		__entry->level			= path->level;
		TRACE_BPOS_assign(pos, path->pos);

		__entry->locks_want		= path->locks_want;
		__entry->nodes_locked		= path->nodes_locked;
		struct btree *b = path->l[0].b;
		if (IS_ERR(b))
			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
		else
			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
		b = path->l[1].b;
		if (IS_ERR(b))
			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
		else
			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
		b = path->l[2].b;
		if (IS_ERR(b))
			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
		else
			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
		b = path->l[3].b;
		if (IS_ERR(b))
			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
		else
			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
	),

	TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
		  "locks %u %u %u %u node %s %s %s %s",
		  __entry->trans_fn,
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locks_want,
		  (__entry->nodes_locked >> 6) & 3,
		  (__entry->nodes_locked >> 4) & 3,
		  (__entry->nodes_locked >> 2) & 3,
		  (__entry->nodes_locked >> 0) & 3,
		  __entry->node3,
		  __entry->node2,
		  __entry->node1,
		  __entry->node0)
);

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

TRACE_EVENT(btree_path_set_pos,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path,
		 struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
		__field(u8,			locks_want	)
		__field(u8,			nodes_locked	)
		__array(char,			node0, 24	)
		__array(char,			node1, 24	)
		__array(char,			node2, 24	)
		__array(char,			node3, 24	)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);

		__entry->nodes_locked		= path->nodes_locked;
		struct btree *b = path->l[0].b;
		if (IS_ERR(b))
			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
		else
			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
		b = path->l[1].b;
		if (IS_ERR(b))
			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
		else
			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
		b = path->l[2].b;
		if (IS_ERR(b))
			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
		else
			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
		b = path->l[3].b;
		if (IS_ERR(b))
			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
		else
			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
	),

	TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
		  "locks %u %u %u %u node %s %s %s %s",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot,
		  (__entry->nodes_locked >> 6) & 3,
		  (__entry->nodes_locked >> 4) & 3,
		  (__entry->nodes_locked >> 2) & 3,
		  (__entry->nodes_locked >> 0) & 3,
		  __entry->node3,
		  __entry->node2,
		  __entry->node1,
		  __entry->node0)
);

TRACE_EVENT(btree_path_free,
	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
	TP_ARGS(trans, path, dup),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked)
		__field(s8,			dup		)
		__field(u8,			dup_locked	)
	),

	TP_fast_assign(
		__entry->idx			= path;
		__entry->preserve		= trans->paths[path].preserve;
		__entry->should_be_locked	= trans->paths[path].should_be_locked;
		__entry->dup			= dup ? dup - trans->paths : -1;
		__entry->dup_locked		= dup ? btree_node_locked(dup, dup->level) : 0;
	),

	TP_printk("   path %3u %c %c dup %2i locked %u", __entry->idx,
		  __entry->preserve ? 'P' : ' ',
		  __entry->should_be_locked ? 'S' : ' ',
		  __entry->dup,
		  __entry->dup_locked)
);

#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
#ifndef _TRACE_BCACHEFS_H

static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
					struct btree_insert_entry *i, bool overwrite) {}
static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}

#endif
#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */

#define _TRACE_BCACHEFS_H
#endif /* _TRACE_BCACHEFS_H */

/* This part must be outside protection */
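/*
 * define_trace.h re-includes this header to generate the tracepoint
 * definitions; TRACE_INCLUDE_PATH is resolved relative to include/trace.
 */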
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../fs/bcachefs

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>