/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

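/*
 * Because TRACE_SYSTEM is "bcachefs", every event defined here is exposed
 * under /sys/kernel/tracing/events/bcachefs/. Illustrative example of
 * standard tracefs usage (not part of this header):
 *
 *	echo 1 > /sys/kernel/tracing/events/bcachefs/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */
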
#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)

#include <linux/tracepoint.h>

#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot

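/*
 * TRACE_BPOS_entries()/TRACE_BPOS_assign() are paired helpers: the former
 * declares the inode/offset/snapshot fields for a struct bpos in
 * TP_STRUCT__entry(), the latter fills them in TP_fast_assign(); they are
 * used below as e.g. TRACE_BPOS_entries(pos) with
 * TRACE_BPOS_assign(pos, path->pos).
 */
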
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);

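/*
 * Each DECLARE_EVENT_CLASS() below defines a template; DEFINE_EVENT() then
 * stamps out individual events sharing that layout, each callable as
 * trace_<event>(). A minimal sketch of a call site for an fs_str event
 * (assumed shape, not taken from this file):
 *
 *	struct printbuf buf = PRINTBUF;
 *	bch2_bkey_val_to_text(&buf, c, k);
 *	trace_move_extent_fail(c, buf.buf);
 *	printbuf_exit(&buf);
 */
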
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);

DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);

DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);

DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);

DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);

DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);

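/*
 * The bio class mirrors the block layer tracepoints: device, start sector,
 * length in 512-byte sectors (bi_size is in bytes, hence the >> 9), and the
 * rwbs operation string produced by blk_fill_rwbs().
 */
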
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

/* disk_accounting.c */

TRACE_EVENT(accounting_mem_insert,
	TP_PROTO(struct bch_fs *c, const char *acc),
	TP_ARGS(c, acc),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	new_nr			)
		__string(acc,		acc			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->new_nr		= c->accounting.k.nr;
		__assign_str(acc);
	),

	TP_printk("%d,%d entries %u added %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->new_nr,
		  __get_str(acc))
);

/* fs.c: */
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),

	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	int,	wait			)
	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->wait	= wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);

/* fs-io.c: */
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),

	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(	ino_t,	parent			)
		__field(	int,	datasync		)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev		= dentry->d_sb->s_dev;
		__entry->ino		= d_inode(dentry)->i_ino;
		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
		__entry->datasync	= datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);

/* super-io.c: */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);

/* io.c: */

DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__array(char,		ret, 32		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Journal */

DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu btree key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);

TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);

/* bset.c: */

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);

/* Btree cache: */

TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);

DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

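/*
 * In the relock/upgrade failure output below, "held r:i" is this
 * transaction's own read:intent hold counts on the node (from
 * bch2_btree_node_lock_counts()), while "lock count r:i" is the total
 * across all holders (from six_lock_counts()).
 */
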
TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__field(u8,			path_idx	)
		TRACE_BPOS_entries(pos)
		__array(char,			node, 24	)
		__field(u8,			self_read_count	)
		__field(u8,			self_intent_count)
		__field(u8,			read_count	)
		__field(u8,			intent_count	)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];

		if (IS_ERR(b)) {
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		} else {
			c = six_lock_counts(&path->l[level].b->c.lock);
			__entry->read_count	= c.n[SIX_LOCK_read];
			__entry->intent_count	= c.n[SIX_LOCK_intent];
			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
		}
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

TRACE_EVENT(btree_path_upgrade_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__field(u8,			path_idx	)
		TRACE_BPOS_entries(pos)
		__field(u8,			locked		)
		__field(u8,			self_read_count	)
		__field(u8,			self_intent_count)
		__field(u8,			read_count	)
		__field(u8,			intent_count	)
		__field(u32,			iter_lock_seq	)
		__field(u32,			node_lock_seq	)
	),

	TP_fast_assign(
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->level			= level;
		__entry->path_idx		= path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->locked			= btree_node_locked(path, level);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count	= c.n[SIX_LOCK_read];
		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
		c = six_lock_counts(&path->l[level].b->c.lock);
		__entry->read_count		= c.n[SIX_LOCK_read];
		__entry->intent_count		= c.n[SIX_LOCK_intent];
		__entry->iter_lock_seq		= path->l[level].lock_seq;
		__entry->node_lock_seq		= is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locked,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);

/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Allocator */

DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

TRACE_EVENT(discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		seen			)
		__field(u64,		open			)
		__field(u64,		need_journal_commit	)
		__field(u64,		discarded		)
		__array(char,		err,	16		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->seen			= seen;
		__entry->open			= open;
		__entry->need_journal_commit	= need_journal_commit;
		__entry->discarded		= discarded;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->discarded,
		  __entry->err)
);

TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);

/* Moving IO */

TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);

DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

TRACE_EVENT(move_data,
	TP_PROTO(struct bch_fs *c,
		 struct bch_move_stats *stats),
	TP_ARGS(c, stats),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(u64,		keys_moved	)
		__field(u64,		keys_raced	)
		__field(u64,		sectors_seen	)
		__field(u64,		sectors_moved	)
		__field(u64,		sectors_raced	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->keys_moved	= atomic64_read(&stats->keys_moved);
		__entry->keys_raced	= atomic64_read(&stats->keys_raced);
		__entry->sectors_seen	= atomic64_read(&stats->sectors_seen);
		__entry->sectors_moved	= atomic64_read(&stats->sectors_moved);
		__entry->sectors_raced	= atomic64_read(&stats->sectors_raced);
	),

	TP_printk("%d,%d keys moved %llu raced %llu "
		  "sectors seen %llu moved %llu raced %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->keys_moved,
		  __entry->keys_raced,
		  __entry->sectors_seen,
		  __entry->sectors_moved,
		  __entry->sectors_raced)
);

TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(u64,		member		)
		__field(u64,		bucket		)
		__field(u32,		sectors		)
		__field(u32,		bucket_size	)
		__field(int,		ret		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->member			= bucket->inode;
		__entry->bucket			= bucket->offset;
		__entry->sectors		= sectors;
		__entry->bucket_size		= bucket_size;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->ret)
);

TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 sectors_moved, u64 sectors_not_moved,
		 u64 buckets_moved, u64 buckets_not_moved),
	TP_ARGS(c,
		sectors_moved, sectors_not_moved,
		buckets_moved, buckets_not_moved),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		sectors_moved		)
		__field(u64,		sectors_not_moved	)
		__field(u64,		buckets_moved		)
		__field(u64,		buckets_not_moved	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->sectors_moved		= sectors_moved;
		__entry->sectors_not_moved	= sectors_not_moved;
		__entry->buckets_moved		= buckets_moved;
		__entry->buckets_not_moved	= buckets_not_moved;
	),

	TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->sectors_moved, __entry->sectors_not_moved,
		  __entry->buckets_moved, __entry->buckets_not_moved)
);

TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		wait_amount		)
		__field(u64,		until			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->wait_amount	= wait_amount;
		__entry->until		= until;
	),

	TP_printk("%d,%d waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount, __entry->until)
);

/* btree transactions: */

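/*
 * transaction_event and the trans_restart_* events below fire when a btree
 * transaction commits or must be restarted; the recorded caller_ip is
 * printed with %pS, so it resolves to the symbol of the code that triggered
 * the restart.
 */
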
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

DEFINE_EVENT(transaction_event,	transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);

TRACE_EVENT(trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)

		__field(unsigned long,		key_cache_nr_keys	)
		__field(unsigned long,		key_cache_nr_dirty	)
		__field(long,			must_wait		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->key_cache_nr_keys	= atomic_long_read(&trans->c->btree_key_cache.nr_keys);
		__entry->key_cache_nr_dirty	= atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
		__entry->must_wait		= __bch2_btree_key_cache_must_wait(trans->c);
	),

	TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->key_cache_nr_keys,
		  __entry->key_cache_nr_dirty,
		  __entry->must_wait)
);

TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);

DEFINE_EVENT(transaction_event,	trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);

DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		__field(u8,			level		)
		__field(u32,			path_seq	)
		__field(u32,			node_seq	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);

DEFINE_EVENT(trans_str,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event,	trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);

DEFINE_EVENT(transaction_event,	trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);

TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);

TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);

DEFINE_EVENT(transaction_event,	trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

TRACE_EVENT(key_cache_fill,
	TP_PROTO(struct btree_trans *trans, const char *key),
	TP_ARGS(trans, key),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32	)
		__string(key,		key		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(key);
	),

	TP_printk("%s %s", __entry->trans_fn, __get_str(key))
);

TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);

TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);

TRACE_EVENT(write_buffer_maybe_flush,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *key),
	TP_ARGS(trans, caller_ip, key),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__string(key,			key		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__assign_str(key);
	),

	TP_printk("%s %pS %s", __entry->trans_fn, (void *) __entry->caller_ip, __get_str(key))
);

DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32		)
		__array(char,		std_err, 32		)
		__array(char,		ip, 32			)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);

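/*
 * The btree_path tracepoints below compile to real events only with
 * CONFIG_BCACHEFS_PATH_TRACEPOINTS=y; otherwise the inline no-op stubs at
 * the bottom of this file keep call sites building with zero overhead.
 */
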
#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS

TRACE_EVENT(update_by_path,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
		 struct btree_insert_entry *i, bool overwrite),
	TP_ARGS(trans, path, i, overwrite),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	path_idx	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u8,			overwrite	)
		__field(btree_path_idx_t,	update_idx	)
		__field(btree_path_idx_t,	nr_updates	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->path_idx		= path - trans->paths;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->overwrite		= overwrite;
		__entry->update_idx		= i - trans->updates;
		__entry->nr_updates		= trans->nr_updates;
	),

	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
		  __entry->trans_fn,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->overwrite,
		  __entry->update_idx,
		  __entry->nr_updates)
);

TRACE_EVENT(btree_path_lock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_bkey_cached_common *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			level		)
		__array(char,			node, 24	)
		__field(u32,			lock_seq	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= b->btree_id;
		__entry->level			= b->level;

		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->lock_seq		= six_lock_seq(&b->lock);
	),

	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->level,
		  __entry->node,
		  __entry->lock_seq)
);

DECLARE_EVENT_CLASS(btree_path_ev,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(u16,			idx		)
		__field(u8,			ref		)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
		  __entry->idx, __entry->ref,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

TRACE_EVENT(btree_path_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->locks_want		= path->locks_want;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
		  __entry->idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

TRACE_EVENT(btree_path_get,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			locks_want	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		__entry->locks_want		= path->locks_want;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);
	),

	TP_printk("    path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot)
);

DECLARE_EVENT_CLASS(btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			new_idx		)
		__field(u8,			btree_id	)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->new_idx		= new - trans->paths;
		__entry->btree_id		= path->btree_id;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("  path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->new_idx)
);

DEFINE_EVENT(btree_path_clone, btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

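/*
 * nodes_locked packs the lock state two bits per level (hence the
 * >> 6/4/2/0 shifts and & 3 masks in TP_printk() below); node0..node3 are
 * the per-level btree node pointers, or an error string when the level's
 * node is IS_ERR().
 */
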
DECLARE_EVENT_CLASS(btree_path_traverse,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked )
		__field(u8,			btree_id	)
		__field(u8,			level		)
		TRACE_BPOS_entries(pos)
		__field(u8,			locks_want	)
		__field(u8,			nodes_locked	)
		__array(char,			node0, 24	)
		__array(char,			node1, 24	)
		__array(char,			node2, 24	)
		__array(char,			node3, 24	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));

		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		__entry->btree_id		= path->btree_id;
		__entry->level			= path->level;
		TRACE_BPOS_assign(pos, path->pos);

		__entry->locks_want		= path->locks_want;
		__entry->nodes_locked		= path->nodes_locked;
		struct btree *b = path->l[0].b;
		if (IS_ERR(b))
			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
		else
			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
		b = path->l[1].b;
		if (IS_ERR(b))
			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
		else
			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
		b = path->l[2].b;
		if (IS_ERR(b))
			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
		else
			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
		b = path->l[3].b;
		if (IS_ERR(b))
			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
		else
			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
	),

	TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
		  "locks %u %u %u %u node %s %s %s %s",
		  __entry->trans_fn,
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locks_want,
		  (__entry->nodes_locked >> 6) & 3,
		  (__entry->nodes_locked >> 4) & 3,
		  (__entry->nodes_locked >> 2) & 3,
		  (__entry->nodes_locked >> 0) & 3,
		  __entry->node3,
		  __entry->node2,
		  __entry->node1,
		  __entry->node0)
);

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

TRACE_EVENT(btree_path_set_pos,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path,
		 struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			ref		)
		__field(u8,			preserve	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
		__field(u8,			locks_want	)
		__field(u8,			nodes_locked	)
		__array(char,			node0, 24	)
		__array(char,			node1, 24	)
		__array(char,			node2, 24	)
		__array(char,			node3, 24	)
	),

	TP_fast_assign(
		__entry->idx			= path - trans->paths;
		__entry->ref			= path->ref;
		__entry->preserve		= path->preserve;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);

		__entry->nodes_locked		= path->nodes_locked;
		struct btree *b = path->l[0].b;
		if (IS_ERR(b))
			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
		else
			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
		b = path->l[1].b;
		if (IS_ERR(b))
			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
		else
			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
		b = path->l[2].b;
		if (IS_ERR(b))
			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
		else
			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
		b = path->l[3].b;
		if (IS_ERR(b))
			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
		else
			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
	),

	TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
		  "locks %u %u %u %u node %s %s %s %s",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot,
		  (__entry->nodes_locked >> 6) & 3,
		  (__entry->nodes_locked >> 4) & 3,
		  (__entry->nodes_locked >> 2) & 3,
		  (__entry->nodes_locked >> 0) & 3,
		  __entry->node3,
		  __entry->node2,
		  __entry->node1,
		  __entry->node0)
);

TRACE_EVENT(btree_path_free,
	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
	TP_ARGS(trans, path, dup),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
		__field(u8,			preserve	)
		__field(u8,			should_be_locked)
		__field(s8,			dup		)
		__field(u8,			dup_locked	)
	),

	TP_fast_assign(
		__entry->idx			= path;
		__entry->preserve		= trans->paths[path].preserve;
		__entry->should_be_locked	= trans->paths[path].should_be_locked;
		__entry->dup			= dup ? dup - trans->paths : -1;
		__entry->dup_locked		= dup ? btree_node_locked(dup, dup->level) : 0;
	),

	TP_printk("   path %3u %c %c dup %2i locked %u", __entry->idx,
		  __entry->preserve ? 'P' : ' ',
		  __entry->should_be_locked ? 'S' : ' ',
		  __entry->dup,
		  __entry->dup_locked)
);

TRACE_EVENT(btree_path_free_trans_begin,
	TP_PROTO(btree_path_idx_t path),
	TP_ARGS(path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t,	idx		)
	),

	TP_fast_assign(
		__entry->idx			= path;
	),

	TP_printk("   path %3u", __entry->idx)
);

#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
#ifndef _TRACE_BCACHEFS_H

static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
					struct btree_insert_entry *i, bool overwrite) {}
static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}

#endif
#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */

#define _TRACE_BCACHEFS_H
#endif /* _TRACE_BCACHEFS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../fs/bcachefs

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>