xref: /linux/fs/bcachefs/trace.h (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4 
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHEFS_H
7 
8 #include <linux/tracepoint.h>
9 
/*
 * Helpers for recording a struct bpos in a trace entry: expand to the three
 * __entry fields (<name>_inode, <name>_offset, <name>_snapshot) and to the
 * matching assignments from a struct bpos value.
 */
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
19 
/* Event class: log a single btree position as inode:offset:snapshot. */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
34 
/* Event class: filesystem dev number plus a caller-formatted string. */
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
51 
/*
 * Event class: dev number, transaction function name, caller IP, and a
 * caller-formatted string.
 */
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
74 
/* Like trans_str, but without a caller IP (for events with no useful one). */
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);
95 
/*
 * Event class: identify a btree node (level, btree id, key position) for
 * contexts where no btree_trans is available.
 */
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
120 
/* Event class: btree_node_nofs plus the transaction function name. */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
147 
/* Event class: filesystem dev number only. */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
162 
/* Event class: dev number plus the transaction function name. */
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);
179 
/* Event class: log a bio's device, sector range, and rwbs op string. */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		/* bi_bdev may not be set yet (e.g. bio not submitted) */
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
202 
/* super-io.c: */

/* Superblock write, with the IP of the code that requested it. */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
222 
/* io.c: */

/* A read triggered a promote (rewrite to a faster device). */
DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
229 
/* A read was not promoted; @ret records why (as an error string). */
TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__array(char,		ret, 32		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);
248 
/* Read-path events: bounce buffering, splits, retries, and reuse races. */
DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
268 
/* Journal */

/* Journal has no space for a new entry. */
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
290 
/*
 * Journal reclaim pass starting: records whether it runs in the caller's
 * context (direct) or was kicked, the flush targets, and the current
 * dirty/total counts for the btree cache and btree key cache.
 */
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
335 
/* Journal reclaim pass done; @nr_flushed is the number of entries flushed. */
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);
354 
/* bset.c: */

/* A bpos could not be packed into the node's compressed key format. */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
361 
/* Btree cache: */

/* Shrinker scan of the btree node cache. */
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);
383 
/* Btree cache reaping and cannibalize-lock lifecycle events. */
DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);
408 
/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
415 
/* Btree node write: node type plus the bytes/sectors being written. */
TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type , __entry->bytes, __entry->sectors)
);
435 
/* Btree node allocation / free. */
DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
445 
/*
 * Failed to get a btree node reserve of @required nodes; @ret is recorded
 * as a bcachefs error string.
 */
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
473 
/* Btree node structural operations. */
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
498 
499 TRACE_EVENT(btree_path_relock_fail,
500 	TP_PROTO(struct btree_trans *trans,
501 		 unsigned long caller_ip,
502 		 struct btree_path *path,
503 		 unsigned level),
504 	TP_ARGS(trans, caller_ip, path, level),
505 
506 	TP_STRUCT__entry(
507 		__array(char,			trans_fn, 32	)
508 		__field(unsigned long,		caller_ip	)
509 		__field(u8,			btree_id	)
510 		__field(u8,			level		)
511 		TRACE_BPOS_entries(pos)
512 		__array(char,			node, 24	)
513 		__field(u8,			self_read_count	)
514 		__field(u8,			self_intent_count)
515 		__field(u8,			read_count	)
516 		__field(u8,			intent_count	)
517 		__field(u32,			iter_lock_seq	)
518 		__field(u32,			node_lock_seq	)
519 	),
520 
521 	TP_fast_assign(
522 		struct btree *b = btree_path_node(path, level);
523 		struct six_lock_count c;
524 
525 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
526 		__entry->caller_ip		= caller_ip;
527 		__entry->btree_id		= path->btree_id;
528 		__entry->level			= path->level;
529 		TRACE_BPOS_assign(pos, path->pos);
530 
531 		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
532 		__entry->self_read_count	= c.n[SIX_LOCK_read];
533 		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
534 
535 		if (IS_ERR(b)) {
536 			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
537 		} else {
538 			c = six_lock_counts(&path->l[level].b->c.lock);
539 			__entry->read_count	= c.n[SIX_LOCK_read];
540 			__entry->intent_count	= c.n[SIX_LOCK_intent];
541 			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
542 		}
543 		__entry->iter_lock_seq		= path->l[level].lock_seq;
544 		__entry->node_lock_seq		= is_btree_node(path, level)
545 			? six_lock_seq(&path->l[level].b->c.lock)
546 			: 0;
547 	),
548 
549 	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
550 		  __entry->trans_fn,
551 		  (void *) __entry->caller_ip,
552 		  bch2_btree_id_str(__entry->btree_id),
553 		  __entry->pos_inode,
554 		  __entry->pos_offset,
555 		  __entry->pos_snapshot,
556 		  __entry->level,
557 		  __entry->node,
558 		  __entry->self_read_count,
559 		  __entry->self_intent_count,
560 		  __entry->read_count,
561 		  __entry->intent_count,
562 		  __entry->iter_lock_seq,
563 		  __entry->node_lock_seq)
564 );
565 
566 TRACE_EVENT(btree_path_upgrade_fail,
567 	TP_PROTO(struct btree_trans *trans,
568 		 unsigned long caller_ip,
569 		 struct btree_path *path,
570 		 unsigned level),
571 	TP_ARGS(trans, caller_ip, path, level),
572 
573 	TP_STRUCT__entry(
574 		__array(char,			trans_fn, 32	)
575 		__field(unsigned long,		caller_ip	)
576 		__field(u8,			btree_id	)
577 		__field(u8,			level		)
578 		TRACE_BPOS_entries(pos)
579 		__field(u8,			locked		)
580 		__field(u8,			self_read_count	)
581 		__field(u8,			self_intent_count)
582 		__field(u8,			read_count	)
583 		__field(u8,			intent_count	)
584 		__field(u32,			iter_lock_seq	)
585 		__field(u32,			node_lock_seq	)
586 	),
587 
588 	TP_fast_assign(
589 		struct six_lock_count c;
590 
591 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
592 		__entry->caller_ip		= caller_ip;
593 		__entry->btree_id		= path->btree_id;
594 		__entry->level			= level;
595 		TRACE_BPOS_assign(pos, path->pos);
596 		__entry->locked			= btree_node_locked(path, level);
597 
598 		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
599 		__entry->self_read_count	= c.n[SIX_LOCK_read];
600 		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
601 		c = six_lock_counts(&path->l[level].b->c.lock);
602 		__entry->read_count		= c.n[SIX_LOCK_read];
603 		__entry->intent_count		= c.n[SIX_LOCK_intent];
604 		__entry->iter_lock_seq		= path->l[level].lock_seq;
605 		__entry->node_lock_seq		= is_btree_node(path, level)
606 			? six_lock_seq(&path->l[level].b->c.lock)
607 			: 0;
608 	),
609 
610 	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
611 		  __entry->trans_fn,
612 		  (void *) __entry->caller_ip,
613 		  bch2_btree_id_str(__entry->btree_id),
614 		  __entry->pos_inode,
615 		  __entry->pos_offset,
616 		  __entry->pos_snapshot,
617 		  __entry->level,
618 		  __entry->locked,
619 		  __entry->self_read_count,
620 		  __entry->self_intent_count,
621 		  __entry->read_count,
622 		  __entry->intent_count,
623 		  __entry->iter_lock_seq,
624 		  __entry->node_lock_seq)
625 );
626 
/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
638 
/* Allocator */

DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
650 
651 TRACE_EVENT(discard_buckets,
652 	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
653 		 u64 need_journal_commit, u64 discarded, const char *err),
654 	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
655 
656 	TP_STRUCT__entry(
657 		__field(dev_t,		dev			)
658 		__field(u64,		seen			)
659 		__field(u64,		open			)
660 		__field(u64,		need_journal_commit	)
661 		__field(u64,		discarded		)
662 		__array(char,		err,	16		)
663 	),
664 
665 	TP_fast_assign(
666 		__entry->dev			= c->dev;
667 		__entry->seen			= seen;
668 		__entry->open			= open;
669 		__entry->need_journal_commit	= need_journal_commit;
670 		__entry->discarded		= discarded;
671 		strscpy(__entry->err, err, sizeof(__entry->err));
672 	),
673 
674 	TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
675 		  MAJOR(__entry->dev), MINOR(__entry->dev),
676 		  __entry->seen,
677 		  __entry->open,
678 		  __entry->need_journal_commit,
679 		  __entry->discarded,
680 		  __entry->err)
681 );
682 
/* A cached-data bucket was invalidated; @sectors were discarded from it. */
TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);
706 
/* Moving IO */

/* Start evacuating a bucket; bucket->inode is the member device index. */
TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);
729 
/* Extent-move lifecycle events, keyed by a formatted extent string. */
DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
759 
760 TRACE_EVENT(move_data,
761 	TP_PROTO(struct bch_fs *c,
762 		 struct bch_move_stats *stats),
763 	TP_ARGS(c, stats),
764 
765 	TP_STRUCT__entry(
766 		__field(dev_t,		dev		)
767 		__field(u64,		keys_moved	)
768 		__field(u64,		keys_raced	)
769 		__field(u64,		sectors_seen	)
770 		__field(u64,		sectors_moved	)
771 		__field(u64,		sectors_raced	)
772 	),
773 
774 	TP_fast_assign(
775 		__entry->dev		= c->dev;
776 		__entry->keys_moved	= atomic64_read(&stats->keys_moved);
777 		__entry->keys_raced	= atomic64_read(&stats->keys_raced);
778 		__entry->sectors_seen	= atomic64_read(&stats->sectors_seen);
779 		__entry->sectors_moved	= atomic64_read(&stats->sectors_moved);
780 		__entry->sectors_raced	= atomic64_read(&stats->sectors_raced);
781 	),
782 
783 	TP_printk("%d,%d keys moved %llu raced %llu"
784 		  "sectors seen %llu moved %llu raced %llu",
785 		  MAJOR(__entry->dev), MINOR(__entry->dev),
786 		  __entry->keys_moved,
787 		  __entry->keys_raced,
788 		  __entry->sectors_seen,
789 		  __entry->sectors_moved,
790 		  __entry->sectors_raced)
791 );
792 
/* Result of evacuating one bucket: how full it was, and the return code. */
TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 u64 fragmentation, int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(u64,		member		)
		__field(u64,		bucket		)
		__field(u32,		sectors		)
		__field(u32,		bucket_size	)
		__field(u64,		fragmentation	)
		__field(int,		ret		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->member			= bucket->inode;
		__entry->bucket			= bucket->offset;
		__entry->sectors		= sectors;
		__entry->bucket_size		= bucket_size;
		__entry->fragmentation		= fragmentation;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->fragmentation, __entry->ret)
);
825 
826 TRACE_EVENT(copygc,
827 	TP_PROTO(struct bch_fs *c,
828 		 u64 sectors_moved, u64 sectors_not_moved,
829 		 u64 buckets_moved, u64 buckets_not_moved),
830 	TP_ARGS(c,
831 		sectors_moved, sectors_not_moved,
832 		buckets_moved, buckets_not_moved),
833 
834 	TP_STRUCT__entry(
835 		__field(dev_t,		dev			)
836 		__field(u64,		sectors_moved		)
837 		__field(u64,		sectors_not_moved	)
838 		__field(u64,		buckets_moved		)
839 		__field(u64,		buckets_not_moved	)
840 	),
841 
842 	TP_fast_assign(
843 		__entry->dev			= c->dev;
844 		__entry->sectors_moved		= sectors_moved;
845 		__entry->sectors_not_moved	= sectors_not_moved;
846 		__entry->buckets_moved		= buckets_moved;
847 		__entry->buckets_not_moved = buckets_moved;
848 	),
849 
850 	TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
851 		  MAJOR(__entry->dev), MINOR(__entry->dev),
852 		  __entry->sectors_moved, __entry->sectors_not_moved,
853 		  __entry->buckets_moved, __entry->buckets_not_moved)
854 );
855 
856 TRACE_EVENT(copygc_wait,
857 	TP_PROTO(struct bch_fs *c,
858 		 u64 wait_amount, u64 until),
859 	TP_ARGS(c, wait_amount, until),
860 
861 	TP_STRUCT__entry(
862 		__field(dev_t,		dev			)
863 		__field(u64,		wait_amount		)
864 		__field(u64,		until			)
865 	),
866 
867 	TP_fast_assign(
868 		__entry->dev		= c->dev;
869 		__entry->wait_amount	= wait_amount;
870 		__entry->until		= until;
871 	),
872 
873 	TP_printk("%d,%u waiting for %llu sectors until %llu",
874 		  MAJOR(__entry->dev), MINOR(__entry->dev),
875 		  __entry->wait_amount, __entry->until)
876 );
877 
/* btree transactions: */

/* Event class: transaction function name plus caller IP. */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
897 
/* Transaction commit, and restarts injected for testing. */
DEFINE_EVENT(transaction_event,	transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
909 
/* Transaction restarted after racing with a btree node split. */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
940 
/* Transaction blocked waiting on journal reclaim. */
DEFINE_EVENT(transaction_event,	trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
946 
/* Restart because a journal pre-reservation could not be obtained. */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
969 
/* Miscellaneous transaction restart reasons. */
DEFINE_EVENT(transaction_event,	trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event,	trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);
994 
/*
 * Event class: transaction restart attributed to a particular btree path
 * (btree id + position).
 */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1023 
/* Restarts caused by btree node reuse or splits during traversal. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1037 
/*
 * Restart because a lock upgrade failed; @f describes which level failed
 * and the node involved (may be an error pointer or NULL).
 */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		__field(u8,			level		)
		__field(u32,			path_seq	)
		__field(u32,			node_seq	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		/* no node at the failing level -> no lock seq to report */
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);
1084 
1085 DEFINE_EVENT(trans_str,	trans_restart_relock,
1086 	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
1087 	TP_ARGS(trans, caller_ip, str)
1088 );
1089 
1090 DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
1091 	TP_PROTO(struct btree_trans *trans,
1092 		 unsigned long caller_ip,
1093 		 struct btree_path *path),
1094 	TP_ARGS(trans, caller_ip, path)
1095 );
1096 
/* Restart: could not relock a parent node needed for a node fill (see caller). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1103 
/* Restart: relock failed after a node fill completed (see caller). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1110 
/*
 * Restart during a key cache lock upgrade.  transaction_event (defined
 * earlier in this file) records only trans_fn + caller_ip — no path info.
 */
DEFINE_EVENT(transaction_event,	trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1116 
/* Restart: relock failed while filling a key cache entry (see caller). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1123 
/* Restart: could not relock a btree path (see caller for lock type). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1130 
/* Restart: could not relock a btree path with an intent lock (see caller). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1137 
/* Restart triggered during btree path traversal (see caller). */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1144 
/* Restart: a memory allocation failed mid-transaction; retried after restart. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1151 
/*
 * Restart: lock acquisition would deadlock; @cycle is a preformatted
 * description of the detected lock cycle.  trans_str_nocaller (defined
 * earlier in this file) is the trans_str variant without a caller_ip.
 */
DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);
1157 
/* Restart: deadlock-cycle detection hit its recursion limit (see caller). */
DEFINE_EVENT(transaction_event,	trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1163 
/*
 * Restart: taking a write lock would deadlock.  Records only the
 * transaction function name — no caller_ip or path, unlike the other
 * would_deadlock events.
 */
TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
1178 
/*
 * Restart: the transaction's preallocated memory was grown to @bytes,
 * invalidating pointers into the old buffer, so the transaction must
 * restart (per the event name; see caller for the exact trigger).
 */
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
1202 
/*
 * Restart: a key cache entry was reallocated to hold a larger key
 * (old_u64s -> new_u64s, in units of u64s).  Records the btree and
 * position of the affected path.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
1240 
/*
 * A btree path's locks_want was downgraded: old_locks_want is passed in
 * by the caller, the new value is read back from path->locks_want at
 * trace time.  Also records the path's btree and position.
 */
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		/* path->locks_want has already been updated by this point */
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1276 
/* Restart forced by a write buffer flush (see caller for the condition). */
DEFINE_EVENT(transaction_event,	trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1282 
/*
 * Write buffer flush statistics: @nr entries processed out of a buffer of
 * @size, @skipped skipped, @fast taken via the fast path.  Note @trans is
 * accepted for API symmetry but nothing from it is recorded.
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1304 
/*
 * A synchronous write buffer flush was requested; records which
 * transaction and call site triggered it.
 */
TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
1321 
/*
 * @slowpath of @total write buffer entries had to take the slow path
 * during a flush.  @trans is accepted but not recorded.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
1338 
/*
 * An extent was processed by rebalance; @str is a preformatted
 * description.  Uses the fs_str class (dev + free-form string).
 */
DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1343 
/* A data update (move/rewrite) was performed; @str carries the details. */
DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1348 
/*
 * A private bcachefs error code was downcast to a standard errno before
 * being returned.  Both codes are rendered to strings at trace time via
 * bch2_err_str(), and the call site is symbolized with "%ps" into a
 * fixed 32-byte buffer.
 */
TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32		)
		__array(char,		std_err, 32		)
		__array(char,		ip, 32			)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		/* symbolize now: the ip may belong to an unloadable module later */
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
1367 
1368 #endif /* _TRACE_BCACHEFS_H */
1369 
1370 /* This part must be outside protection */
1371 #undef TRACE_INCLUDE_PATH
1372 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1373 
1374 #undef TRACE_INCLUDE_FILE
1375 #define TRACE_INCLUDE_FILE trace
1376 
1377 #include <trace/define_trace.h>
1378