/* xref: /linux/fs/bcachefs/trace.h (revision 1b1934dbbdcf9aa2d507932ff488cec47999cf3f) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM bcachefs
4 
5 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHEFS_H
7 
8 #include <linux/tracepoint.h>
9 
/*
 * Helpers for embedding a struct bpos (inode:offset:snapshot) in an event:
 * TRACE_BPOS_entries() declares the three fields inside TP_STRUCT__entry,
 * TRACE_BPOS_assign() fills them inside TP_fast_assign.
 */
#define TRACE_BPOS_entries(name)				\
	__field(u64,			name##_inode	)	\
	__field(u64,			name##_offset	)	\
	__field(u32,			name##_snapshot	)

#define TRACE_BPOS_assign(dst, src)				\
	__entry->dst##_inode		= (src).inode;		\
	__entry->dst##_offset		= (src).offset;		\
	__entry->dst##_snapshot		= (src).snapshot
19 
/* Event class: log a single btree key position (inode:offset:snapshot). */
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
34 
/* Event class: filesystem device plus a caller-preformatted string. */
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str, str);
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
51 
/*
 * Event class: btree transaction context (fn name + caller ip) plus a
 * caller-preformatted string.
 */
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(unsigned long,	caller_ip		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__assign_str(str, str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
74 
/* Like trans_str, but for call sites that don't track a caller ip. */
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__string(str,		str			)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str, str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);
95 
/*
 * Event class: a btree node (btree id, level, key position), for contexts
 * that have a bch_fs but no btree_trans.
 */
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
120 
/* Event class: a btree node, with the owning transaction's fn name. */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
		__field(u8,		level			)
		__field(u8,		btree_id		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
147 
/* Event class: just the filesystem device, no other payload. */
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
162 
/* Event class: filesystem device plus transaction fn name only. */
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__array(char,		trans_fn, 32		)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);
179 
/* Event class: a block-layer bio (device, sector, length, rw flags). */
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		/* bi_bdev may be NULL before the bio is bound to a device */
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
202 
203 /* super-io.c: */
/* super-io.c: */

/* Superblock write, with the ip of the code path that requested it. */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned long,	ip	)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
222 
223 /* io.c: */
224 
/* Read path: promotion of a read into a faster tier. */
DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read path: promotion was skipped; ret is the error string for why. */
TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__array(char,		ret, 32		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);

/* Read path: data was bounced through an intermediate buffer. */
DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read path: a read had to be split into multiple bios. */
DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read path: a read is being retried. */
DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Read path: raced with reuse of the extent being read. */
DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
268 
269 /* Journal */
270 
/* Journal has no space for a new entry. */
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Current journal entry is full and must be closed. */
DEFINE_EVENT(bch_fs, journal_entry_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* A journal entry was closed; bytes is its final size. */
TRACE_EVENT(journal_entry_close,
	TP_PROTO(struct bch_fs *c, unsigned bytes),
	TP_ARGS(c, bytes),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		bytes			)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->bytes			= bytes;
	),

	TP_printk("%d,%d entry bytes %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->bytes)
);

/* A journal write bio was submitted. */
DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
304 
/*
 * Journal reclaim pass starting: records targets (min_nr, min_key_cache)
 * and dirty/total counts for the btree cache and btree key cache.
 */
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(bool,		direct			)
		__field(bool,		kicked			)
		__field(u64,		min_nr			)
		__field(u64,		min_key_cache		)
		__field(u64,		btree_cache_dirty	)
		__field(u64,		btree_cache_total	)
		__field(u64,		btree_key_cache_dirty	)
		__field(u64,		btree_key_cache_total	)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
349 
/* Journal reclaim pass finished; nr_flushed is how many nodes were flushed. */
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u64,		nr_flushed		)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);
368 
369 /* bset.c: */
370 
/* A key position could not be packed into the node's packed format. */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
375 
376 /* Btree cache: */
377 
/* Shrinker scan of the btree node cache. */
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan		)
		__field(long,	can_free		)
		__field(long,	ret			)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);
397 
/* A btree node was reaped (freed) from the cache. */
DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

/* Failed to take the cannibalize lock (another thread holds it). */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* Took the cannibalize lock, allowing reuse of a cached node. */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* A cached btree node was cannibalized for a new allocation. */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

/* The cannibalize lock was released. */
DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);
422 
423 /* Btree */
424 
/* A btree node read was started. */
DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
429 
/* A btree node write: node type plus bytes/sectors written. */
TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,	bytes			)
		__field(unsigned,	sectors			)
	),

	TP_fast_assign(
		__entry->type	= btree_node_type(b);
		__entry->bytes	= bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type , __entry->bytes, __entry->sectors)
);
449 
/* A new btree node was allocated. */
DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A btree node was freed. */
DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
459 
/* Failed to get a reserve of btree nodes; ret is the error string. */
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(size_t,			required	)
		__array(char,			ret, 32		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
487 
/* A btree node was compacted. */
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* Two btree nodes were merged. */
DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A btree node was split. */
DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A btree node was rewritten. */
DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

/* A btree node became the new root of its btree. */
DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
512 
513 TRACE_EVENT(btree_path_relock_fail,
514 	TP_PROTO(struct btree_trans *trans,
515 		 unsigned long caller_ip,
516 		 struct btree_path *path,
517 		 unsigned level),
518 	TP_ARGS(trans, caller_ip, path, level),
519 
520 	TP_STRUCT__entry(
521 		__array(char,			trans_fn, 32	)
522 		__field(unsigned long,		caller_ip	)
523 		__field(u8,			btree_id	)
524 		__field(u8,			level		)
525 		TRACE_BPOS_entries(pos)
526 		__array(char,			node, 24	)
527 		__field(u8,			self_read_count	)
528 		__field(u8,			self_intent_count)
529 		__field(u8,			read_count	)
530 		__field(u8,			intent_count	)
531 		__field(u32,			iter_lock_seq	)
532 		__field(u32,			node_lock_seq	)
533 	),
534 
535 	TP_fast_assign(
536 		struct btree *b = btree_path_node(path, level);
537 		struct six_lock_count c;
538 
539 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
540 		__entry->caller_ip		= caller_ip;
541 		__entry->btree_id		= path->btree_id;
542 		__entry->level			= path->level;
543 		TRACE_BPOS_assign(pos, path->pos);
544 
545 		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
546 		__entry->self_read_count	= c.n[SIX_LOCK_read];
547 		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
548 
549 		if (IS_ERR(b)) {
550 			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
551 		} else {
552 			c = six_lock_counts(&path->l[level].b->c.lock);
553 			__entry->read_count	= c.n[SIX_LOCK_read];
554 			__entry->intent_count	= c.n[SIX_LOCK_intent];
555 			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
556 		}
557 		__entry->iter_lock_seq		= path->l[level].lock_seq;
558 		__entry->node_lock_seq		= is_btree_node(path, level)
559 			? six_lock_seq(&path->l[level].b->c.lock)
560 			: 0;
561 	),
562 
563 	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
564 		  __entry->trans_fn,
565 		  (void *) __entry->caller_ip,
566 		  bch2_btree_id_str(__entry->btree_id),
567 		  __entry->pos_inode,
568 		  __entry->pos_offset,
569 		  __entry->pos_snapshot,
570 		  __entry->level,
571 		  __entry->node,
572 		  __entry->self_read_count,
573 		  __entry->self_intent_count,
574 		  __entry->read_count,
575 		  __entry->intent_count,
576 		  __entry->iter_lock_seq,
577 		  __entry->node_lock_seq)
578 );
579 
580 TRACE_EVENT(btree_path_upgrade_fail,
581 	TP_PROTO(struct btree_trans *trans,
582 		 unsigned long caller_ip,
583 		 struct btree_path *path,
584 		 unsigned level),
585 	TP_ARGS(trans, caller_ip, path, level),
586 
587 	TP_STRUCT__entry(
588 		__array(char,			trans_fn, 32	)
589 		__field(unsigned long,		caller_ip	)
590 		__field(u8,			btree_id	)
591 		__field(u8,			level		)
592 		TRACE_BPOS_entries(pos)
593 		__field(u8,			locked		)
594 		__field(u8,			self_read_count	)
595 		__field(u8,			self_intent_count)
596 		__field(u8,			read_count	)
597 		__field(u8,			intent_count	)
598 		__field(u32,			iter_lock_seq	)
599 		__field(u32,			node_lock_seq	)
600 	),
601 
602 	TP_fast_assign(
603 		struct six_lock_count c;
604 
605 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
606 		__entry->caller_ip		= caller_ip;
607 		__entry->btree_id		= path->btree_id;
608 		__entry->level			= level;
609 		TRACE_BPOS_assign(pos, path->pos);
610 		__entry->locked			= btree_node_locked(path, level);
611 
612 		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
613 		__entry->self_read_count	= c.n[SIX_LOCK_read];
614 		__entry->self_intent_count	= c.n[SIX_LOCK_intent];
615 		c = six_lock_counts(&path->l[level].b->c.lock);
616 		__entry->read_count		= c.n[SIX_LOCK_read];
617 		__entry->intent_count		= c.n[SIX_LOCK_intent];
618 		__entry->iter_lock_seq		= path->l[level].lock_seq;
619 		__entry->node_lock_seq		= is_btree_node(path, level)
620 			? six_lock_seq(&path->l[level].b->c.lock)
621 			: 0;
622 	),
623 
624 	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
625 		  __entry->trans_fn,
626 		  (void *) __entry->caller_ip,
627 		  bch2_btree_id_str(__entry->btree_id),
628 		  __entry->pos_inode,
629 		  __entry->pos_offset,
630 		  __entry->pos_snapshot,
631 		  __entry->level,
632 		  __entry->locked,
633 		  __entry->self_read_count,
634 		  __entry->self_intent_count,
635 		  __entry->read_count,
636 		  __entry->intent_count,
637 		  __entry->iter_lock_seq,
638 		  __entry->node_lock_seq)
639 );
640 
641 /* Garbage collection */
642 
/* Bucket generation GC pass started. */
DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Bucket generation GC pass finished. */
DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
652 
653 /* Allocator */
654 
/*
 * Event class: a bucket allocation attempt — reserve name, the chosen
 * bucket, free/available counts, copygc wait state, counters from the
 * allocation scan (struct bucket_alloc_state), and the error string on
 * failure.
 */
DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err),

	TP_STRUCT__entry(
		__field(u8,			dev			)
		__array(char,	reserve,	16			)
		__field(u64,			bucket	)
		__field(u64,			free			)
		__field(u64,			avail			)
		__field(u64,			copygc_wait_amount	)
		__field(s64,			copygc_waiting_for	)
		__field(u64,			seen			)
		__field(u64,			open			)
		__field(u64,			need_journal_commit	)
		__field(u64,			nouse			)
		__field(bool,			nonblocking		)
		__field(u64,			nocow			)
		__array(char,			err,	32		)
	),

	TP_fast_assign(
		__entry->dev		= ca->dev_idx;
		strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
		__entry->bucket		= bucket;
		__entry->free		= free;
		__entry->avail		= avail;
		__entry->copygc_wait_amount	= copygc_wait_amount;
		__entry->copygc_waiting_for	= copygc_waiting_for;
		__entry->seen		= s->buckets_seen;
		__entry->open		= s->skipped_open;
		__entry->need_journal_commit = s->skipped_need_journal_commit;
		__entry->nouse		= s->skipped_nouse;
		__entry->nonblocking	= nonblocking;
		__entry->nocow		= s->skipped_nocow;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
		  __entry->reserve,
		  __entry->dev,
		  __entry->bucket,
		  __entry->free,
		  __entry->avail,
		  __entry->copygc_wait_amount,
		  __entry->copygc_waiting_for,
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->nouse,
		  __entry->nocow,
		  __entry->nonblocking,
		  __entry->err)
);
719 
/* Successful bucket allocation. */
DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);

/* Failed bucket allocation; err carries the reason. */
DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);
749 
750 TRACE_EVENT(discard_buckets,
751 	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
752 		 u64 need_journal_commit, u64 discarded, const char *err),
753 	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
754 
755 	TP_STRUCT__entry(
756 		__field(dev_t,		dev			)
757 		__field(u64,		seen			)
758 		__field(u64,		open			)
759 		__field(u64,		need_journal_commit	)
760 		__field(u64,		discarded		)
761 		__array(char,		err,	16		)
762 	),
763 
764 	TP_fast_assign(
765 		__entry->dev			= c->dev;
766 		__entry->seen			= seen;
767 		__entry->open			= open;
768 		__entry->need_journal_commit	= need_journal_commit;
769 		__entry->discarded		= discarded;
770 		strscpy(__entry->err, err, sizeof(__entry->err));
771 	),
772 
773 	TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
774 		  MAJOR(__entry->dev), MINOR(__entry->dev),
775 		  __entry->seen,
776 		  __entry->open,
777 		  __entry->need_journal_commit,
778 		  __entry->discarded,
779 		  __entry->err)
780 );
781 
/* A cached-data bucket was invalidated; sectors is the cached data dropped. */
TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u32,		sectors			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= dev;
		__entry->sectors	= sectors;
		__entry->bucket		= bucket;
	),

	TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);
805 
806 /* Moving IO */
807 
/* Moving IO */

/* Evacuation of a bucket started; bucket pos is member-device:offset. */
TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(u32,		dev_idx			)
		__field(u64,		bucket			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->dev_idx	= bucket->inode;
		__entry->bucket		= bucket->offset;
	),

	TP_printk("%d:%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);
828 
/* An extent move was started; k is the formatted key. */
DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

/* Read phase of an extent move. */
DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

/* Write phase of an extent move. */
DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);

/* An extent move completed. */
DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *k),
	TP_ARGS(c, k)
);
848 
/* An extent move failed; msg describes why. */
TRACE_EVENT(move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *msg),
	TP_ARGS(c, msg),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__string(msg,		msg			)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(msg, msg);
	),

	TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
);
865 
/* An extent move could not be started. */
DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
870 
871 TRACE_EVENT(move_data,
872 	TP_PROTO(struct bch_fs *c,
873 		 struct bch_move_stats *stats),
874 	TP_ARGS(c, stats),
875 
876 	TP_STRUCT__entry(
877 		__field(dev_t,		dev		)
878 		__field(u64,		keys_moved	)
879 		__field(u64,		keys_raced	)
880 		__field(u64,		sectors_seen	)
881 		__field(u64,		sectors_moved	)
882 		__field(u64,		sectors_raced	)
883 	),
884 
885 	TP_fast_assign(
886 		__entry->dev		= c->dev;
887 		__entry->keys_moved	= atomic64_read(&stats->keys_moved);
888 		__entry->keys_raced	= atomic64_read(&stats->keys_raced);
889 		__entry->sectors_seen	= atomic64_read(&stats->sectors_seen);
890 		__entry->sectors_moved	= atomic64_read(&stats->sectors_moved);
891 		__entry->sectors_raced	= atomic64_read(&stats->sectors_raced);
892 	),
893 
894 	TP_printk("%d,%d keys moved %llu raced %llu"
895 		  "sectors seen %llu moved %llu raced %llu",
896 		  MAJOR(__entry->dev), MINOR(__entry->dev),
897 		  __entry->keys_moved,
898 		  __entry->keys_raced,
899 		  __entry->sectors_seen,
900 		  __entry->sectors_moved,
901 		  __entry->sectors_raced)
902 );
903 
/* Result of evacuating one bucket: sectors moved vs. bucket size, and ret. */
TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 u64 fragmentation, int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(u64,		member		)
		__field(u64,		bucket		)
		__field(u32,		sectors		)
		__field(u32,		bucket_size	)
		__field(u64,		fragmentation	)
		__field(int,		ret		)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->member			= bucket->inode;
		__entry->bucket			= bucket->offset;
		__entry->sectors		= sectors;
		__entry->bucket_size		= bucket_size;
		__entry->fragmentation		= fragmentation;
		__entry->ret			= ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->fragmentation, __entry->ret)
);
936 
937 TRACE_EVENT(copygc,
938 	TP_PROTO(struct bch_fs *c,
939 		 u64 sectors_moved, u64 sectors_not_moved,
940 		 u64 buckets_moved, u64 buckets_not_moved),
941 	TP_ARGS(c,
942 		sectors_moved, sectors_not_moved,
943 		buckets_moved, buckets_not_moved),
944 
945 	TP_STRUCT__entry(
946 		__field(dev_t,		dev			)
947 		__field(u64,		sectors_moved		)
948 		__field(u64,		sectors_not_moved	)
949 		__field(u64,		buckets_moved		)
950 		__field(u64,		buckets_not_moved	)
951 	),
952 
953 	TP_fast_assign(
954 		__entry->dev			= c->dev;
955 		__entry->sectors_moved		= sectors_moved;
956 		__entry->sectors_not_moved	= sectors_not_moved;
957 		__entry->buckets_moved		= buckets_moved;
958 		__entry->buckets_not_moved = buckets_moved;
959 	),
960 
961 	TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
962 		  MAJOR(__entry->dev), MINOR(__entry->dev),
963 		  __entry->sectors_moved, __entry->sectors_not_moved,
964 		  __entry->buckets_moved, __entry->buckets_not_moved)
965 );
966 
967 TRACE_EVENT(copygc_wait,
968 	TP_PROTO(struct bch_fs *c,
969 		 u64 wait_amount, u64 until),
970 	TP_ARGS(c, wait_amount, until),
971 
972 	TP_STRUCT__entry(
973 		__field(dev_t,		dev			)
974 		__field(u64,		wait_amount		)
975 		__field(u64,		until			)
976 	),
977 
978 	TP_fast_assign(
979 		__entry->dev		= c->dev;
980 		__entry->wait_amount	= wait_amount;
981 		__entry->until		= until;
982 	),
983 
984 	TP_printk("%d,%u waiting for %llu sectors until %llu",
985 		  MAJOR(__entry->dev), MINOR(__entry->dev),
986 		  __entry->wait_amount, __entry->until)
987 );
988 
989 /* btree transactions: */
990 
/* Event class: transaction fn name plus caller ip, nothing else. */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
1008 
/* A btree transaction committed. */
DEFINE_EVENT(transaction_event,	transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Transaction restart triggered by fault injection. */
DEFINE_EVENT(transaction_event,	trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1020 
/* Transaction restarted after racing with a btree node split. */
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			level		)
		__field(u16,			written		)
		__field(u16,			blocks		)
		__field(u16,			u64s_remaining	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->level		= b->c.level;
		__entry->written	= b->written;
		__entry->blocks		= btree_blocks(trans->c);
		__entry->u64s_remaining	= bch_btree_keys_u64s_remaining(trans->c, b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
1051 
/* Transaction blocked waiting on journal reclaim. */
DEFINE_EVENT(transaction_event,	trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1057 
/* Transaction restarted because a journal pre-reservation failed. */
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		flags		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->flags			= flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
1080 
/* Transaction restart from injected fault. */
DEFINE_EVENT(transaction_event,	trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Transaction is re-traversing all of its paths. */
DEFINE_EVENT(transaction_event,	trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Transaction restarted after racing on a key cache entry. */
DEFINE_EVENT(transaction_event,	trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

/* Transaction restarted for exceeding the iterator limit; paths lists them. */
DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);
1105 
/*
 * Event class for transaction restarts attributed to a specific btree
 * path: records the transaction fn, caller ip, the path's btree id and
 * its position (inode:offset:snapshot, via the TRACE_BPOS helpers).
 */
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1134 
/* Restart: the btree node at @path was reused out from under us. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1141 
/* Restart: the btree node at @path was split during the transaction. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1148 
/* Defined in the btree locking code; only a pointer is needed here. */
struct get_locks_fail;

/*
 * Restart while upgrading a path's locks from @old_locks_want to
 * @new_locks_want. @f describes the level that failed: f->l is the
 * level, and f->b the node whose lock sequence is captured (0 when
 * f->b is an error pointer or NULL). Comparing path_seq vs node_seq
 * in the output shows whether the node changed under the path.
 */
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(u8,			btree_id	)
		__field(u8,			old_locks_want	)
		__field(u8,			new_locks_want	)
		__field(u8,			level		)
		__field(u32,			path_seq	)
		__field(u32,			node_seq	)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->btree_id		= path->btree_id;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= new_locks_want;
		__entry->level			= f->l;
		__entry->path_seq		= path->l[f->l].lock_seq;
		__entry->node_seq		= IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos)
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);
1197 
/* Restart: failed to relock @path. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1204 
/* Restart: relock failed while advancing to the next btree node. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1211 
/* Restart: couldn't relock the parent node for a node fill. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1218 
/* Restart: relock failed after a node fill completed. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1225 
/* Restart while upgrading a key cache lock (transaction_event layout). */
DEFINE_EVENT(transaction_event,	trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1231 
/* Restart: relock failed during a key cache fill. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1238 
/* Restart: bch2_btree_path_relock() failed on @path. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1245 
/* Restart: failed to relock @path with an intent lock. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1252 
/* Restart raised while traversing @path. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1259 
/* Restart after a memory allocation failure attributed to @path. */
DEFINE_EVENT(transaction_restart_iter,	trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
1266 
/*
 * Restart to break a lock cycle. Uses the trans_str_nocaller class
 * (defined earlier in this file); @cycle is a preformatted description
 * of the detected lock cycle.
 */
DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);
1272 
/* Deadlock-cycle search hit its recursion limit (transaction_event layout). */
DEFINE_EVENT(transaction_event,	trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1278 
/*
 * Would-deadlock restart taken on the write path; no caller ip is
 * available here, so only the transaction fn is recorded.
 */
TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
1293 
/*
 * Restart because the transaction's preallocated memory was grown
 * mid-transaction; @bytes is the new size requested.
 */
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned long,		bytes		)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->bytes		= bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
1317 
/*
 * Restart because a key cache entry was reallocated to a larger size
 * while in use; records the path's btree/pos and the old and new key
 * sizes in u64s.
 */
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(enum btree_id,		btree_id	)
		TRACE_BPOS_entries(pos)
		__field(u32,			old_u64s	)
		__field(u32,			new_u64s	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;

		__entry->btree_id	= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s	= old_u64s;
		__entry->new_u64s	= new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
1355 
/*
 * A path's locks were downgraded. The new locks_want is read from the
 * path (already updated by the time the event fires); @old_locks_want
 * is passed in by the caller.
 */
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
		__field(unsigned,		old_locks_want	)
		__field(unsigned,		new_locks_want	)
		__field(unsigned,		btree		)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
		__entry->old_locks_want		= old_locks_want;
		__entry->new_locks_want		= path->locks_want;
		__entry->btree			= path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
1391 
/* Restart to flush the btree write buffer (transaction_event layout). */
DEFINE_EVENT(transaction_event,	trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
1397 
/*
 * Write buffer flush statistics: @nr entries processed out of @size
 * total, of which @skipped were skipped and @fast took the fast path.
 * @trans is accepted for uniformity but nothing from it is recorded.
 */
TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t,		nr		)
		__field(size_t,		skipped		)
		__field(size_t,		fast		)
		__field(size_t,		size		)
	),

	TP_fast_assign(
		__entry->nr	= nr;
		__entry->skipped = skipped;
		__entry->fast	= fast;
		__entry->size	= size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);
1419 
/*
 * Synchronous write buffer flush requested; records only the
 * transaction fn and caller ip.
 */
TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
1436 
/*
 * Write buffer flush fell back to the slowpath for @slowpath of @total
 * entries. @trans is accepted but not recorded.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
1453 
/* Rebalance processed an extent; @str is a preformatted description (fs_str class). */
DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1458 
/* A data update (move/rewrite) occurred; @str is a preformatted description (fs_str class). */
DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
1463 
1464 #endif /* _TRACE_BCACHEFS_H */
1465 
1466 /* This part must be outside protection */
1467 #undef TRACE_INCLUDE_PATH
1468 #define TRACE_INCLUDE_PATH ../../fs/bcachefs
1469 
1470 #undef TRACE_INCLUDE_FILE
1471 #define TRACE_INCLUDE_FILE trace
1472 
1473 #include <trace/define_trace.h>
1474