#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

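/*
 * As with all trace event headers, this file is deliberately re-readable:
 * when TRACE_HEADER_MULTI_READ is set, <trace/define_trace.h> includes it
 * several times to expand the event definitions below into code.
 */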
#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

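/*
 * Request events: log the bio (device, rw flags, sector, length) together
 * with the bcache device it was issued to.  orig_sector subtracts the
 * default 16-sector (8KB) data offset of the backing device, so it
 * reflects the sector as originally submitted.
 */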
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(sector_t,	orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	8		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_sector;
		__entry->orig_sector	= bio->bi_sector - 16;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

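/*
 * Key events: log a single bkey as inode:offset, its length in sectors,
 * and whether it points at dirty data.
 */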
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

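/*
 * Btree node events: a node is identified by the cache bucket its first
 * pointer lands in.
 */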
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

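/*
 * Each DEFINE_EVENT() below generates a trace_<name>() function that the
 * driver calls at the matching point; a sketch of a typical call site
 * (the real callers live in drivers/md/bcache/request.c):
 *
 *	trace_bcache_request_start(d, bio);
 *	... submit and service the request ...
 *	trace_bcache_request_end(d, bio);
 */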
DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

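/*
 * Plain bio events: just the device, starting sector, length and rw
 * flags of the bio in question.
 */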
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	8		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d  %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

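/*
 * Read: in addition to the bio, record whether it hit the cache and
 * whether it bypassed the cache entirely.
 */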
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	8		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

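/*
 * Write: as bcache_read, but records whether the write goes to the cache
 * in writeback mode and whether it bypassed the cache.
 */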
TRACE_EVENT(bcache_write,
	TP_PROTO(struct bio *bio, bool writeback, bool bypass),
	TP_ARGS(bio, writeback, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	8		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_sector;
		__entry->nr_sector	= bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d  %s %llu + %u writeback %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

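/*
 * Cache set events: the set is identified by its 16-byte set UUID,
 * printed with the %pU format extension.
 */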
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16 )
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->sets[b->nsets].data->keys;
	),

	TP_printk("bucket %zu written %u keys %u",
		  __entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

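/*
 * Key insertion into a btree node: logs the node (bucket and level),
 * the key itself, and the caller-supplied op and status codes.
 */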
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

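/*
 * Node split/compact events: the affected bucket plus the number of
 * keys involved.
 */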
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

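/*
 * Key scan: how many keys a scan found and the inode:offset range it
 * covered.
 */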
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

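/*
 * Allocator events: snapshots of the per-cache free/free_inc/unused FIFO
 * occupancy; bcache_alloc_fail also records the set's prio_blocked count.
 */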
TRACE_EVENT(bcache_alloc_invalidate,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	free_inc_size		)
		__field(unsigned,	unused			)
	),

	TP_fast_assign(
		__entry->free		= fifo_used(&ca->free);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->free_inc_size	= ca->free_inc.size;
		__entry->unused		= fifo_used(&ca->unused);
	),

	TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
		  __entry->free_inc, __entry->free_inc_size, __entry->unused)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	unused			)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->free		= fifo_used(&ca->free);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->unused		= fifo_used(&ca->unused);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
		  __entry->free_inc, __entry->unused, __entry->blocked)
);

/* Background writeback */

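/*
 * Writeback events: the key being written back to the backing device,
 * and collisions with keys that changed while writeback was in flight.
 */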
DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>