/* xref: /linux/include/trace/events/writeback.h (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM writeback
4 
5 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_WRITEBACK_H
7 
8 #include <linux/tracepoint.h>
9 #include <linux/backing-dev.h>
10 #include <linux/writeback.h>
11 
/*
 * show_inode_state - render an inode i_state bitmask as "FLAG|FLAG|...".
 *
 * Used by the TP_printk() format strings below.  The table must be kept
 * in sync with the I_* flag definitions in <linux/fs.h>.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_REFERENCED,		"I_REFERENCED"},	\
		{I_LINKABLE,		"I_LINKABLE"},		\
		{I_WB_SWITCH,		"I_WB_SWITCH"},		\
		{I_OVL_INUSE,		"I_OVL_INUSE"},		\
		{I_CREATING,		"I_CREATING"},		\
		{I_DONTCACHE,		"I_DONTCACHE"},		\
		{I_SYNC_QUEUED,		"I_SYNC_QUEUED"},	\
		{I_PINNING_NETFS_WB,	"I_PINNING_NETFS_WB"},	\
		{I_LRU_ISOLATING,	"I_LRU_ISOLATING"}	\
	)
33 
34 /* enums need to be exported to user space */
#undef EM
#undef EMe
/*
 * First expansion of EM()/EMe(): emit TRACE_DEFINE_ENUM() for every
 * WB_REASON_* value so user-space trace parsers can resolve the names.
 */
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/*
 * Reasons a writeback work item may be queued; decoded into the strings
 * below via __print_symbolic() in the events that log work->reason.
 */
#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EM( WB_REASON_FORKER_THREAD,		"forker_thread")	\
	EMe(WB_REASON_FOREIGN_FLUSH,		"foreign_flush")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
60 
61 struct wb_writeback_work;
62 
/*
 * Template for per-folio writeback events: records the bdi device name,
 * the owning inode number and the folio's index within the file.
 */
DECLARE_EVENT_CLASS(writeback_folio_template,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		/*
		 * mapping (or mapping->host) may be NULL; bdi_dev_name()
		 * is passed NULL in that case and ino falls back to 0.
		 */
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
		__entry->index = folio->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

/* Folio marked dirty. */
DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);

/* Waiting for writeback of a folio to complete. */
DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);
103 
/*
 * Template for inode-dirtying events: captures the bdi name, inode number,
 * the inode's current i_state and the dirty flags being applied.
 */
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	/* flags share the I_* namespace, so show_inode_state() decodes both */
	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
155 
/*
 * cgroup-ino helpers used by TP_fast_assign() bodies below.  They are only
 * wanted while the tracepoint bodies are instantiated, hence the
 * CREATE_TRACE_POINTS guard (this header is included multiple times).
 */
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/* Inode number of the cgroup whose memcg this wb writes back for. */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

/* Same via a writeback_control; 1 when no wb is attached to the wbc. */
static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

/* !CONFIG_CGROUP_WRITEBACK stubs: always report cgroup_ino=1. */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
185 
#ifdef CONFIG_CGROUP_WRITEBACK
/*
 * Foreign-writeback history recorded for an inode.  "history" is an opaque
 * bitmask maintained by the caller (see fs/fs-writeback.c); printed in hex.
 */
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);

/* Inode being switched from one bdi_writeback (cgroup) to another. */
TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);

/*
 * A folio dirtied under a wb whose cgroup differs from the folio's memcg:
 * logs both the wb's cgroup and the folio's own memcg cgroup inode.
 */
TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct folio *folio, struct bdi_writeback *wb),

	TP_ARGS(folio, wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(u64,		bdi_id)
		__field(ino_t,		ino)
		__field(unsigned int,	memcg_id)
		__field(ino_t,		cgroup_ino)
		__field(ino_t,		page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = folio_mapping(folio);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);

/* Foreign flush issued by wb; frn_* identify the target bdi and memcg. */
TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	frn_bdi_id)
		__field(unsigned int,	frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif	/* CONFIG_CGROUP_WRITEBACK */
311 
/*
 * Template for inode write-out events: bdi name, inode number, the
 * requested sync mode and the owning cgroup.
 */
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

/*
 * Template for wb_writeback_work lifecycle events (queue/exec/start/
 * written/wait): snapshots the work item's parameters at that point.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		/* work->sb may be NULL for bdi-wide (not per-sb) writeback */
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
/* Shorthand for defining events on writeback_work_class. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
402 
/* Number of pages written by one pass of the flusher. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

/* Minimal template for events that only need the wb identity. */
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
/* Shorthand for defining events on writeback_class. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

/* A backing device being registered; logs only its name. */
TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);
451 
/*
 * Template dumping a full writeback_control: counts, mode flags and the
 * byte range being written.  range_start/range_end are truncated to long
 * for display.
 */
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)

/* Shorthand for defining events on wbc_class. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

/* Dirty inodes moved onto the wb's I/O list for processing. */
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= dirtied_before;
		/* age of the cutoff, in milliseconds */
		__entry->age	= (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);
537 
/*
 * Global dirty-page accounting snapshot: per-node vmstat counters plus the
 * thresholds the caller computed and the current global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
580 
/* Convert a page count to kilobytes (shift by PAGE_SHIFT - 10). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

/*
 * Per-wb dirty-throttling rate update: bandwidth estimates and the various
 * ratelimits, all reported in KB/s.
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);
628 
/*
 * Snapshot of one balance_dirty_pages() throttling decision: global and
 * per-bdi setpoints, ratelimits and the pause actually imposed on the
 * dirtying task.  Times are converted from jiffies to milliseconds.
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		/* freerun: midpoint of the background and dirty thresholds */
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		/* scale the global setpoint by this bdi's share (+1 avoids /0) */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* time the task spent "thinking" since its last pause */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	  )
);
713 
/*
 * An inode being requeued during per-sb writeback; logs its state and how
 * long ago it was dirtied (age printed in seconds).
 */
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);
745 
/*
 * Template around writing back a single inode: records the write budget
 * (nr_to_write) and how much of it was consumed ("wrote" is the budget
 * minus what remains in the wbc afterwards).
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
805 
/*
 * Lightweight inode template keyed by superblock device rather than bdi
 * name; records i_state, mode and when the inode was dirtied.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
865 
866 #endif /* _TRACE_WRITEBACK_H */
867 
868 /* This part must be outside protection */
869 #include <trace/define_trace.h>
870