xref: /linux/include/trace/events/writeback.h (revision a7f7f6248d9740d710fd6bd190293fe5e16410ac)
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

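/*
 * Decode an inode i_state style bit mask into the "|"-separated flag names
 * shown in the trace output.
 */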
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

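/*
 * First expansion: emit a TRACE_DEFINE_ENUM() for every writeback reason so
 * the numeric enum values are visible to user space tracing tools.
 */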
WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

struct wb_writeback_work;

DECLARE_EVENT_CLASS(writeback_page_template,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

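/*
 * Helpers used by the cgroup-aware events below: map a bdi_writeback (or the
 * wb attached to a writeback_control) to the inode number of its memory
 * cgroup.  Without CONFIG_CGROUP_WRITEBACK, or when no wb is attached, the
 * root cgroup's inode number (1) is reported instead.  These are only needed
 * while the tracepoints themselves are being instantiated.
 */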
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */

#ifdef CONFIG_CGROUP_WRITEBACK
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);

TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);

TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct page *page, struct bdi_writeback *wb),

	TP_ARGS(page, wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(u64,		bdi_id)
		__field(ino_t,		ino)
		__field(unsigned int,	memcg_id)
		__field(ino_t,		cgroup_ino)
		__field(ino_t,		page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = page_mapping(page);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);

TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	frn_bdi_id)
		__field(unsigned int,	frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif

DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= older_than_this ?  *older_than_this : 0;
		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);

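/*
 * Snapshot of the global dirty page accounting compared against the current
 * background and hard dirty thresholds.
 */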
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

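/* Convert a page count to KB: PAGE_SHIFT - 10 is log2(PAGE_SIZE / 1024). */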
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);

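/*
 * One sample of the dirty throttling state seen by balance_dirty_pages():
 * the global limit/setpoint, the per-wb setpoint and dirty count, the
 * ratelimits (in KB/s via KBps()), and the pause/period/think times in ms.
 */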
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	  )
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);

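/*
 * How long a task slept waiting for congestion to clear: the timeout it
 * asked for and the delay it actually saw, both in microseconds.
 */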
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

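/*
 * Per-inode writeback progress: "wrote" is how many of the nr_to_write pages
 * requested for this pass were actually consumed
 * (nr_to_write - wbc->nr_to_write).
 */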
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>