xref: /linux/include/trace/events/writeback.h (revision 263e777ee3e00d628ac2660f68c82aeab14707b3)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM writeback
4 
5 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_WRITEBACK_H
7 
8 #include <linux/tracepoint.h>
9 #include <linux/backing-dev.h>
10 #include <linux/writeback.h>
11 
/*
 * Decode an inode state bitmask (inode->i_state) into a "|"-separated
 * list of I_* flag names for human-readable trace output.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_REFERENCED,		"I_REFERENCED"},	\
		{I_LINKABLE,		"I_LINKABLE"},		\
		{I_WB_SWITCH,		"I_WB_SWITCH"},		\
		{I_OVL_INUSE,		"I_OVL_INUSE"},		\
		{I_CREATING,		"I_CREATING"},		\
		{I_DONTCACHE,		"I_DONTCACHE"},		\
		{I_SYNC_QUEUED,		"I_SYNC_QUEUED"},	\
		{I_PINNING_NETFS_WB,	"I_PINNING_NETFS_WB"},	\
		{I_LRU_ISOLATING,	"I_LRU_ISOLATING"}	\
	)
33 
/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/*
 * Table of wb_reason enum values and their user-visible names.  It is
 * expanded twice: first (here) with EM/EMe as TRACE_DEFINE_ENUM() so the
 * enum values are exported to user space, then again inside
 * __print_symbolic() after EM/EMe are redefined below to emit
 * { value, "name" } pairs.
 */
#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EM( WB_REASON_FORKER_THREAD,		"forker_thread")	\
	EMe(WB_REASON_FOREIGN_FLUSH,		"foreign_flush")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
60 
61 struct wb_writeback_work;
62 
/*
 * Template for per-folio writeback events: records the bdi device name,
 * the owning inode number (0 when there is no mapping or host inode) and
 * the folio's index within the mapping.
 */
DECLARE_EVENT_CLASS(writeback_folio_template,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		/* mapping may be NULL; bdi_dev_name() must cope with that */
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
		__entry->index = folio->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);
89 
/* A folio was dirtied in the page cache. */
DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);

/* Someone is waiting for writeback on a folio to complete. */
DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);
103 
/*
 * Template for inode-dirtying events: records the bdi name, inode number,
 * the inode's current i_state and the dirty flags being applied.  Both
 * the state and the flags are decoded with show_inode_state() since the
 * flags are a subset of the same I_* bit space.
 */
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);
134 
/* Entry into __mark_inode_dirty() — the inode may or may not get dirtied. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

/* Start of the inode-dirtying path proper. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

/* The inode was actually marked dirty. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
155 
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/*
 * Helpers used from TP_fast_assign() to resolve the cgroup inode number
 * that tags a writeback event.  Only needed (and only compilable) while
 * the tracepoint bodies are being instantiated, hence the
 * CREATE_TRACE_POINTS guard.
 */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	/* a wbc without an attached wb has no cgroup context; report 1 */
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

/* Without cgroup writeback everything is accounted to ino 1. */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
185 
186 #ifdef CONFIG_CGROUP_WRITEBACK
/*
 * Records the foreign-writeback history bitmap of an inode, i.e. which
 * recent writebacks came from a cgroup other than the inode's own.
 */
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);
215 
/*
 * A batch of inode wb-switches was queued: @count inodes are moving from
 * @old_wb's cgroup to @new_wb's cgroup on the same bdi.
 */
TRACE_EVENT(inode_switch_wbs_queue,

	TP_PROTO(struct bdi_writeback *old_wb, struct bdi_writeback *new_wb,
		 unsigned int count),

	TP_ARGS(old_wb, new_wb, count),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
		__field(unsigned int,	count)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
		__entry->count		= count;
	),

	TP_printk("bdi %s: old_cgroup_ino=%lu new_cgroup_ino=%lu count=%u",
		__entry->name,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino,
		__entry->count
	)
);
244 
/*
 * A single inode was switched from one bdi_writeback (cgroup) to another.
 */
TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);
273 
/*
 * A folio belonging to one memcg was dirtied against a wb owned by a
 * different cgroup ("foreign dirtying"); records both cgroup inos so the
 * mismatch can be observed.
 */
TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct folio *folio, struct bdi_writeback *wb),

	TP_ARGS(folio, wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(u64,		bdi_id)
		__field(ino_t,		ino)
		__field(unsigned int,	memcg_id)
		__field(ino_t,		cgroup_ino)
		__field(ino_t,		page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = folio_mapping(folio);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);
310 
/*
 * A wb is issuing writeback on behalf of a foreign (bdi, memcg) pair in
 * response to accumulated foreign dirtying.
 */
TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	frn_bdi_id)
		__field(unsigned int,	frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
339 #endif
340 
/*
 * Template for ->write_inode() events: bdi name, inode number, the wbc
 * sync mode (WB_SYNC_NONE/WB_SYNC_ALL) and the issuing cgroup.
 */
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);
369 
/* About to call the filesystem's ->write_inode(). */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

/* ->write_inode() returned. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
383 
/*
 * Template for events tracking the lifecycle of one wb_writeback_work
 * item (queue -> exec -> start -> written / wait).  Captures the work's
 * parameters plus the reason, decoded via WB_WORK_REASON.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		/* sb_dev is 0 when the work is not targeted at one superblock */
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
431 
/* Number of pages written by one pass of the flusher. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
443 
/*
 * Minimal per-wb event template: just the bdi name and the owning
 * cgroup's inode number.
 */
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

/* Background writeback was kicked for this wb. */
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
466 
/* A backing_dev_info was registered. */
TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);
480 
/*
 * Template for writeback_control snapshots: captures the wbc's budget,
 * mode flags and byte range at the point of the event.
 */
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d "
		"cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))

/* A page is being written via ->writepage. */
DEFINE_WBC_EVENT(wbc_writepage);
530 
/*
 * Inodes dirtied before @dirtied_before were moved onto the wb's b_io
 * list; @moved is how many were queued for writeback.
 */
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= dirtied_before;
		/* convert the jiffies cutoff to an age in milliseconds */
		__entry->age	= (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);
562 
/*
 * Snapshot of global dirty-page accounting: current counters from the
 * node vmstat plus the thresholds/limit computed by the caller.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
605 
/* Convert a page count to KB/s-style units (pages -> kilobytes). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

/*
 * Dirty-ratelimit update for a wb: write bandwidth estimates, the
 * measured dirty rate and the resulting rate limits, all reported in KBps.
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);
653 
/*
 * One balance_dirty_pages() throttling decision: global and per-wb dirty
 * levels versus their setpoints, the applied ratelimits, and the pause
 * accounting for the current task (all times in milliseconds).
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 struct dirty_throttle_control *dtc,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, dtc,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	wb_setpoint)
		__field(unsigned long,	wb_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		/* freerun: midpoint of bg_thresh and thresh, below which no throttling */
		unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= dtc->limit;
		__entry->setpoint	= (dtc->limit + freerun) / 2;
		__entry->dirty		= dtc->dirty;
		/* scale the global setpoint down to this wb's share */
		__entry->wb_setpoint	= __entry->setpoint *
						dtc->wb_thresh / (dtc->thresh + 1);
		__entry->wb_dirty	= dtc->wb_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* "think" time: ms since the task last finished a dirty pause */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "wb_setpoint=%lu wb_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->wb_setpoint,
		  __entry->wb_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	  )
);
733 
/*
 * An inode was requeued during writeback_sb_inodes(); records its state,
 * when it was dirtied and its age in seconds.
 */
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);
765 
/*
 * Template bracketing writeback of a single inode: state, age, the write
 * budget it was given (@nr_to_write) and how much was actually consumed.
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		/* pages written = original budget minus what remains in the wbc */
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);
811 
/* Start of writeback for one inode. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

/* Writeback for one inode finished. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
825 
/*
 * Lightweight inode event template keyed by device and inode number;
 * used for lazytime and writeback-list tracking events below.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);
852 
/* Lazytime timestamp update is being written back. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/* Lazytime flush triggered from the final iput(). */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/* Inode queued onto a dirty list. */
DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
885 
886 #endif /* _TRACE_WRITEBACK_H */
887 
888 /* This part must be outside protection */
889 #include <trace/define_trace.h>
890