/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON						\
	EM( WB_REASON_BACKGROUND,	"background")		\
	EM( WB_REASON_VMSCAN,		"vmscan")		\
	EM( WB_REASON_SYNC,		"sync")			\
	EM( WB_REASON_PERIODIC,		"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,	"laptop_timer")		\
	EM( WB_REASON_FS_FREE_SPACE,	"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,	"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
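/*
 * Illustrative note (sketch, not generated output): with EM()/EMe() defined
 * as TRACE_DEFINE_ENUM() above, the first expansion of WB_WORK_REASON
 * registered each WB_REASON_* value so user space tools can resolve the
 * numeric enum values. After the redefinition, WB_WORK_REASON expands to a
 * { value, "string" } list suitable for __print_symbolic(), roughly
 * { WB_REASON_BACKGROUND, "background" }, ...,
 * { WB_REASON_FORKER_THREAD, "forker_thread" }.
 */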
struct wb_writeback_work;

DECLARE_EVENT_CLASS(writeback_page_template,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->flags = flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return 1;
}
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
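/*
 * Editorial note on the helpers above: they exist only in the translation
 * unit that defines CREATE_TRACE_POINTS, i.e. where these tracepoints are
 * instantiated. A cgroup_ino of 1 in the events below means no per-cgroup
 * writeback context was available (CONFIG_CGROUP_WRITEBACK disabled, or the
 * wbc not attached to a bdi_writeback).
 */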
#ifdef CONFIG_CGROUP_WRITEBACK
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(ino_t, cgroup_ino)
		__field(unsigned int, history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
		__entry->history = history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);

TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(ino_t, old_cgroup_ino)
		__field(ino_t, new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino = inode->i_ino;
		__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);

TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct page *page, struct bdi_writeback *wb),

	TP_ARGS(page, wb),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(u64, bdi_id)
		__field(ino_t, ino)
		__field(unsigned int, memcg_id)
		__field(ino_t, cgroup_ino)
		__field(ino_t, page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = page_mapping(page);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id = wb->bdi->id;
		__entry->ino = inode ? inode->i_ino : 0;
		__entry->memcg_id = wb->memcg_css->id;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);

TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
		__field(unsigned int, frn_bdi_id)
		__field(unsigned int, frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id = frn_bdi_id;
		__entry->frn_memcg_id = frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif /* CONFIG_CGROUP_WRITEBACK */
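/*
 * Editorial note: the four events above are compiled only with
 * CONFIG_CGROUP_WRITEBACK=y. inode_switch_wbs logs an inode moving between
 * bdi_writeback structures; inode_foreign_history, track_foreign_dirty and
 * flush_foreign cover pages dirtied against a bdi_writeback owned by a
 * different memory cgroup and the flushes issued in response.
 */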
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->sync_mode = wbc->sync_mode;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
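/*
 * Editorial note: each TRACE_EVENT()/DEFINE_EVENT() in this header generates
 * a trace_<name>() call for kernel code; for example, the flusher work loop
 * in fs/fs-writeback.c is expected to report its result through
 * trace_writeback_pages_written(pages_written). The call sites for these
 * events live mainly in fs/fs-writeback.c and mm/page-writeback.c.
 */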
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(bdi), 32);
		__entry->nr_to_write = wbc->nr_to_write;
		__entry->pages_skipped = wbc->pages_skipped;
		__entry->sync_mode = wbc->sync_mode;
		__entry->for_kupdate = wbc->for_kupdate;
		__entry->for_background = wbc->for_background;
		__entry->for_reclaim = wbc->for_reclaim;
		__entry->range_cyclic = wbc->range_cyclic;
		__entry->range_start = (long)wbc->range_start;
		__entry->range_end = (long)wbc->range_end;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older = older_than_this ? *older_than_this : 0;
		__entry->age = older_than_this ?
			       (jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved = moved;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);

TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, nr_unstable)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable = global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied = global_node_page_state(NR_DIRTIED);
		__entry->nr_written = global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
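/*
 * Editorial note on units: KBps() below converts a page count (or a
 * pages-per-second rate) into KiB (or KiB/s) by shifting left by
 * PAGE_SHIFT - 10; with 4 KiB pages (PAGE_SHIFT == 12) that is x * 4.
 * The bandwidth and ratelimit fields in the following events are therefore
 * reported in KiB/s.
 */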
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw = KBps(wb->write_bandwidth);
		__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit,	/* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);
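/*
 * Editorial note, derived from the TP_fast_assign() below:
 *	freerun      = (thresh + bg_thresh) / 2
 *	setpoint     = (limit + freerun) / 2
 *	bdi_setpoint = setpoint * bdi_thresh / (thresh + 1)
 * pause, paused, period and think are reported in milliseconds.
 */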
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, limit)
		__field(unsigned long, setpoint)
		__field(unsigned long, dirty)
		__field(unsigned long, bdi_setpoint)
		__field(unsigned long, bdi_dirty)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned int, dirtied)
		__field(unsigned int, dirtied_pause)
		__field(unsigned long, paused)
		__field(long, pause)
		__field(unsigned long, period)
		__field(long, think)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit = global_wb_domain.dirty_limit;
		__entry->setpoint = (global_wb_domain.dirty_limit +
				     freerun) / 2;
		__entry->dirty = dirty;
		__entry->bdi_setpoint = __entry->setpoint *
					bdi_thresh / (thresh + 1);
		__entry->bdi_dirty = bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->dirtied = dirtied;
		__entry->dirtied_pause = current->nr_dirtied_pause;
		__entry->think = current->dirty_paused_when == 0 ? 0 :
			(long)(jiffies - current->dirty_paused_when) * 1000 / HZ;
		__entry->period = period * 1000 / HZ;
		__entry->pause = pause * 1000 / HZ;
		__entry->paused = (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	)
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(unsigned int, usec_timeout)
		__field(unsigned int, usec_delayed)
	),

	TP_fast_assign(
		__entry->usec_timeout = usec_timeout;
		__entry->usec_delayed = usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
		  __entry->usec_timeout,
		  __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
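/*
 * Editorial note on the single-inode template below: "wrote" is
 * nr_to_write - wbc->nr_to_write, i.e. the number of pages this pass
 * actually consumed from its budget for the inode, and "age" in the output
 * is (jiffies - dirtied_when) / HZ, the inode's dirty age in seconds.
 */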
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		__entry->wrote = nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(__u16, mode)
		__field(unsigned long, dirtied_when)
	),

	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->mode = inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
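/*
 * Usage sketch (editorial; assumes tracefs is mounted at
 * /sys/kernel/tracing): the events in this file appear under
 * events/writeback/, e.g.
 *
 *	echo 1 > /sys/kernel/tracing/events/writeback/writeback_queue_io/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */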