// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include <linux/memcontrol.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))

struct wb_completion {
	atomic_t		cnt;
};

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	unsigned int auto_free:1;	/* free on completion */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct wb_completion *done;	/* set if the caller waits */
};

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define DEFINE_WB_COMPLETION_ONSTACK(cmpl)				\
	struct wb_completion cmpl = {					\
		.cnt		= ATOMIC_INIT(1),			\
	}
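/*
 * Illustrative sketch (not part of the original file): issuing an
 * on-stack work item and waiting for it, in the style of the fallback
 * path in bdi_split_work_to_wbs() below; @wb is assumed to be a pinned
 * bdi_writeback:
 *
 *	DEFINE_WB_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work work = {
 *		.nr_pages	= LONG_MAX,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.auto_free	= 0,
 *		.done		= &done,
 *	};
 *
 *	wb_queue_work(wb, &work);
 *	wb_wait_for_completion(wb->bdi, &done);
 *
 * Because ->done is set and ->auto_free is clear, the work isn't freed
 * on completion and the stack slot stays valid until the wait returns.
 */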
/*
 * If an inode is constantly having its pages dirtied, but then the
 * updates stop dirtytime_expire_interval seconds in the past, it's
 * possible for the worst case time between when an inode has its
 * timestamps updated and when they finally get written out to be two
 * dirtytime_expire_intervals.  We set the default to 12 hours (in
 * seconds), which means most of the time inodes will have their
 * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_io_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static bool wb_io_lists_populated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb)) {
		return false;
	} else {
		set_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(!wb->avg_write_bandwidth);
		atomic_long_add(wb->avg_write_bandwidth,
				&wb->bdi->tot_write_bandwidth);
		return true;
	}
}

static void wb_io_lists_depopulated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
		clear_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
					&wb->bdi->tot_write_bandwidth) < 0);
	}
}

/**
 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 * @inode: inode to be moved
 * @wb: target bdi_writeback
 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
 *
 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
 * Returns %true if @inode is the first occupant of the !dirty_time IO
 * lists; otherwise, %false.
 */
static bool inode_io_list_move_locked(struct inode *inode,
				      struct bdi_writeback *wb,
				      struct list_head *head)
{
	assert_spin_locked(&wb->list_lock);

	list_move(&inode->i_io_list, head);

	/* dirty_time doesn't count as dirty_io until expiration */
	if (head != &wb->b_dirty_time)
		return wb_io_lists_populated(wb);

	wb_io_lists_depopulated(wb);
	return false;
}

/**
 * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
 * @inode: inode to be removed
 * @wb: bdi_writeback @inode is being removed from
 *
 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
 * clear %WB_has_dirty_io if all are empty afterwards.
 */
static void inode_io_list_del_locked(struct inode *inode,
				     struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);

	list_del_init(&inode->i_io_list);
	wb_io_lists_depopulated(wb);
}

static void wb_wakeup(struct bdi_writeback *wb)
{
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	spin_unlock_bh(&wb->work_lock);
}

static void finish_writeback_work(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	struct wb_completion *done = work->done;

	if (work->auto_free)
		kfree(work);
	if (done && atomic_dec_and_test(&done->cnt))
		wake_up_all(&wb->bdi->wb_waitq);
}

static void wb_queue_work(struct bdi_writeback *wb,
			  struct wb_writeback_work *work)
{
	trace_writeback_queue(wb, work);

	if (work->done)
		atomic_inc(&work->done->cnt);

	spin_lock_bh(&wb->work_lock);

	if (test_bit(WB_registered, &wb->state)) {
		list_add_tail(&work->list, &wb->work_list);
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	} else
		finish_writeback_work(wb, work);

	spin_unlock_bh(&wb->work_lock);
}
/**
 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 * @bdi: bdi work items were issued to
 * @done: target wb_completion
 *
 * Wait for one or more work items issued to @bdi with their ->done field
 * set to @done, which should have been defined with
 * DEFINE_WB_COMPLETION_ONSTACK().  This function returns after all such
 * work items are completed.  Work items which are waited upon aren't freed
 * automatically on completion.
 */
static void wb_wait_for_completion(struct backing_dev_info *bdi,
				   struct wb_completion *done)
{
	atomic_dec(&done->cnt);		/* put down the initial count */
	wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
}

#ifdef CONFIG_CGROUP_WRITEBACK

/* parameters for foreign inode detection, see wbc_detach_inode() */
#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, up to 8 secs w/ 16bit */
#define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
#define WB_FRN_TIME_CUT_DIV	2	/* ignore rounds < avg / 2 */
#define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */

#define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
#define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
					/* each slot's duration is 2s / 16 */
#define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
					/* if foreign slots >= 8, switch */
#define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
					/* one round can affect up to 5 slots */

static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *isw_wq;
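/*
 * Worked example for the WB_FRN_* fixed-point arithmetic above (a
 * sketch; it assumes ->avg_write_bandwidth is in pages/sec, which is
 * what the shift in wbc_detach_inode() implies): a round that writes
 * 16MB at 64MB/s consumes 0.25s, i.e. 0.25 << WB_FRN_TIME_SHIFT == 2048
 * time units.  With WB_FRN_HIST_UNIT == 16384 / 16 == 1024 units per
 * slot, that round shifts DIV_ROUND_UP(2048, 1024) == 2 slots into
 * ->i_wb_frn_history, setting both bits if the winner was foreign.
 */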
void __inode_attach_wb(struct inode *inode, struct page *page)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;

	if (inode_cgwb_enabled(inode)) {
		struct cgroup_subsys_state *memcg_css;

		if (page) {
			memcg_css = mem_cgroup_css_from_page(page);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
		} else {
			/* must pin memcg_css, see wb_get_create() */
			memcg_css = task_get_css(current, memory_cgrp_id);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
			css_put(memcg_css);
		}
	}

	if (!wb)
		wb = &bdi->wb;

	/*
	 * There may be multiple instances of this function racing to
	 * update the same inode.  Use cmpxchg() to tell the winner.
	 */
	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
		wb_put(wb);
}

/**
 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 * @inode: inode of interest with i_lock held
 *
 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 * held on entry and is released on return.  The returned wb is guaranteed
 * to stay @inode's associated wb until its list_lock is released.
 */
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
	__releases(&inode->i_lock)
	__acquires(&wb->list_lock)
{
	while (true) {
		struct bdi_writeback *wb = inode_to_wb(inode);

		/*
		 * inode_to_wb() association is protected by both
		 * @inode->i_lock and @wb->list_lock but list_lock nests
		 * outside i_lock.  Drop i_lock and verify that the
		 * association hasn't changed after acquiring list_lock.
		 */
		wb_get(wb);
		spin_unlock(&inode->i_lock);
		spin_lock(&wb->list_lock);

		/* i_wb may have changed in between, can't use inode_to_wb() */
		if (likely(wb == inode->i_wb)) {
			wb_put(wb);	/* @inode already has ref */
			return wb;
		}

		spin_unlock(&wb->list_lock);
		wb_put(wb);
		cpu_relax();
		spin_lock(&inode->i_lock);
	}
}

/**
 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 * @inode: inode of interest
 *
 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 * on entry.
 */
static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
	__acquires(&wb->list_lock)
{
	spin_lock(&inode->i_lock);
	return locked_inode_to_wb_and_lock_list(inode);
}

struct inode_switch_wbs_context {
	struct inode		*inode;
	struct bdi_writeback	*new_wb;

	struct rcu_head		rcu_head;
	struct work_struct	work;
};

static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
{
	down_write(&bdi->wb_switch_rwsem);
}

static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
{
	up_write(&bdi->wb_switch_rwsem);
}
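/*
 * Flow of a wb switch, for orientation (a summary of the code below,
 * not an addition to it):
 *
 *	inode_switch_wbs()
 *	    set I_WB_SWITCH, pin inode and new_wb
 *	    call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn)
 *	... RCU grace period elapses ...
 *	inode_switch_wbs_rcu_fn()
 *	    queue_work(isw_wq, &isw->work)	// needs bh-unsafe locks
 *	inode_switch_wbs_work_fn()
 *	    transfer stats and IO lists, clear I_WB_SWITCH
 *
 * The grace period guarantees that any stat-update section which began
 * before I_WB_SWITCH became visible has finished, so the remaining ones
 * synchronize against the transfer via the i_pages lock.
 */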
static void inode_switch_wbs_work_fn(struct work_struct *work)
{
	struct inode_switch_wbs_context *isw =
		container_of(work, struct inode_switch_wbs_context, work);
	struct inode *inode = isw->inode;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct address_space *mapping = inode->i_mapping;
	struct bdi_writeback *old_wb = inode->i_wb;
	struct bdi_writeback *new_wb = isw->new_wb;
	XA_STATE(xas, &mapping->i_pages, 0);
	struct page *page;
	bool switched = false;

	/*
	 * If @inode switches cgwb membership while sync_inodes_sb() is
	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
	 */
	down_read(&bdi->wb_switch_rwsem);

	/*
	 * By the time control reaches here, RCU grace period has passed
	 * since I_WB_SWITCH assertion and all wb stat update transactions
	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
	 * synchronizing against the i_pages lock.
	 *
	 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
	 * gives us exclusion against all wb related operations on @inode
	 * including IO list manipulations and stat updates.
	 */
	if (old_wb < new_wb) {
		spin_lock(&old_wb->list_lock);
		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&new_wb->list_lock);
		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
	}
	spin_lock(&inode->i_lock);
	xa_lock_irq(&mapping->i_pages);

	/*
	 * Once I_FREEING is visible under i_lock, the eviction path owns
	 * the inode and we shouldn't modify ->i_io_list.
	 */
	if (unlikely(inode->i_state & I_FREEING))
		goto skip_switch;

	/*
	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
	 * pages actually under writeback.
	 */
	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
		if (PageDirty(page)) {
			dec_wb_stat(old_wb, WB_RECLAIMABLE);
			inc_wb_stat(new_wb, WB_RECLAIMABLE);
		}
	}

	xas_set(&xas, 0);
	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
		WARN_ON_ONCE(!PageWriteback(page));
		dec_wb_stat(old_wb, WB_WRITEBACK);
		inc_wb_stat(new_wb, WB_WRITEBACK);
	}

	wb_get(new_wb);

	/*
	 * Transfer to @new_wb's IO list if necessary.  The specific list
	 * @inode was on is ignored and the inode is put on ->b_dirty which
	 * is always correct including from ->b_dirty_time.  The transfer
	 * preserves @inode->dirtied_when ordering.
	 */
	if (!list_empty(&inode->i_io_list)) {
		struct inode *pos;

		inode_io_list_del_locked(inode, old_wb);
		inode->i_wb = new_wb;
		list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
			if (time_after_eq(inode->dirtied_when,
					  pos->dirtied_when))
				break;
		inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
	} else {
		inode->i_wb = new_wb;
	}

	/* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
	switched = true;
skip_switch:
	/*
	 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
	 * ensures that the new wb is visible if they see !I_WB_SWITCH.
	 */
	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);

	xa_unlock_irq(&mapping->i_pages);
	spin_unlock(&inode->i_lock);
	spin_unlock(&new_wb->list_lock);
	spin_unlock(&old_wb->list_lock);

	up_read(&bdi->wb_switch_rwsem);

	if (switched) {
		wb_wakeup(new_wb);
		wb_put(old_wb);
	}
	wb_put(new_wb);

	iput(inode);
	kfree(isw);

	atomic_dec(&isw_nr_in_flight);
}

static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
{
	struct inode_switch_wbs_context *isw = container_of(rcu_head,
				struct inode_switch_wbs_context, rcu_head);

	/* needs to grab bh-unsafe locks, bounce to work item */
	INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
	queue_work(isw_wq, &isw->work);
}
/**
 * inode_switch_wbs - change the wb association of an inode
 * @inode: target inode
 * @new_wb_id: ID of the new wb
 *
 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 * switching is performed asynchronously and may fail silently.
 */
static void inode_switch_wbs(struct inode *inode, int new_wb_id)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct cgroup_subsys_state *memcg_css;
	struct inode_switch_wbs_context *isw;

	/* noop if seems to be already in progress */
	if (inode->i_state & I_WB_SWITCH)
		return;

	/*
	 * Avoid starting new switches while sync_inodes_sb() is in
	 * progress.  Otherwise, if the down_write protected issue path
	 * blocks heavily, we might end up starting a large number of
	 * switches which will block on the rwsem.
	 */
	if (!down_read_trylock(&bdi->wb_switch_rwsem))
		return;

	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
	if (!isw)
		goto out_unlock;

	/* find and pin the new wb */
	rcu_read_lock();
	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
	if (memcg_css)
		isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
	rcu_read_unlock();
	if (!isw->new_wb)
		goto out_free;

	/* while holding I_WB_SWITCH, no one else can update the association */
	spin_lock(&inode->i_lock);
	if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
	    inode->i_state & (I_WB_SWITCH | I_FREEING) ||
	    inode_to_wb(inode) == isw->new_wb) {
		spin_unlock(&inode->i_lock);
		goto out_free;
	}
	inode->i_state |= I_WB_SWITCH;
	__iget(inode);
	spin_unlock(&inode->i_lock);

	isw->inode = inode;

	/*
	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
	 * the RCU protected stat update paths to grab the i_pages
	 * lock so that stat transfer can synchronize against them.
	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
	 */
	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);

	atomic_inc(&isw_nr_in_flight);

	goto out_unlock;

out_free:
	if (isw->new_wb)
		wb_put(isw->new_wb);
	kfree(isw);
out_unlock:
	up_read(&bdi->wb_switch_rwsem);
}

/**
 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * @inode is locked and about to be written back under the control of @wbc.
 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 * writeback completion, wbc_detach_inode() should be called.  This is used
 * to track the cgroup writeback context.
 */
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
{
	if (!inode_cgwb_enabled(inode)) {
		spin_unlock(&inode->i_lock);
		return;
	}

	wbc->wb = inode_to_wb(inode);
	wbc->inode = inode;

	wbc->wb_id = wbc->wb->memcg_css->id;
	wbc->wb_lcand_id = inode->i_wb_frn_winner;
	wbc->wb_tcand_id = 0;
	wbc->wb_bytes = 0;
	wbc->wb_lcand_bytes = 0;
	wbc->wb_tcand_bytes = 0;

	wb_get(wbc->wb);
	spin_unlock(&inode->i_lock);

	/*
	 * A dying wb indicates that the memcg-blkcg mapping has changed
	 * and a new wb is already serving the memcg.  Switch immediately.
	 */
	if (unlikely(wb_dying(wbc->wb)))
		inode_switch_wbs(inode, wbc->wb_id);
}
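/*
 * Typical attach/detach usage (sketch; writeback_single_inode() below
 * follows exactly this pattern):
 *
 *	spin_lock(&inode->i_lock);
 *	inode->i_state |= I_SYNC;
 *	wbc_attach_and_unlock_inode(wbc, inode);	// drops i_lock
 *	ret = __writeback_single_inode(inode, wbc);
 *	wbc_detach_inode(wbc);
 */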
/**
 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 * @wbc: writeback_control of the just finished writeback
 *
 * To be called after a writeback attempt of an inode finishes and undoes
 * wbc_attach_and_unlock_inode().  Can be called under any context.
 *
 * As concurrent write sharing of an inode is expected to be very rare and
 * memcg only tracks page ownership on a first-use basis, severely confining
 * the usefulness of such sharing, cgroup writeback tracks ownership
 * per-inode.  While the support for concurrent write sharing of an inode
 * is deemed unnecessary, an inode being written to by different cgroups at
 * different points in time is a lot more common, and, more importantly,
 * charging only by first-use can too readily lead to grossly incorrect
 * behaviors (a single foreign page can lead to gigabytes of writeback
 * being incorrectly attributed).
 *
 * To resolve this issue, cgroup writeback detects the majority dirtier of
 * an inode and transfers the ownership to it.  To avoid unnecessary
 * oscillation, the detection mechanism keeps track of history and gives
 * out the switch verdict only if the foreign usage pattern is stable over
 * a certain amount of time and/or writeback attempts.
 *
 * On each writeback attempt, @wbc tries to detect the majority writer
 * using the Boyer-Moore majority vote algorithm.  In addition to the byte
 * count from the majority voting, it also counts the bytes written for the
 * current wb and the last round's winner wb (max of last round's current
 * wb, the winner from two rounds ago, and the last round's majority
 * candidate).  Keeping track of the historical winner helps the algorithm
 * to semi-reliably detect the most active writer even when it's not the
 * absolute majority.
 *
 * Once the winner of the round is determined, whether the winner is
 * foreign or not and how much IO time the round consumed is recorded in
 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 * over a certain threshold, the switch verdict is given.
 */
void wbc_detach_inode(struct writeback_control *wbc)
{
	struct bdi_writeback *wb = wbc->wb;
	struct inode *inode = wbc->inode;
	unsigned long avg_time, max_bytes, max_time;
	u16 history;
	int max_id;

	if (!wb)
		return;

	history = inode->i_wb_frn_history;
	avg_time = inode->i_wb_frn_avg_time;

	/* pick the winner of this round */
	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
		max_id = wbc->wb_id;
		max_bytes = wbc->wb_bytes;
	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
		max_id = wbc->wb_lcand_id;
		max_bytes = wbc->wb_lcand_bytes;
	} else {
		max_id = wbc->wb_tcand_id;
		max_bytes = wbc->wb_tcand_bytes;
	}

	/*
	 * Calculate the amount of IO time the winner consumed and fold it
	 * into the running average kept per inode.  If the consumed IO
	 * time is lower than avg_time / WB_FRN_TIME_CUT_DIV, ignore it for
	 * deciding whether to switch or not.  This is to prevent one-off
	 * small dirtiers from skewing the verdict.
	 */
	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
				wb->avg_write_bandwidth);
	if (avg_time)
		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
	else
		avg_time = max_time;	/* immediate catch up on first run */

	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
		int slots;

		/*
		 * The switch verdict is reached if foreign wb's consume
		 * more than a certain proportion of IO time in a
		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by a 16 slot
		 * history mask where each bit represents one sixteenth of
		 * the period.  Determine the number of slots to shift into
		 * history from @max_time.
		 */
		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
		history <<= slots;
		if (wbc->wb_id != max_id)
			history |= (1U << slots) - 1;

		/*
		 * Switch if the current wb isn't the consistent winner.
		 * If there are multiple closely competing dirtiers, the
		 * inode may switch across them repeatedly over time, which
		 * is okay.  The main goal is avoiding keeping an inode on
		 * the wrong wb for an extended period of time.
		 */
		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
			inode_switch_wbs(inode, max_id);
	}

	/*
	 * Multiple instances of this function may race to update the
	 * following fields but we don't mind occasional inaccuracies.
	 */
	inode->i_wb_frn_winner = max_id;
	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
	inode->i_wb_frn_history = history;

	wb_put(wbc->wb);
	wbc->wb = NULL;
}
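/*
 * For reference, the textbook Boyer-Moore majority vote that
 * wbc_account_io() below adapts (illustrative sketch, not kernel code):
 * a single candidate/counter pair finds an absolute majority in one
 * pass over the votes.
 *
 *	int candidate = -1;
 *	long count = 0;
 *
 *	for (i = 0; i < n; i++) {
 *		if (!count)
 *			candidate = vote[i];
 *		if (vote[i] == candidate)
 *			count++;
 *		else
 *			count--;
 *	}
 *
 * wbc_account_io() weights each vote by @bytes and keeps the running
 * candidate in ->wb_tcand_id with the counter in ->wb_tcand_bytes.
 */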
/**
 * wbc_account_io - account IO issued during writeback
 * @wbc: writeback_control of the writeback in progress
 * @page: page being written out
 * @bytes: number of bytes being written out
 *
 * @bytes from @page are about to be written out during the writeback
 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 * wbc_detach_inode().
 */
void wbc_account_io(struct writeback_control *wbc, struct page *page,
		    size_t bytes)
{
	struct cgroup_subsys_state *css;
	int id;

	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (!wbc->wb)
		return;

	css = mem_cgroup_css_from_page(page);
	/* dead cgroups shouldn't contribute to inode ownership arbitration */
	if (!(css->flags & CSS_ONLINE))
		return;

	id = css->id;

	if (id == wbc->wb_id) {
		wbc->wb_bytes += bytes;
		return;
	}

	if (id == wbc->wb_lcand_id)
		wbc->wb_lcand_bytes += bytes;

	/* Boyer-Moore majority vote algorithm */
	if (!wbc->wb_tcand_bytes)
		wbc->wb_tcand_id = id;
	if (id == wbc->wb_tcand_id)
		wbc->wb_tcand_bytes += bytes;
	else
		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
}
EXPORT_SYMBOL_GPL(wbc_account_io);

/**
 * inode_congested - test whether an inode is congested
 * @inode: inode to test for congestion (may be NULL)
 * @cong_bits: mask of WB_[a]sync_congested bits to test
 *
 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 * bits to test and the return value is the mask of set bits.
 *
 * If cgroup writeback is enabled for @inode, the congestion state is
 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 * associated with @inode is congested; otherwise, the root wb's congestion
 * state is used.
 *
 * @inode is allowed to be NULL as this function is often called on
 * mapping->host which is NULL for the swapper space.
 */
int inode_congested(struct inode *inode, int cong_bits)
{
	/*
	 * Once set, ->i_wb never becomes NULL while the inode is alive.
	 * Start transaction iff ->i_wb is visible.
	 */
	if (inode && inode_to_wb_is_valid(inode)) {
		struct bdi_writeback *wb;
		struct wb_lock_cookie lock_cookie = {};
		bool congested;

		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
		congested = wb_congested(wb, cong_bits);
		unlocked_inode_to_wb_end(inode, &lock_cookie);
		return congested;
	}

	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
EXPORT_SYMBOL_GPL(inode_congested);

/**
 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 * @wb: target bdi_writeback to split @nr_pages to
 * @nr_pages: number of pages to write for the whole bdi
 *
 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 * @wb->bdi.
 */
static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);

	if (nr_pages == LONG_MAX)
		return LONG_MAX;

	/*
	 * This may be called on clean wb's and proportional distribution
	 * may not make sense, just use the original @nr_pages in those
	 * cases.  In general, we want to err on the side of writing more.
	 */
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;
	else
		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}
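/*
 * Example with illustrative numbers: for nr_pages == 1024, this wb's
 * avg_write_bandwidth == 100 and tot_write_bandwidth == 400, the wb is
 * asked to write DIV_ROUND_UP(1024 * 100, 400) == 256 pages of the
 * total.
 */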
/**
 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 * @bdi: target backing_dev_info
 * @base_work: wb_writeback_work to issue
 * @skip_if_busy: skip wb's which already have writeback in progress
 *
 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
 * distributed to the busy wbs according to each wb's proportion in the
 * total active write bandwidth of @bdi.
 */
static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
				  struct wb_writeback_work *base_work,
				  bool skip_if_busy)
{
	struct bdi_writeback *last_wb = NULL;
	struct bdi_writeback *wb = list_entry(&bdi->wb_list,
					      struct bdi_writeback, bdi_node);

	might_sleep();
restart:
	rcu_read_lock();
	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
		DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
		struct wb_writeback_work fallback_work;
		struct wb_writeback_work *work;
		long nr_pages;

		if (last_wb) {
			wb_put(last_wb);
			last_wb = NULL;
		}

		/* SYNC_ALL writes out I_DIRTY_TIME too */
		if (!wb_has_dirty_io(wb) &&
		    (base_work->sync_mode == WB_SYNC_NONE ||
		     list_empty(&wb->b_dirty_time)))
			continue;
		if (skip_if_busy && writeback_in_progress(wb))
			continue;

		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);

		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (work) {
			*work = *base_work;
			work->nr_pages = nr_pages;
			work->auto_free = 1;
			wb_queue_work(wb, work);
			continue;
		}

		/* alloc failed, execute synchronously using on-stack fallback */
		work = &fallback_work;
		*work = *base_work;
		work->nr_pages = nr_pages;
		work->auto_free = 0;
		work->done = &fallback_work_done;

		wb_queue_work(wb, work);

		/*
		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
		 * continuing iteration from @wb after dropping and
		 * regrabbing rcu read lock.
		 */
		wb_get(wb);
		last_wb = wb;

		rcu_read_unlock();
		wb_wait_for_completion(bdi, &fallback_work_done);
		goto restart;
	}
	rcu_read_unlock();

	if (last_wb)
		wb_put(last_wb);
}
/**
 * cgroup_writeback_umount - flush inode wb switches for umount
 *
 * This function is called when a super_block is about to be destroyed and
 * flushes in-flight inode wb switches.  An inode wb switch goes through
 * RCU and then workqueue, so the two need to be flushed in order to ensure
 * that all previously scheduled switches are finished.  As wb switches are
 * rare occurrences and synchronize_rcu() can take a while, perform
 * flushing iff wb switches are in flight.
 */
void cgroup_writeback_umount(void)
{
	if (atomic_read(&isw_nr_in_flight)) {
		/*
		 * Use rcu_barrier() to wait for all pending callbacks to
		 * ensure that all in-flight wb switches are in the workqueue.
		 */
		rcu_barrier();
		flush_workqueue(isw_wq);
	}
}

static int __init cgroup_writeback_init(void)
{
	isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
	if (!isw_wq)
		return -ENOMEM;
	return 0;
}
fs_initcall(cgroup_writeback_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }

static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
	__releases(&inode->i_lock)
	__acquires(&wb->list_lock)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_unlock(&inode->i_lock);
	spin_lock(&wb->list_lock);
	return wb;
}

static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
	__acquires(&wb->list_lock)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_lock(&wb->list_lock);
	return wb;
}

static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	return nr_pages;
}

static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
				  struct wb_writeback_work *base_work,
				  bool skip_if_busy)
{
	might_sleep();

	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
		base_work->auto_free = 0;
		wb_queue_work(&bdi->wb, base_work);
	}
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_node_page_state(NR_FILE_DIRTY) +
		global_node_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
{
	if (!wb_has_dirty_io(wb))
		return;

	/*
	 * All callers of this function want to start writeback of all
	 * dirty pages.  Places like vmscan can call this at a very
	 * high frequency, causing pointless allocations of tons of
	 * work items and keeping the flusher threads busy retrieving
	 * that work.  Ensure that we only allow one of them pending and
	 * inflight at a time.
	 */
	if (test_bit(WB_start_all, &wb->state) ||
	    test_and_set_bit(WB_start_all, &wb->state))
		return;

	wb->start_all_reason = reason;
	wb_wakeup(wb);
}
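/*
 * Added commentary on the pattern above: the plain test_bit() before
 * test_and_set_bit() is an optimization rather than a correctness
 * requirement - the unlocked read avoids dirtying the cacheline when
 * WB_start_all is already set, which matters for the high-frequency
 * callers such as vmscan mentioned in the comment.
 */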
/**
 * wb_start_background_writeback - start background writeback
 * @wb: bdi_writeback to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens.  When
 *   this function returns, it is only guaranteed that for given wb
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void wb_start_background_writeback(struct bdi_writeback *wb)
{
	/*
	 * We just wake up the flusher thread.  It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(wb);
	wb_wakeup(wb);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_io_list_del(struct inode *inode)
{
	struct bdi_writeback *wb;

	wb = inode_to_wb_and_lock_list(inode);
	inode_io_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
}

/*
 * mark an inode as under writeback on the sb
 */
void sb_mark_inode_writeback(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned long flags;

	if (list_empty(&inode->i_wb_list)) {
		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
		if (list_empty(&inode->i_wb_list)) {
			list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
			trace_sb_mark_inode_writeback(inode);
		}
		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
	}
}

/*
 * clear an inode as under writeback on the sb
 */
void sb_clear_inode_writeback(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned long flags;

	if (!list_empty(&inode->i_wb_list)) {
		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
		if (!list_empty(&inode->i_wb_list)) {
			list_del_init(&inode->i_wb_list);
			trace_sb_clear_inode_writeback(inode);
		}
		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
	}
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
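/*
 * Added commentary on the wraparound guard above: on 32-bit, with
 * HZ == 1000, jiffies wraps roughly every 49.7 days.  A dirtied_when
 * stamped shortly before a wrap can make time_after(dirtied_when, t)
 * true even though the inode was dirtied long ago; the
 * time_before_eq(dirtied_when, jiffies) check rejects such values.
 */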
#define EXPIRE_DIRTY_ATIME 0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_io_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_io_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete.  Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}
/*
 * Wait for writeback on an inode to complete.  Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared.  This function must be called with i_lock
 * held and drops it.  It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes.  This function can only be called by the flusher thread - no one
 * else processes all inodes in writeback lists and requeueing inodes behind
 * flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention.  Each inode is tagged and synced in one
	 * shot.  If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers.  Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up.  Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion.  Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
	} else {
		/* The inode is clean.  Remove from writeback lists. */
		inode_io_list_del_locked(inode, wb);
	}
}
/*
 * Write out an inode and its dirty pages.  Do not update the writeback list
 * linkage.  That is left to the caller.  The caller is also responsible for
 * setting the I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.  We don't do it for sync(2) writeback because it
	 * has a separate, external IO completion path and ->sync_fs for
	 * guaranteeing inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & I_DIRTY_INODE) ||
		    wbc->sync_mode == WB_SYNC_ALL ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
			trace_writeback_lazytime(inode);
		}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	if (dirty & I_DIRTY_TIME)
		mark_inode_dirty_sync(inode);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & ~I_DIRTY_PAGES) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}
/*
 * Write out an inode's dirty pages.  Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is meant for writing back a single inode, e.g. on behalf of
 * the filesystem.  The flusher thread instead uses __writeback_single_inode()
 * and does more profound writeback list handling in writeback_sb_inodes().
 */
static int writeback_single_inode(struct inode *inode,
				  struct writeback_control *wbc)
{
	struct bdi_writeback *wb;
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync.  We must wait.  Since callers
		 * hold an inode reference or the inode has I_WILL_FREE set,
		 * it cannot go away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode.  We don't want to mess with writeback lists in
	 * this function since the flusher thread may be doing for example
	 * sync in parallel and if we move the inode, it could get skipped.
	 * So here we make sure inode is on some writeback list and leave it
	 * there unless we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY_ALL) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	wbc_attach_and_unlock_inode(wbc, inode);

	ret = __writeback_single_inode(inode, wbc);

	wbc_detach_inode(wbc);

	wb = inode_to_wb_and_lock_list(inode);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists.  Otherwise don't
	 * touch it.  See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY_ALL))
		inode_io_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct bdi_writeback *wb,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop.  Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(wb->avg_write_bandwidth / 2,
			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
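/*
 * Worked example for the rounding above (illustrative numbers, 4KB pages
 * so MIN_WRITEBACK_PAGES == 1024): pages == 500 becomes
 * round_down(1524, 1024) == 1024, enforcing the 4MB minimum chunk, while
 * pages == 12800 becomes round_down(13824, 1024) == 13312, a multiple of
 * MIN_WRITEBACK_PAGES near the computed value.
 */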
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 *
 * NOTE! This is called with wb->list_lock held, and will
 * unlock and relock that for each inode it ends up doing
 * IO for.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct bdi_writeback *tmp_wb;

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback.  So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC.  This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		wbc_attach_and_unlock_inode(&wbc, inode);

		write_chunk = writeback_chunk_size(wb, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory.  While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		wbc_detach_inode(&wbc);
		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;

		if (need_resched()) {
			/*
			 * We're trying to balance between building up a nice
			 * long list of IOs to improve our merge rate, and
			 * getting those IOs out quickly for anyone throttling
			 * in balance_dirty_pages().  cond_resched() doesn't
			 * unplug, so get our IOs out the door before we
			 * give up the CPU.
			 */
			blk_flush_plug(current);
			cond_resched();
		}
		/*
		 * Requeue @inode if still dirty.  Be careful as @inode may
		 * have been switched to another wb in the meantime.
		 */
		tmp_wb = inode_to_wb_and_lock_list(inode);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, tmp_wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);

		if (unlikely(tmp_wb != wb)) {
			spin_unlock(&tmp_wb->list_lock);
			spin_lock(&wb->list_lock);
		}

		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!trylock_super(sb)) {
			/*
			 * trylock_super() may fail consistently due to
			 * s_umount being grabbed by someone else.  Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		up_read(&sb->s_umount);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};
	struct blk_plug plug;

	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);
	blk_finish_plug(&plug);

	return nr_pages - work.nr_pages;
}
/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;
	struct blk_plug plug;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever.  Stop them if there is other work to do
		 * so that e.g. sync can proceed.  They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !wb_over_bg_thresh(wb))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing.  Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done.  So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		trace_writeback_wait(wb, work);
		inode = wb_inode(wb->b_more_io.prev);
		spin_lock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		/* This function drops i_lock... */
		inode_sleep_on_writeback(inode);
		spin_lock(&wb->list_lock);
	}
	spin_unlock(&wb->list_lock);
	blk_finish_plug(&plug);

	return nr_pages - work->nr_pages;
}
1844 */ 1845 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb) 1846 { 1847 struct wb_writeback_work *work = NULL; 1848 1849 spin_lock_bh(&wb->work_lock); 1850 if (!list_empty(&wb->work_list)) { 1851 work = list_entry(wb->work_list.next, 1852 struct wb_writeback_work, list); 1853 list_del_init(&work->list); 1854 } 1855 spin_unlock_bh(&wb->work_lock); 1856 return work; 1857 } 1858 1859 static long wb_check_background_flush(struct bdi_writeback *wb) 1860 { 1861 if (wb_over_bg_thresh(wb)) { 1862 1863 struct wb_writeback_work work = { 1864 .nr_pages = LONG_MAX, 1865 .sync_mode = WB_SYNC_NONE, 1866 .for_background = 1, 1867 .range_cyclic = 1, 1868 .reason = WB_REASON_BACKGROUND, 1869 }; 1870 1871 return wb_writeback(wb, &work); 1872 } 1873 1874 return 0; 1875 } 1876 1877 static long wb_check_old_data_flush(struct bdi_writeback *wb) 1878 { 1879 unsigned long expired; 1880 long nr_pages; 1881 1882 /* 1883 * When set to zero, disable periodic writeback 1884 */ 1885 if (!dirty_writeback_interval) 1886 return 0; 1887 1888 expired = wb->last_old_flush + 1889 msecs_to_jiffies(dirty_writeback_interval * 10); 1890 if (time_before(jiffies, expired)) 1891 return 0; 1892 1893 wb->last_old_flush = jiffies; 1894 nr_pages = get_nr_dirty_pages(); 1895 1896 if (nr_pages) { 1897 struct wb_writeback_work work = { 1898 .nr_pages = nr_pages, 1899 .sync_mode = WB_SYNC_NONE, 1900 .for_kupdate = 1, 1901 .range_cyclic = 1, 1902 .reason = WB_REASON_PERIODIC, 1903 }; 1904 1905 return wb_writeback(wb, &work); 1906 } 1907 1908 return 0; 1909 } 1910 1911 static long wb_check_start_all(struct bdi_writeback *wb) 1912 { 1913 long nr_pages; 1914 1915 if (!test_bit(WB_start_all, &wb->state)) 1916 return 0; 1917 1918 nr_pages = get_nr_dirty_pages(); 1919 if (nr_pages) { 1920 struct wb_writeback_work work = { 1921 .nr_pages = wb_split_bdi_pages(wb, nr_pages), 1922 .sync_mode = WB_SYNC_NONE, 1923 .range_cyclic = 1, 1924 .reason = wb->start_all_reason, 1925 }; 1926 1927 nr_pages = wb_writeback(wb, &work); 1928 } 1929 1930 clear_bit(WB_start_all, &wb->state); 1931 return nr_pages; 1932 } 1933 1934 1935 /* 1936 * Retrieve work items and do the writeback they describe 1937 */ 1938 static long wb_do_writeback(struct bdi_writeback *wb) 1939 { 1940 struct wb_writeback_work *work; 1941 long wrote = 0; 1942 1943 set_bit(WB_writeback_running, &wb->state); 1944 while ((work = get_next_work_item(wb)) != NULL) { 1945 trace_writeback_exec(wb, work); 1946 wrote += wb_writeback(wb, work); 1947 finish_writeback_work(wb, work); 1948 } 1949 1950 /* 1951 * Check for a flush-everything request 1952 */ 1953 wrote += wb_check_start_all(wb); 1954 1955 /* 1956 * Check for periodic writeback, kupdated() style 1957 */ 1958 wrote += wb_check_old_data_flush(wb); 1959 wrote += wb_check_background_flush(wb); 1960 clear_bit(WB_writeback_running, &wb->state); 1961 1962 return wrote; 1963 } 1964 1965 /* 1966 * Handle writeback of dirty data for the device backed by this bdi. Also 1967 * reschedules periodically and does kupdated style flushing. 1968 */ 1969 void wb_workfn(struct work_struct *work) 1970 { 1971 struct bdi_writeback *wb = container_of(to_delayed_work(work), 1972 struct bdi_writeback, dwork); 1973 long pages_written; 1974 1975 set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); 1976 current->flags |= PF_SWAPWRITE; 1977 1978 if (likely(!current_is_workqueue_rescuer() || 1979 !test_bit(WB_registered, &wb->state))) { 1980 /* 1981 * The normal path. Keep writing back @wb until its 1982 * work_list is empty. 
Note that this path is also taken
1983 		 * if @wb is shutting down even when we're running off the
1984 		 * rescuer as work_list needs to be drained.
1985 		 */
1986 		do {
1987 			pages_written = wb_do_writeback(wb);
1988 			trace_writeback_pages_written(pages_written);
1989 		} while (!list_empty(&wb->work_list));
1990 	} else {
1991 		/*
1992 		 * bdi_wq can't get enough workers and we're running off
1993 		 * the emergency worker. Don't hog it. Hopefully, 1024 is
1994 		 * enough for efficient IO.
1995 		 */
1996 		pages_written = writeback_inodes_wb(wb, 1024,
1997 						    WB_REASON_FORKER_THREAD);
1998 		trace_writeback_pages_written(pages_written);
1999 	}
2000 
2001 	if (!list_empty(&wb->work_list))
2002 		wb_wakeup(wb);
2003 	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2004 		wb_wakeup_delayed(wb);
2005 
2006 	current->flags &= ~PF_SWAPWRITE;
2007 }
2008 
2009 /*
2010  * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
2011  * write back the whole world.
2012  */
2013 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2014 					 enum wb_reason reason)
2015 {
2016 	struct bdi_writeback *wb;
2017 
2018 	if (!bdi_has_dirty_io(bdi))
2019 		return;
2020 
2021 	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2022 		wb_start_writeback(wb, reason);
2023 }
2024 
2025 void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2026 				enum wb_reason reason)
2027 {
2028 	rcu_read_lock();
2029 	__wakeup_flusher_threads_bdi(bdi, reason);
2030 	rcu_read_unlock();
2031 }
2032 
2033 /*
2034  * Wake up the flusher threads to start writeback of all currently dirty pages
2035  */
2036 void wakeup_flusher_threads(enum wb_reason reason)
2037 {
2038 	struct backing_dev_info *bdi;
2039 
2040 	/*
2041 	 * If we are expecting writeback progress we must submit plugged IO.
2042 	 */
2043 	if (blk_needs_flush_plug(current))
2044 		blk_schedule_flush_plug(current);
2045 
2046 	rcu_read_lock();
2047 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2048 		__wakeup_flusher_threads_bdi(bdi, reason);
2049 	rcu_read_unlock();
2050 }
2051 
2052 /*
2053  * Wake up bdi's periodically to make sure dirtytime inodes get
2054  * written back periodically. We deliberately do *not* check the
2055  * b_dirty_time list in wb_has_dirty_io(), since this would cause the
2056  * kernel to be constantly waking up once there are any dirtytime
2057  * inodes on the system. So instead we define a separate delayed work
2058  * function which gets called much more rarely. (By default, only
2059  * once every 12 hours.)
2060  *
2061  * If there is any other write activity going on in the file system,
2062  * this function won't be necessary. But if the only thing that has
2063  * happened on the file system is a dirtytime inode caused by an atime
2064  * update, we need this infrastructure below to make sure that inode
2065  * eventually gets pushed out to disk.
2066 */ 2067 static void wakeup_dirtytime_writeback(struct work_struct *w); 2068 static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback); 2069 2070 static void wakeup_dirtytime_writeback(struct work_struct *w) 2071 { 2072 struct backing_dev_info *bdi; 2073 2074 rcu_read_lock(); 2075 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 2076 struct bdi_writeback *wb; 2077 2078 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) 2079 if (!list_empty(&wb->b_dirty_time)) 2080 wb_wakeup(wb); 2081 } 2082 rcu_read_unlock(); 2083 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 2084 } 2085 2086 static int __init start_dirtytime_writeback(void) 2087 { 2088 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 2089 return 0; 2090 } 2091 __initcall(start_dirtytime_writeback); 2092 2093 int dirtytime_interval_handler(struct ctl_table *table, int write, 2094 void __user *buffer, size_t *lenp, loff_t *ppos) 2095 { 2096 int ret; 2097 2098 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 2099 if (ret == 0 && write) 2100 mod_delayed_work(system_wq, &dirtytime_work, 0); 2101 return ret; 2102 } 2103 2104 static noinline void block_dump___mark_inode_dirty(struct inode *inode) 2105 { 2106 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { 2107 struct dentry *dentry; 2108 const char *name = "?"; 2109 2110 dentry = d_find_alias(inode); 2111 if (dentry) { 2112 spin_lock(&dentry->d_lock); 2113 name = (const char *) dentry->d_name.name; 2114 } 2115 printk(KERN_DEBUG 2116 "%s(%d): dirtied inode %lu (%s) on %s\n", 2117 current->comm, task_pid_nr(current), inode->i_ino, 2118 name, inode->i_sb->s_id); 2119 if (dentry) { 2120 spin_unlock(&dentry->d_lock); 2121 dput(dentry); 2122 } 2123 } 2124 } 2125 2126 /** 2127 * __mark_inode_dirty - internal function 2128 * 2129 * @inode: inode to mark 2130 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC) 2131 * 2132 * Mark an inode as dirty. Callers should use mark_inode_dirty or 2133 * mark_inode_dirty_sync. 2134 * 2135 * Put the inode on the super block's dirty list. 2136 * 2137 * CAREFUL! We mark it dirty unconditionally, but move it onto the 2138 * dirty list only if it is hashed or if it refers to a blockdev. 2139 * If it was not hashed, it will never be added to the dirty list 2140 * even if it is later hashed, as it will have been marked dirty already. 2141 * 2142 * In short, make sure you hash any inodes _before_ you start marking 2143 * them dirty. 2144 * 2145 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of 2146 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of 2147 * the kernel-internal blockdev inode represents the dirtying time of the 2148 * blockdev's pages. This is why for I_DIRTY_PAGES we always use 2149 * page->mapping->host, so the page-dirtying time is recorded in the internal 2150 * blockdev inode. 
2151 */ 2152 void __mark_inode_dirty(struct inode *inode, int flags) 2153 { 2154 struct super_block *sb = inode->i_sb; 2155 int dirtytime; 2156 2157 trace_writeback_mark_inode_dirty(inode, flags); 2158 2159 /* 2160 * Don't do this for I_DIRTY_PAGES - that doesn't actually 2161 * dirty the inode itself 2162 */ 2163 if (flags & (I_DIRTY_INODE | I_DIRTY_TIME)) { 2164 trace_writeback_dirty_inode_start(inode, flags); 2165 2166 if (sb->s_op->dirty_inode) 2167 sb->s_op->dirty_inode(inode, flags); 2168 2169 trace_writeback_dirty_inode(inode, flags); 2170 } 2171 if (flags & I_DIRTY_INODE) 2172 flags &= ~I_DIRTY_TIME; 2173 dirtytime = flags & I_DIRTY_TIME; 2174 2175 /* 2176 * Paired with smp_mb() in __writeback_single_inode() for the 2177 * following lockless i_state test. See there for details. 2178 */ 2179 smp_mb(); 2180 2181 if (((inode->i_state & flags) == flags) || 2182 (dirtytime && (inode->i_state & I_DIRTY_INODE))) 2183 return; 2184 2185 if (unlikely(block_dump)) 2186 block_dump___mark_inode_dirty(inode); 2187 2188 spin_lock(&inode->i_lock); 2189 if (dirtytime && (inode->i_state & I_DIRTY_INODE)) 2190 goto out_unlock_inode; 2191 if ((inode->i_state & flags) != flags) { 2192 const int was_dirty = inode->i_state & I_DIRTY; 2193 2194 inode_attach_wb(inode, NULL); 2195 2196 if (flags & I_DIRTY_INODE) 2197 inode->i_state &= ~I_DIRTY_TIME; 2198 inode->i_state |= flags; 2199 2200 /* 2201 * If the inode is being synced, just update its dirty state. 2202 * The unlocker will place the inode on the appropriate 2203 * superblock list, based upon its state. 2204 */ 2205 if (inode->i_state & I_SYNC) 2206 goto out_unlock_inode; 2207 2208 /* 2209 * Only add valid (hashed) inodes to the superblock's 2210 * dirty list. Add blockdev inodes as well. 2211 */ 2212 if (!S_ISBLK(inode->i_mode)) { 2213 if (inode_unhashed(inode)) 2214 goto out_unlock_inode; 2215 } 2216 if (inode->i_state & I_FREEING) 2217 goto out_unlock_inode; 2218 2219 /* 2220 * If the inode was already on b_dirty/b_io/b_more_io, don't 2221 * reposition it (that would break b_dirty time-ordering). 2222 */ 2223 if (!was_dirty) { 2224 struct bdi_writeback *wb; 2225 struct list_head *dirty_list; 2226 bool wakeup_bdi = false; 2227 2228 wb = locked_inode_to_wb_and_lock_list(inode); 2229 2230 WARN(bdi_cap_writeback_dirty(wb->bdi) && 2231 !test_bit(WB_registered, &wb->state), 2232 "bdi-%s not registered\n", wb->bdi->name); 2233 2234 inode->dirtied_when = jiffies; 2235 if (dirtytime) 2236 inode->dirtied_time_when = jiffies; 2237 2238 if (inode->i_state & I_DIRTY) 2239 dirty_list = &wb->b_dirty; 2240 else 2241 dirty_list = &wb->b_dirty_time; 2242 2243 wakeup_bdi = inode_io_list_move_locked(inode, wb, 2244 dirty_list); 2245 2246 spin_unlock(&wb->list_lock); 2247 trace_writeback_dirty_inode_enqueue(inode); 2248 2249 /* 2250 * If this is the first dirty inode for this bdi, 2251 * we have to wake-up the corresponding bdi thread 2252 * to make sure background write-back happens 2253 * later. 2254 */ 2255 if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi) 2256 wb_wakeup_delayed(wb); 2257 return; 2258 } 2259 } 2260 out_unlock_inode: 2261 spin_unlock(&inode->i_lock); 2262 } 2263 EXPORT_SYMBOL(__mark_inode_dirty); 2264 2265 /* 2266 * The @s_sync_lock is used to serialise concurrent sync operations 2267 * to avoid lock contention problems with concurrent wait_sb_inodes() calls. 2268 * Concurrent callers will block on the s_sync_lock rather than doing contending 2269 * walks. 
The queueing maintains sync(2) required behaviour as all the IO that
2270  * has been issued up to the time this function is entered is guaranteed to be
2271  * completed by the time we have gained the lock and waited for all IO that is
2272  * in progress regardless of the order callers are granted the lock.
2273  */
2274 static void wait_sb_inodes(struct super_block *sb)
2275 {
2276 	LIST_HEAD(sync_list);
2277 
2278 	/*
2279 	 * We need to be protected against the filesystem going from
2280 	 * r/o to r/w or vice versa.
2281 	 */
2282 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2283 
2284 	mutex_lock(&sb->s_sync_lock);
2285 
2286 	/*
2287 	 * Splice the writeback list onto a temporary list to avoid waiting on
2288 	 * inodes that have started writeback after this point.
2289 	 *
2290 	 * Use rcu_read_lock() to keep the inodes around until we have a
2291 	 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2292 	 * the local list because inodes can be dropped from either by writeback
2293 	 * completion.
2294 	 */
2295 	rcu_read_lock();
2296 	spin_lock_irq(&sb->s_inode_wblist_lock);
2297 	list_splice_init(&sb->s_inodes_wb, &sync_list);
2298 
2299 	/*
2300 	 * Data integrity sync. Must wait for all pages under writeback,
2301 	 * because there may have been pages dirtied before our sync call
2302 	 * whose writeout had already started. In that case the inode may
2303 	 * no longer be on the dirty list, but we still have to wait for
2304 	 * that writeout to complete.
2305 	 */
2306 	while (!list_empty(&sync_list)) {
2307 		struct inode *inode = list_first_entry(&sync_list, struct inode,
2308 						       i_wb_list);
2309 		struct address_space *mapping = inode->i_mapping;
2310 
2311 		/*
2312 		 * Move each inode back to the wb list before we drop the lock
2313 		 * to preserve consistency between i_wb_list and the mapping
2314 		 * writeback tag. Writeback completion is responsible for
2315 		 * removing the inode from either list once the writeback tag is cleared.
2316 		 */
2317 		list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2318 
2319 		/*
2320 		 * The mapping can appear untagged while still on-list since we
2321 		 * do not have the mapping lock. Skip it here, wb completion
2322 		 * will remove it.
2323 		 */
2324 		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2325 			continue;
2326 
2327 		spin_unlock_irq(&sb->s_inode_wblist_lock);
2328 
2329 		spin_lock(&inode->i_lock);
2330 		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2331 			spin_unlock(&inode->i_lock);
2332 
2333 			spin_lock_irq(&sb->s_inode_wblist_lock);
2334 			continue;
2335 		}
2336 		__iget(inode);
2337 		spin_unlock(&inode->i_lock);
2338 		rcu_read_unlock();
2339 
2340 		/*
2341 		 * We keep the error status of each individual mapping so that
2342 		 * applications can catch writeback errors using fsync(2).
2343 		 * See filemap_fdatawait_keep_errors() for details.
2344 		 */
2345 		filemap_fdatawait_keep_errors(mapping);
2346 
2347 		cond_resched();
2348 
2349 		iput(inode);
2350 
2351 		rcu_read_lock();
2352 		spin_lock_irq(&sb->s_inode_wblist_lock);
2353 	}
2354 	spin_unlock_irq(&sb->s_inode_wblist_lock);
2355 	rcu_read_unlock();
2356 	mutex_unlock(&sb->s_sync_lock);
2357 }
2358 
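/*
 * Illustrative sketch, not part of fs-writeback.c: the core splice-then-drain
 * pattern that wait_sb_inodes() above relies on, reduced to a hypothetical
 * spinlock-protected "pending" list. Every identifier below (struct item,
 * pending_lock, pending_list, drain_pending) is invented for the example.
 * The real function additionally moves each inode back onto sb->s_inodes_wb
 * before dropping the lock so that i_wb_list stays consistent with the
 * mapping's writeback tag.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
};

static DEFINE_SPINLOCK(pending_lock);
static LIST_HEAD(pending_list);

static void drain_pending(void)
{
	LIST_HEAD(local);	/* private list, like sync_list above */

	/*
	 * Steal everything queued up to this point; items queued after the
	 * splice are deliberately not waited on by this call.
	 */
	spin_lock(&pending_lock);
	list_splice_init(&pending_list, &local);
	spin_unlock(&pending_lock);

	/* Process the stolen items without holding the shared lock. */
	while (!list_empty(&local)) {
		struct item *it = list_first_entry(&local, struct item, node);

		list_del_init(&it->node);
		/* ... wait on or otherwise consume 'it' here ... */
	}
}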
2359 static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2360 				      enum wb_reason reason, bool skip_if_busy)
2361 {
2362 	DEFINE_WB_COMPLETION_ONSTACK(done);
2363 	struct wb_writeback_work work = {
2364 		.sb = sb,
2365 		.sync_mode = WB_SYNC_NONE,
2366 		.tagged_writepages = 1,
2367 		.done = &done,
2368 		.nr_pages = nr,
2369 		.reason = reason,
2370 	};
2371 	struct backing_dev_info *bdi = sb->s_bdi;
2372 
2373 	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2374 		return;
2375 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2376 
2377 	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
2378 	wb_wait_for_completion(bdi, &done);
2379 }
2380 
2381 /**
2382  * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
2383  * @sb: the superblock
2384  * @nr: the number of pages to write
2385  * @reason: reason why some writeback work was initiated
2386  *
2387  * Start writeback on some inodes on this super_block. No guarantees are made
2388  * on how many (if any) will be written, and this function does not wait
2389  * for IO completion of submitted IO.
2390  */
2391 void writeback_inodes_sb_nr(struct super_block *sb,
2392 			    unsigned long nr,
2393 			    enum wb_reason reason)
2394 {
2395 	__writeback_inodes_sb_nr(sb, nr, reason, false);
2396 }
2397 EXPORT_SYMBOL(writeback_inodes_sb_nr);
2398 
2399 /**
2400  * writeback_inodes_sb - writeback dirty inodes from given super_block
2401  * @sb: the superblock
2402  * @reason: reason why some writeback work was initiated
2403  *
2404  * Start writeback on some inodes on this super_block. No guarantees are made
2405  * on how many (if any) will be written, and this function does not wait
2406  * for IO completion of submitted IO.
2407  */
2408 void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2409 {
2410 	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2411 }
2412 EXPORT_SYMBOL(writeback_inodes_sb);
2413 
2414 /**
2415  * try_to_writeback_inodes_sb - try to start writeback if none underway
2416  * @sb: the superblock
2417  * @reason: reason why some writeback work was initiated
2418  *
2419  * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
2420  */
2421 void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2422 {
2423 	if (!down_read_trylock(&sb->s_umount))
2424 		return;
2425 
2426 	__writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2427 	up_read(&sb->s_umount);
2428 }
2429 EXPORT_SYMBOL(try_to_writeback_inodes_sb);
2430 
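/*
 * Illustrative usage sketch, not part of fs-writeback.c: roughly how a
 * filesystem might kick asynchronous, superblock-wide writeback when it runs
 * short of free space, in the spirit of existing try_to_writeback_inodes_sb()
 * callers. "examplefs" and the helper below are hypothetical names.
 */
#include <linux/fs.h>
#include <linux/writeback.h>

static void examplefs_try_to_free_space(struct super_block *sb)
{
	/*
	 * Start WB_SYNC_NONE writeback of dirty pages on @sb so that space
	 * held by delayed allocation can be converted and released. The call
	 * silently does nothing if sb->s_umount cannot be read-locked (e.g.
	 * during umount/remount), and it never waits for the submitted IO.
	 */
	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
}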
2431 /**
2432  * sync_inodes_sb - sync sb inode pages
2433  * @sb: the superblock
2434  *
2435  * This function writes and waits on any dirty inode belonging to this
2436  * super_block.
2437  */
2438 void sync_inodes_sb(struct super_block *sb)
2439 {
2440 	DEFINE_WB_COMPLETION_ONSTACK(done);
2441 	struct wb_writeback_work work = {
2442 		.sb = sb,
2443 		.sync_mode = WB_SYNC_ALL,
2444 		.nr_pages = LONG_MAX,
2445 		.range_cyclic = 0,
2446 		.done = &done,
2447 		.reason = WB_REASON_SYNC,
2448 		.for_sync = 1,
2449 	};
2450 	struct backing_dev_info *bdi = sb->s_bdi;
2451 
2452 	/*
2453 	 * Can't skip on !bdi_has_dirty_io() because we should wait for !dirty
2454 	 * inodes under writeback, and I_DIRTY_TIME inodes ignored by
2455 	 * bdi_has_dirty_io() need to be written out too.
2456 	 */
2457 	if (bdi == &noop_backing_dev_info)
2458 		return;
2459 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
2460 
2461 	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2462 	bdi_down_write_wb_switch_rwsem(bdi);
2463 	bdi_split_work_to_wbs(bdi, &work, false);
2464 	wb_wait_for_completion(bdi, &done);
2465 	bdi_up_write_wb_switch_rwsem(bdi);
2466 
2467 	wait_sb_inodes(sb);
2468 }
2469 EXPORT_SYMBOL(sync_inodes_sb);
2470 
2471 /**
2472  * write_inode_now - write an inode to disk
2473  * @inode: inode to write to disk
2474  * @sync: whether the write should be synchronous or not
2475  *
2476  * This function commits an inode to disk immediately if it is dirty. This is
2477  * primarily needed by knfsd.
2478  *
2479  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2480  */
2481 int write_inode_now(struct inode *inode, int sync)
2482 {
2483 	struct writeback_control wbc = {
2484 		.nr_to_write = LONG_MAX,
2485 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2486 		.range_start = 0,
2487 		.range_end = LLONG_MAX,
2488 	};
2489 
2490 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
2491 		wbc.nr_to_write = 0;
2492 
2493 	might_sleep();
2494 	return writeback_single_inode(inode, &wbc);
2495 }
2496 EXPORT_SYMBOL(write_inode_now);
2497 
2498 /**
2499  * sync_inode - write an inode and its pages to disk.
2500  * @inode: the inode to sync
2501  * @wbc: controls the writeback mode
2502  *
2503  * sync_inode() will write an inode and its pages to disk. It will also
2504  * correctly update the inode on its superblock's dirty inode lists and will
2505  * update inode->i_state.
2506  *
2507  * The caller must have a ref on the inode.
2508  */
2509 int sync_inode(struct inode *inode, struct writeback_control *wbc)
2510 {
2511 	return writeback_single_inode(inode, wbc);
2512 }
2513 EXPORT_SYMBOL(sync_inode);
2514 
2515 /**
2516  * sync_inode_metadata - write an inode to disk
2517  * @inode: the inode to sync
2518  * @wait: wait for I/O to complete.
2519  *
2520  * Write an inode to disk and adjust its dirty state after completion.
2521  *
2522  * Note: only writes the actual inode, no associated data or other metadata.
2523  */
2524 int sync_inode_metadata(struct inode *inode, int wait)
2525 {
2526 	struct writeback_control wbc = {
2527 		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2528 		.nr_to_write = 0, /* metadata-only */
2529 	};
2530 
2531 	return sync_inode(inode, &wbc);
2532 }
2533 EXPORT_SYMBOL(sync_inode_metadata);
2534
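/*
 * Illustrative usage sketch, not part of fs-writeback.c: a minimal ->fsync()
 * for a hypothetical "examplefs", showing where sync_inode_metadata() fits.
 * Real implementations (compare __generic_file_fsync()) add datasync
 * shortcuts and metadata buffer syncing on top of this; the datasync
 * optimisation is ignored in this sketch.
 */
#include <linux/fs.h>
#include <linux/writeback.h>

static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *inode = file_inode(file);
	int err;

	/* Write and wait on the dirty data pages in the requested range. */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Then write the inode itself (timestamps, i_size, ...) and wait. */
	return sync_inode_metadata(inode, 1);
}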