1 /* 2 * fs/fs-writeback.c 3 * 4 * Copyright (C) 2002, Linus Torvalds. 5 * 6 * Contains all the functions related to writing back and waiting 7 * upon dirty inodes against superblocks, and writing back dirty 8 * pages against inodes. ie: data writeback. Writeout of the 9 * inode itself is not handled here. 10 * 11 * 10Apr2002 Andrew Morton 12 * Split out of fs/inode.c 13 * Additions for address_space-based writeback 14 */ 15 16 #include <linux/kernel.h> 17 #include <linux/export.h> 18 #include <linux/spinlock.h> 19 #include <linux/slab.h> 20 #include <linux/sched.h> 21 #include <linux/fs.h> 22 #include <linux/mm.h> 23 #include <linux/pagemap.h> 24 #include <linux/kthread.h> 25 #include <linux/writeback.h> 26 #include <linux/blkdev.h> 27 #include <linux/backing-dev.h> 28 #include <linux/tracepoint.h> 29 #include <linux/device.h> 30 #include <linux/memcontrol.h> 31 #include "internal.h" 32 33 /* 34 * 4MB minimal write chunk size 35 */ 36 #define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10)) 37 38 struct wb_completion { 39 atomic_t cnt; 40 }; 41 42 /* 43 * Passed into wb_writeback(), essentially a subset of writeback_control 44 */ 45 struct wb_writeback_work { 46 long nr_pages; 47 struct super_block *sb; 48 unsigned long *older_than_this; 49 enum writeback_sync_modes sync_mode; 50 unsigned int tagged_writepages:1; 51 unsigned int for_kupdate:1; 52 unsigned int range_cyclic:1; 53 unsigned int for_background:1; 54 unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ 55 unsigned int auto_free:1; /* free on completion */ 56 enum wb_reason reason; /* why was writeback initiated? */ 57 58 struct list_head list; /* pending work list */ 59 struct wb_completion *done; /* set if the caller waits */ 60 }; 61 62 /* 63 * If one wants to wait for one or more wb_writeback_works, each work's 64 * ->done should be set to a wb_completion defined using the following 65 * macro. Once all work items are issued with wb_queue_work(), the caller 66 * can wait for the completion of all using wb_wait_for_completion(). Work 67 * items which are waited upon aren't freed automatically on completion. 68 */ 69 #define DEFINE_WB_COMPLETION_ONSTACK(cmpl) \ 70 struct wb_completion cmpl = { \ 71 .cnt = ATOMIC_INIT(1), \ 72 } 73 74 75 /* 76 * If an inode is constantly having its pages dirtied, but then the 77 * updates stop dirtytime_expire_interval seconds in the past, it's 78 * possible for the worst case time between when an inode has its 79 * timestamps updated and when they finally get written out to be two 80 * dirtytime_expire_intervals. We set the default to 12 hours (in 81 * seconds), which means most of the time inodes will have their 82 * timestamps written to disk after 12 hours, but in the worst case a 83 * few inodes might not their timestamps updated for 24 hours. 84 */ 85 unsigned int dirtytime_expire_interval = 12 * 60 * 60; 86 87 static inline struct inode *wb_inode(struct list_head *head) 88 { 89 return list_entry(head, struct inode, i_io_list); 90 } 91 92 /* 93 * Include the creation of the trace points after defining the 94 * wb_writeback_work structure and inline functions so that the definition 95 * remains local to this file. 96 */ 97 #define CREATE_TRACE_POINTS 98 #include <trace/events/writeback.h> 99 100 EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage); 101 102 static bool wb_io_lists_populated(struct bdi_writeback *wb) 103 { 104 if (wb_has_dirty_io(wb)) { 105 return false; 106 } else { 107 set_bit(WB_has_dirty_io, &wb->state); 108 WARN_ON_ONCE(!wb->avg_write_bandwidth); 109 atomic_long_add(wb->avg_write_bandwidth, 110 &wb->bdi->tot_write_bandwidth); 111 return true; 112 } 113 } 114 115 static void wb_io_lists_depopulated(struct bdi_writeback *wb) 116 { 117 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) && 118 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) { 119 clear_bit(WB_has_dirty_io, &wb->state); 120 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth, 121 &wb->bdi->tot_write_bandwidth) < 0); 122 } 123 } 124 125 /** 126 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list 127 * @inode: inode to be moved 128 * @wb: target bdi_writeback 129 * @head: one of @wb->b_{dirty|io|more_io} 130 * 131 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io. 132 * Returns %true if @inode is the first occupant of the !dirty_time IO 133 * lists; otherwise, %false. 134 */ 135 static bool inode_io_list_move_locked(struct inode *inode, 136 struct bdi_writeback *wb, 137 struct list_head *head) 138 { 139 assert_spin_locked(&wb->list_lock); 140 141 list_move(&inode->i_io_list, head); 142 143 /* dirty_time doesn't count as dirty_io until expiration */ 144 if (head != &wb->b_dirty_time) 145 return wb_io_lists_populated(wb); 146 147 wb_io_lists_depopulated(wb); 148 return false; 149 } 150 151 /** 152 * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list 153 * @inode: inode to be removed 154 * @wb: bdi_writeback @inode is being removed from 155 * 156 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and 157 * clear %WB_has_dirty_io if all are empty afterwards. 158 */ 159 static void inode_io_list_del_locked(struct inode *inode, 160 struct bdi_writeback *wb) 161 { 162 assert_spin_locked(&wb->list_lock); 163 164 list_del_init(&inode->i_io_list); 165 wb_io_lists_depopulated(wb); 166 } 167 168 static void wb_wakeup(struct bdi_writeback *wb) 169 { 170 spin_lock_bh(&wb->work_lock); 171 if (test_bit(WB_registered, &wb->state)) 172 mod_delayed_work(bdi_wq, &wb->dwork, 0); 173 spin_unlock_bh(&wb->work_lock); 174 } 175 176 static void wb_queue_work(struct bdi_writeback *wb, 177 struct wb_writeback_work *work) 178 { 179 trace_writeback_queue(wb, work); 180 181 spin_lock_bh(&wb->work_lock); 182 if (!test_bit(WB_registered, &wb->state)) 183 goto out_unlock; 184 if (work->done) 185 atomic_inc(&work->done->cnt); 186 list_add_tail(&work->list, &wb->work_list); 187 mod_delayed_work(bdi_wq, &wb->dwork, 0); 188 out_unlock: 189 spin_unlock_bh(&wb->work_lock); 190 } 191 192 /** 193 * wb_wait_for_completion - wait for completion of bdi_writeback_works 194 * @bdi: bdi work items were issued to 195 * @done: target wb_completion 196 * 197 * Wait for one or more work items issued to @bdi with their ->done field 198 * set to @done, which should have been defined with 199 * DEFINE_WB_COMPLETION_ONSTACK(). This function returns after all such 200 * work items are completed. Work items which are waited upon aren't freed 201 * automatically on completion. 202 */ 203 static void wb_wait_for_completion(struct backing_dev_info *bdi, 204 struct wb_completion *done) 205 { 206 atomic_dec(&done->cnt); /* put down the initial count */ 207 wait_event(bdi->wb_waitq, !atomic_read(&done->cnt)); 208 } 209 210 #ifdef CONFIG_CGROUP_WRITEBACK 211 212 /* parameters for foreign inode detection, see wb_detach_inode() */ 213 #define WB_FRN_TIME_SHIFT 13 /* 1s = 2^13, upto 8 secs w/ 16bit */ 214 #define WB_FRN_TIME_AVG_SHIFT 3 /* avg = avg * 7/8 + new * 1/8 */ 215 #define WB_FRN_TIME_CUT_DIV 2 /* ignore rounds < avg / 2 */ 216 #define WB_FRN_TIME_PERIOD (2 * (1 << WB_FRN_TIME_SHIFT)) /* 2s */ 217 218 #define WB_FRN_HIST_SLOTS 16 /* inode->i_wb_frn_history is 16bit */ 219 #define WB_FRN_HIST_UNIT (WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS) 220 /* each slot's duration is 2s / 16 */ 221 #define WB_FRN_HIST_THR_SLOTS (WB_FRN_HIST_SLOTS / 2) 222 /* if foreign slots >= 8, switch */ 223 #define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1) 224 /* one round can affect upto 5 slots */ 225 226 static atomic_t isw_nr_in_flight = ATOMIC_INIT(0); 227 static struct workqueue_struct *isw_wq; 228 229 void __inode_attach_wb(struct inode *inode, struct page *page) 230 { 231 struct backing_dev_info *bdi = inode_to_bdi(inode); 232 struct bdi_writeback *wb = NULL; 233 234 if (inode_cgwb_enabled(inode)) { 235 struct cgroup_subsys_state *memcg_css; 236 237 if (page) { 238 memcg_css = mem_cgroup_css_from_page(page); 239 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); 240 } else { 241 /* must pin memcg_css, see wb_get_create() */ 242 memcg_css = task_get_css(current, memory_cgrp_id); 243 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); 244 css_put(memcg_css); 245 } 246 } 247 248 if (!wb) 249 wb = &bdi->wb; 250 251 /* 252 * There may be multiple instances of this function racing to 253 * update the same inode. Use cmpxchg() to tell the winner. 254 */ 255 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) 256 wb_put(wb); 257 } 258 259 /** 260 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it 261 * @inode: inode of interest with i_lock held 262 * 263 * Returns @inode's wb with its list_lock held. @inode->i_lock must be 264 * held on entry and is released on return. The returned wb is guaranteed 265 * to stay @inode's associated wb until its list_lock is released. 266 */ 267 static struct bdi_writeback * 268 locked_inode_to_wb_and_lock_list(struct inode *inode) 269 __releases(&inode->i_lock) 270 __acquires(&wb->list_lock) 271 { 272 while (true) { 273 struct bdi_writeback *wb = inode_to_wb(inode); 274 275 /* 276 * inode_to_wb() association is protected by both 277 * @inode->i_lock and @wb->list_lock but list_lock nests 278 * outside i_lock. Drop i_lock and verify that the 279 * association hasn't changed after acquiring list_lock. 280 */ 281 wb_get(wb); 282 spin_unlock(&inode->i_lock); 283 spin_lock(&wb->list_lock); 284 285 /* i_wb may have changed inbetween, can't use inode_to_wb() */ 286 if (likely(wb == inode->i_wb)) { 287 wb_put(wb); /* @inode already has ref */ 288 return wb; 289 } 290 291 spin_unlock(&wb->list_lock); 292 wb_put(wb); 293 cpu_relax(); 294 spin_lock(&inode->i_lock); 295 } 296 } 297 298 /** 299 * inode_to_wb_and_lock_list - determine an inode's wb and lock it 300 * @inode: inode of interest 301 * 302 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held 303 * on entry. 304 */ 305 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode) 306 __acquires(&wb->list_lock) 307 { 308 spin_lock(&inode->i_lock); 309 return locked_inode_to_wb_and_lock_list(inode); 310 } 311 312 struct inode_switch_wbs_context { 313 struct inode *inode; 314 struct bdi_writeback *new_wb; 315 316 struct rcu_head rcu_head; 317 struct work_struct work; 318 }; 319 320 static void inode_switch_wbs_work_fn(struct work_struct *work) 321 { 322 struct inode_switch_wbs_context *isw = 323 container_of(work, struct inode_switch_wbs_context, work); 324 struct inode *inode = isw->inode; 325 struct address_space *mapping = inode->i_mapping; 326 struct bdi_writeback *old_wb = inode->i_wb; 327 struct bdi_writeback *new_wb = isw->new_wb; 328 struct radix_tree_iter iter; 329 bool switched = false; 330 void **slot; 331 332 /* 333 * By the time control reaches here, RCU grace period has passed 334 * since I_WB_SWITCH assertion and all wb stat update transactions 335 * between unlocked_inode_to_wb_begin/end() are guaranteed to be 336 * synchronizing against mapping->tree_lock. 337 * 338 * Grabbing old_wb->list_lock, inode->i_lock and mapping->tree_lock 339 * gives us exclusion against all wb related operations on @inode 340 * including IO list manipulations and stat updates. 341 */ 342 if (old_wb < new_wb) { 343 spin_lock(&old_wb->list_lock); 344 spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING); 345 } else { 346 spin_lock(&new_wb->list_lock); 347 spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING); 348 } 349 spin_lock(&inode->i_lock); 350 spin_lock_irq(&mapping->tree_lock); 351 352 /* 353 * Once I_FREEING is visible under i_lock, the eviction path owns 354 * the inode and we shouldn't modify ->i_io_list. 355 */ 356 if (unlikely(inode->i_state & I_FREEING)) 357 goto skip_switch; 358 359 /* 360 * Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points 361 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to 362 * pages actually under underwriteback. 363 */ 364 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0, 365 PAGECACHE_TAG_DIRTY) { 366 struct page *page = radix_tree_deref_slot_protected(slot, 367 &mapping->tree_lock); 368 if (likely(page) && PageDirty(page)) { 369 __dec_wb_stat(old_wb, WB_RECLAIMABLE); 370 __inc_wb_stat(new_wb, WB_RECLAIMABLE); 371 } 372 } 373 374 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0, 375 PAGECACHE_TAG_WRITEBACK) { 376 struct page *page = radix_tree_deref_slot_protected(slot, 377 &mapping->tree_lock); 378 if (likely(page)) { 379 WARN_ON_ONCE(!PageWriteback(page)); 380 __dec_wb_stat(old_wb, WB_WRITEBACK); 381 __inc_wb_stat(new_wb, WB_WRITEBACK); 382 } 383 } 384 385 wb_get(new_wb); 386 387 /* 388 * Transfer to @new_wb's IO list if necessary. The specific list 389 * @inode was on is ignored and the inode is put on ->b_dirty which 390 * is always correct including from ->b_dirty_time. The transfer 391 * preserves @inode->dirtied_when ordering. 392 */ 393 if (!list_empty(&inode->i_io_list)) { 394 struct inode *pos; 395 396 inode_io_list_del_locked(inode, old_wb); 397 inode->i_wb = new_wb; 398 list_for_each_entry(pos, &new_wb->b_dirty, i_io_list) 399 if (time_after_eq(inode->dirtied_when, 400 pos->dirtied_when)) 401 break; 402 inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev); 403 } else { 404 inode->i_wb = new_wb; 405 } 406 407 /* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */ 408 inode->i_wb_frn_winner = 0; 409 inode->i_wb_frn_avg_time = 0; 410 inode->i_wb_frn_history = 0; 411 switched = true; 412 skip_switch: 413 /* 414 * Paired with load_acquire in unlocked_inode_to_wb_begin() and 415 * ensures that the new wb is visible if they see !I_WB_SWITCH. 416 */ 417 smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH); 418 419 spin_unlock_irq(&mapping->tree_lock); 420 spin_unlock(&inode->i_lock); 421 spin_unlock(&new_wb->list_lock); 422 spin_unlock(&old_wb->list_lock); 423 424 if (switched) { 425 wb_wakeup(new_wb); 426 wb_put(old_wb); 427 } 428 wb_put(new_wb); 429 430 iput(inode); 431 kfree(isw); 432 433 atomic_dec(&isw_nr_in_flight); 434 } 435 436 static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head) 437 { 438 struct inode_switch_wbs_context *isw = container_of(rcu_head, 439 struct inode_switch_wbs_context, rcu_head); 440 441 /* needs to grab bh-unsafe locks, bounce to work item */ 442 INIT_WORK(&isw->work, inode_switch_wbs_work_fn); 443 queue_work(isw_wq, &isw->work); 444 } 445 446 /** 447 * inode_switch_wbs - change the wb association of an inode 448 * @inode: target inode 449 * @new_wb_id: ID of the new wb 450 * 451 * Switch @inode's wb association to the wb identified by @new_wb_id. The 452 * switching is performed asynchronously and may fail silently. 453 */ 454 static void inode_switch_wbs(struct inode *inode, int new_wb_id) 455 { 456 struct backing_dev_info *bdi = inode_to_bdi(inode); 457 struct cgroup_subsys_state *memcg_css; 458 struct inode_switch_wbs_context *isw; 459 460 /* noop if seems to be already in progress */ 461 if (inode->i_state & I_WB_SWITCH) 462 return; 463 464 isw = kzalloc(sizeof(*isw), GFP_ATOMIC); 465 if (!isw) 466 return; 467 468 /* find and pin the new wb */ 469 rcu_read_lock(); 470 memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys); 471 if (memcg_css) 472 isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); 473 rcu_read_unlock(); 474 if (!isw->new_wb) 475 goto out_free; 476 477 /* while holding I_WB_SWITCH, no one else can update the association */ 478 spin_lock(&inode->i_lock); 479 if (!(inode->i_sb->s_flags & MS_ACTIVE) || 480 inode->i_state & (I_WB_SWITCH | I_FREEING) || 481 inode_to_wb(inode) == isw->new_wb) { 482 spin_unlock(&inode->i_lock); 483 goto out_free; 484 } 485 inode->i_state |= I_WB_SWITCH; 486 spin_unlock(&inode->i_lock); 487 488 ihold(inode); 489 isw->inode = inode; 490 491 atomic_inc(&isw_nr_in_flight); 492 493 /* 494 * In addition to synchronizing among switchers, I_WB_SWITCH tells 495 * the RCU protected stat update paths to grab the mapping's 496 * tree_lock so that stat transfer can synchronize against them. 497 * Let's continue after I_WB_SWITCH is guaranteed to be visible. 498 */ 499 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); 500 return; 501 502 out_free: 503 if (isw->new_wb) 504 wb_put(isw->new_wb); 505 kfree(isw); 506 } 507 508 /** 509 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it 510 * @wbc: writeback_control of interest 511 * @inode: target inode 512 * 513 * @inode is locked and about to be written back under the control of @wbc. 514 * Record @inode's writeback context into @wbc and unlock the i_lock. On 515 * writeback completion, wbc_detach_inode() should be called. This is used 516 * to track the cgroup writeback context. 517 */ 518 void wbc_attach_and_unlock_inode(struct writeback_control *wbc, 519 struct inode *inode) 520 { 521 if (!inode_cgwb_enabled(inode)) { 522 spin_unlock(&inode->i_lock); 523 return; 524 } 525 526 wbc->wb = inode_to_wb(inode); 527 wbc->inode = inode; 528 529 wbc->wb_id = wbc->wb->memcg_css->id; 530 wbc->wb_lcand_id = inode->i_wb_frn_winner; 531 wbc->wb_tcand_id = 0; 532 wbc->wb_bytes = 0; 533 wbc->wb_lcand_bytes = 0; 534 wbc->wb_tcand_bytes = 0; 535 536 wb_get(wbc->wb); 537 spin_unlock(&inode->i_lock); 538 539 /* 540 * A dying wb indicates that the memcg-blkcg mapping has changed 541 * and a new wb is already serving the memcg. Switch immediately. 542 */ 543 if (unlikely(wb_dying(wbc->wb))) 544 inode_switch_wbs(inode, wbc->wb_id); 545 } 546 547 /** 548 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection 549 * @wbc: writeback_control of the just finished writeback 550 * 551 * To be called after a writeback attempt of an inode finishes and undoes 552 * wbc_attach_and_unlock_inode(). Can be called under any context. 553 * 554 * As concurrent write sharing of an inode is expected to be very rare and 555 * memcg only tracks page ownership on first-use basis severely confining 556 * the usefulness of such sharing, cgroup writeback tracks ownership 557 * per-inode. While the support for concurrent write sharing of an inode 558 * is deemed unnecessary, an inode being written to by different cgroups at 559 * different points in time is a lot more common, and, more importantly, 560 * charging only by first-use can too readily lead to grossly incorrect 561 * behaviors (single foreign page can lead to gigabytes of writeback to be 562 * incorrectly attributed). 563 * 564 * To resolve this issue, cgroup writeback detects the majority dirtier of 565 * an inode and transfers the ownership to it. To avoid unnnecessary 566 * oscillation, the detection mechanism keeps track of history and gives 567 * out the switch verdict only if the foreign usage pattern is stable over 568 * a certain amount of time and/or writeback attempts. 569 * 570 * On each writeback attempt, @wbc tries to detect the majority writer 571 * using Boyer-Moore majority vote algorithm. In addition to the byte 572 * count from the majority voting, it also counts the bytes written for the 573 * current wb and the last round's winner wb (max of last round's current 574 * wb, the winner from two rounds ago, and the last round's majority 575 * candidate). Keeping track of the historical winner helps the algorithm 576 * to semi-reliably detect the most active writer even when it's not the 577 * absolute majority. 578 * 579 * Once the winner of the round is determined, whether the winner is 580 * foreign or not and how much IO time the round consumed is recorded in 581 * inode->i_wb_frn_history. If the amount of recorded foreign IO time is 582 * over a certain threshold, the switch verdict is given. 583 */ 584 void wbc_detach_inode(struct writeback_control *wbc) 585 { 586 struct bdi_writeback *wb = wbc->wb; 587 struct inode *inode = wbc->inode; 588 unsigned long avg_time, max_bytes, max_time; 589 u16 history; 590 int max_id; 591 592 if (!wb) 593 return; 594 595 history = inode->i_wb_frn_history; 596 avg_time = inode->i_wb_frn_avg_time; 597 598 /* pick the winner of this round */ 599 if (wbc->wb_bytes >= wbc->wb_lcand_bytes && 600 wbc->wb_bytes >= wbc->wb_tcand_bytes) { 601 max_id = wbc->wb_id; 602 max_bytes = wbc->wb_bytes; 603 } else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) { 604 max_id = wbc->wb_lcand_id; 605 max_bytes = wbc->wb_lcand_bytes; 606 } else { 607 max_id = wbc->wb_tcand_id; 608 max_bytes = wbc->wb_tcand_bytes; 609 } 610 611 /* 612 * Calculate the amount of IO time the winner consumed and fold it 613 * into the running average kept per inode. If the consumed IO 614 * time is lower than avag / WB_FRN_TIME_CUT_DIV, ignore it for 615 * deciding whether to switch or not. This is to prevent one-off 616 * small dirtiers from skewing the verdict. 617 */ 618 max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT, 619 wb->avg_write_bandwidth); 620 if (avg_time) 621 avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) - 622 (avg_time >> WB_FRN_TIME_AVG_SHIFT); 623 else 624 avg_time = max_time; /* immediate catch up on first run */ 625 626 if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) { 627 int slots; 628 629 /* 630 * The switch verdict is reached if foreign wb's consume 631 * more than a certain proportion of IO time in a 632 * WB_FRN_TIME_PERIOD. This is loosely tracked by 16 slot 633 * history mask where each bit represents one sixteenth of 634 * the period. Determine the number of slots to shift into 635 * history from @max_time. 636 */ 637 slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT), 638 (unsigned long)WB_FRN_HIST_MAX_SLOTS); 639 history <<= slots; 640 if (wbc->wb_id != max_id) 641 history |= (1U << slots) - 1; 642 643 /* 644 * Switch if the current wb isn't the consistent winner. 645 * If there are multiple closely competing dirtiers, the 646 * inode may switch across them repeatedly over time, which 647 * is okay. The main goal is avoiding keeping an inode on 648 * the wrong wb for an extended period of time. 649 */ 650 if (hweight32(history) > WB_FRN_HIST_THR_SLOTS) 651 inode_switch_wbs(inode, max_id); 652 } 653 654 /* 655 * Multiple instances of this function may race to update the 656 * following fields but we don't mind occassional inaccuracies. 657 */ 658 inode->i_wb_frn_winner = max_id; 659 inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX); 660 inode->i_wb_frn_history = history; 661 662 wb_put(wbc->wb); 663 wbc->wb = NULL; 664 } 665 666 /** 667 * wbc_account_io - account IO issued during writeback 668 * @wbc: writeback_control of the writeback in progress 669 * @page: page being written out 670 * @bytes: number of bytes being written out 671 * 672 * @bytes from @page are about to written out during the writeback 673 * controlled by @wbc. Keep the book for foreign inode detection. See 674 * wbc_detach_inode(). 675 */ 676 void wbc_account_io(struct writeback_control *wbc, struct page *page, 677 size_t bytes) 678 { 679 int id; 680 681 /* 682 * pageout() path doesn't attach @wbc to the inode being written 683 * out. This is intentional as we don't want the function to block 684 * behind a slow cgroup. Ultimately, we want pageout() to kick off 685 * regular writeback instead of writing things out itself. 686 */ 687 if (!wbc->wb) 688 return; 689 690 id = mem_cgroup_css_from_page(page)->id; 691 692 if (id == wbc->wb_id) { 693 wbc->wb_bytes += bytes; 694 return; 695 } 696 697 if (id == wbc->wb_lcand_id) 698 wbc->wb_lcand_bytes += bytes; 699 700 /* Boyer-Moore majority vote algorithm */ 701 if (!wbc->wb_tcand_bytes) 702 wbc->wb_tcand_id = id; 703 if (id == wbc->wb_tcand_id) 704 wbc->wb_tcand_bytes += bytes; 705 else 706 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); 707 } 708 EXPORT_SYMBOL_GPL(wbc_account_io); 709 710 /** 711 * inode_congested - test whether an inode is congested 712 * @inode: inode to test for congestion (may be NULL) 713 * @cong_bits: mask of WB_[a]sync_congested bits to test 714 * 715 * Tests whether @inode is congested. @cong_bits is the mask of congestion 716 * bits to test and the return value is the mask of set bits. 717 * 718 * If cgroup writeback is enabled for @inode, the congestion state is 719 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg 720 * associated with @inode is congested; otherwise, the root wb's congestion 721 * state is used. 722 * 723 * @inode is allowed to be NULL as this function is often called on 724 * mapping->host which is NULL for the swapper space. 725 */ 726 int inode_congested(struct inode *inode, int cong_bits) 727 { 728 /* 729 * Once set, ->i_wb never becomes NULL while the inode is alive. 730 * Start transaction iff ->i_wb is visible. 731 */ 732 if (inode && inode_to_wb_is_valid(inode)) { 733 struct bdi_writeback *wb; 734 bool locked, congested; 735 736 wb = unlocked_inode_to_wb_begin(inode, &locked); 737 congested = wb_congested(wb, cong_bits); 738 unlocked_inode_to_wb_end(inode, locked); 739 return congested; 740 } 741 742 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); 743 } 744 EXPORT_SYMBOL_GPL(inode_congested); 745 746 /** 747 * wb_split_bdi_pages - split nr_pages to write according to bandwidth 748 * @wb: target bdi_writeback to split @nr_pages to 749 * @nr_pages: number of pages to write for the whole bdi 750 * 751 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in 752 * relation to the total write bandwidth of all wb's w/ dirty inodes on 753 * @wb->bdi. 754 */ 755 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) 756 { 757 unsigned long this_bw = wb->avg_write_bandwidth; 758 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); 759 760 if (nr_pages == LONG_MAX) 761 return LONG_MAX; 762 763 /* 764 * This may be called on clean wb's and proportional distribution 765 * may not make sense, just use the original @nr_pages in those 766 * cases. In general, we wanna err on the side of writing more. 767 */ 768 if (!tot_bw || this_bw >= tot_bw) 769 return nr_pages; 770 else 771 return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw); 772 } 773 774 /** 775 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi 776 * @bdi: target backing_dev_info 777 * @base_work: wb_writeback_work to issue 778 * @skip_if_busy: skip wb's which already have writeback in progress 779 * 780 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which 781 * have dirty inodes. If @base_work->nr_page isn't %LONG_MAX, it's 782 * distributed to the busy wbs according to each wb's proportion in the 783 * total active write bandwidth of @bdi. 784 */ 785 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, 786 struct wb_writeback_work *base_work, 787 bool skip_if_busy) 788 { 789 struct bdi_writeback *last_wb = NULL; 790 struct bdi_writeback *wb = list_entry(&bdi->wb_list, 791 struct bdi_writeback, bdi_node); 792 793 might_sleep(); 794 restart: 795 rcu_read_lock(); 796 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { 797 DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done); 798 struct wb_writeback_work fallback_work; 799 struct wb_writeback_work *work; 800 long nr_pages; 801 802 if (last_wb) { 803 wb_put(last_wb); 804 last_wb = NULL; 805 } 806 807 /* SYNC_ALL writes out I_DIRTY_TIME too */ 808 if (!wb_has_dirty_io(wb) && 809 (base_work->sync_mode == WB_SYNC_NONE || 810 list_empty(&wb->b_dirty_time))) 811 continue; 812 if (skip_if_busy && writeback_in_progress(wb)) 813 continue; 814 815 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); 816 817 work = kmalloc(sizeof(*work), GFP_ATOMIC); 818 if (work) { 819 *work = *base_work; 820 work->nr_pages = nr_pages; 821 work->auto_free = 1; 822 wb_queue_work(wb, work); 823 continue; 824 } 825 826 /* alloc failed, execute synchronously using on-stack fallback */ 827 work = &fallback_work; 828 *work = *base_work; 829 work->nr_pages = nr_pages; 830 work->auto_free = 0; 831 work->done = &fallback_work_done; 832 833 wb_queue_work(wb, work); 834 835 /* 836 * Pin @wb so that it stays on @bdi->wb_list. This allows 837 * continuing iteration from @wb after dropping and 838 * regrabbing rcu read lock. 839 */ 840 wb_get(wb); 841 last_wb = wb; 842 843 rcu_read_unlock(); 844 wb_wait_for_completion(bdi, &fallback_work_done); 845 goto restart; 846 } 847 rcu_read_unlock(); 848 849 if (last_wb) 850 wb_put(last_wb); 851 } 852 853 /** 854 * cgroup_writeback_umount - flush inode wb switches for umount 855 * 856 * This function is called when a super_block is about to be destroyed and 857 * flushes in-flight inode wb switches. An inode wb switch goes through 858 * RCU and then workqueue, so the two need to be flushed in order to ensure 859 * that all previously scheduled switches are finished. As wb switches are 860 * rare occurrences and synchronize_rcu() can take a while, perform 861 * flushing iff wb switches are in flight. 862 */ 863 void cgroup_writeback_umount(void) 864 { 865 if (atomic_read(&isw_nr_in_flight)) { 866 synchronize_rcu(); 867 flush_workqueue(isw_wq); 868 } 869 } 870 871 static int __init cgroup_writeback_init(void) 872 { 873 isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0); 874 if (!isw_wq) 875 return -ENOMEM; 876 return 0; 877 } 878 fs_initcall(cgroup_writeback_init); 879 880 #else /* CONFIG_CGROUP_WRITEBACK */ 881 882 static struct bdi_writeback * 883 locked_inode_to_wb_and_lock_list(struct inode *inode) 884 __releases(&inode->i_lock) 885 __acquires(&wb->list_lock) 886 { 887 struct bdi_writeback *wb = inode_to_wb(inode); 888 889 spin_unlock(&inode->i_lock); 890 spin_lock(&wb->list_lock); 891 return wb; 892 } 893 894 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode) 895 __acquires(&wb->list_lock) 896 { 897 struct bdi_writeback *wb = inode_to_wb(inode); 898 899 spin_lock(&wb->list_lock); 900 return wb; 901 } 902 903 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) 904 { 905 return nr_pages; 906 } 907 908 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, 909 struct wb_writeback_work *base_work, 910 bool skip_if_busy) 911 { 912 might_sleep(); 913 914 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) { 915 base_work->auto_free = 0; 916 wb_queue_work(&bdi->wb, base_work); 917 } 918 } 919 920 #endif /* CONFIG_CGROUP_WRITEBACK */ 921 922 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, 923 bool range_cyclic, enum wb_reason reason) 924 { 925 struct wb_writeback_work *work; 926 927 if (!wb_has_dirty_io(wb)) 928 return; 929 930 /* 931 * This is WB_SYNC_NONE writeback, so if allocation fails just 932 * wakeup the thread for old dirty data writeback 933 */ 934 work = kzalloc(sizeof(*work), GFP_ATOMIC); 935 if (!work) { 936 trace_writeback_nowork(wb); 937 wb_wakeup(wb); 938 return; 939 } 940 941 work->sync_mode = WB_SYNC_NONE; 942 work->nr_pages = nr_pages; 943 work->range_cyclic = range_cyclic; 944 work->reason = reason; 945 work->auto_free = 1; 946 947 wb_queue_work(wb, work); 948 } 949 950 /** 951 * wb_start_background_writeback - start background writeback 952 * @wb: bdi_writback to write from 953 * 954 * Description: 955 * This makes sure WB_SYNC_NONE background writeback happens. When 956 * this function returns, it is only guaranteed that for given wb 957 * some IO is happening if we are over background dirty threshold. 958 * Caller need not hold sb s_umount semaphore. 959 */ 960 void wb_start_background_writeback(struct bdi_writeback *wb) 961 { 962 /* 963 * We just wake up the flusher thread. It will perform background 964 * writeback as soon as there is no other work to do. 965 */ 966 trace_writeback_wake_background(wb); 967 wb_wakeup(wb); 968 } 969 970 /* 971 * Remove the inode from the writeback list it is on. 972 */ 973 void inode_io_list_del(struct inode *inode) 974 { 975 struct bdi_writeback *wb; 976 977 wb = inode_to_wb_and_lock_list(inode); 978 inode_io_list_del_locked(inode, wb); 979 spin_unlock(&wb->list_lock); 980 } 981 982 /* 983 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the 984 * furthest end of its superblock's dirty-inode list. 985 * 986 * Before stamping the inode's ->dirtied_when, we check to see whether it is 987 * already the most-recently-dirtied inode on the b_dirty list. If that is 988 * the case then the inode must have been redirtied while it was being written 989 * out and we don't reset its dirtied_when. 990 */ 991 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) 992 { 993 if (!list_empty(&wb->b_dirty)) { 994 struct inode *tail; 995 996 tail = wb_inode(wb->b_dirty.next); 997 if (time_before(inode->dirtied_when, tail->dirtied_when)) 998 inode->dirtied_when = jiffies; 999 } 1000 inode_io_list_move_locked(inode, wb, &wb->b_dirty); 1001 } 1002 1003 /* 1004 * requeue inode for re-scanning after bdi->b_io list is exhausted. 1005 */ 1006 static void requeue_io(struct inode *inode, struct bdi_writeback *wb) 1007 { 1008 inode_io_list_move_locked(inode, wb, &wb->b_more_io); 1009 } 1010 1011 static void inode_sync_complete(struct inode *inode) 1012 { 1013 inode->i_state &= ~I_SYNC; 1014 /* If inode is clean an unused, put it into LRU now... */ 1015 inode_add_lru(inode); 1016 /* Waiters must see I_SYNC cleared before being woken up */ 1017 smp_mb(); 1018 wake_up_bit(&inode->i_state, __I_SYNC); 1019 } 1020 1021 static bool inode_dirtied_after(struct inode *inode, unsigned long t) 1022 { 1023 bool ret = time_after(inode->dirtied_when, t); 1024 #ifndef CONFIG_64BIT 1025 /* 1026 * For inodes being constantly redirtied, dirtied_when can get stuck. 1027 * It _appears_ to be in the future, but is actually in distant past. 1028 * This test is necessary to prevent such wrapped-around relative times 1029 * from permanently stopping the whole bdi writeback. 1030 */ 1031 ret = ret && time_before_eq(inode->dirtied_when, jiffies); 1032 #endif 1033 return ret; 1034 } 1035 1036 #define EXPIRE_DIRTY_ATIME 0x0001 1037 1038 /* 1039 * Move expired (dirtied before work->older_than_this) dirty inodes from 1040 * @delaying_queue to @dispatch_queue. 1041 */ 1042 static int move_expired_inodes(struct list_head *delaying_queue, 1043 struct list_head *dispatch_queue, 1044 int flags, 1045 struct wb_writeback_work *work) 1046 { 1047 unsigned long *older_than_this = NULL; 1048 unsigned long expire_time; 1049 LIST_HEAD(tmp); 1050 struct list_head *pos, *node; 1051 struct super_block *sb = NULL; 1052 struct inode *inode; 1053 int do_sb_sort = 0; 1054 int moved = 0; 1055 1056 if ((flags & EXPIRE_DIRTY_ATIME) == 0) 1057 older_than_this = work->older_than_this; 1058 else if (!work->for_sync) { 1059 expire_time = jiffies - (dirtytime_expire_interval * HZ); 1060 older_than_this = &expire_time; 1061 } 1062 while (!list_empty(delaying_queue)) { 1063 inode = wb_inode(delaying_queue->prev); 1064 if (older_than_this && 1065 inode_dirtied_after(inode, *older_than_this)) 1066 break; 1067 list_move(&inode->i_io_list, &tmp); 1068 moved++; 1069 if (flags & EXPIRE_DIRTY_ATIME) 1070 set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); 1071 if (sb_is_blkdev_sb(inode->i_sb)) 1072 continue; 1073 if (sb && sb != inode->i_sb) 1074 do_sb_sort = 1; 1075 sb = inode->i_sb; 1076 } 1077 1078 /* just one sb in list, splice to dispatch_queue and we're done */ 1079 if (!do_sb_sort) { 1080 list_splice(&tmp, dispatch_queue); 1081 goto out; 1082 } 1083 1084 /* Move inodes from one superblock together */ 1085 while (!list_empty(&tmp)) { 1086 sb = wb_inode(tmp.prev)->i_sb; 1087 list_for_each_prev_safe(pos, node, &tmp) { 1088 inode = wb_inode(pos); 1089 if (inode->i_sb == sb) 1090 list_move(&inode->i_io_list, dispatch_queue); 1091 } 1092 } 1093 out: 1094 return moved; 1095 } 1096 1097 /* 1098 * Queue all expired dirty inodes for io, eldest first. 1099 * Before 1100 * newly dirtied b_dirty b_io b_more_io 1101 * =============> gf edc BA 1102 * After 1103 * newly dirtied b_dirty b_io b_more_io 1104 * =============> g fBAedc 1105 * | 1106 * +--> dequeue for IO 1107 */ 1108 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) 1109 { 1110 int moved; 1111 1112 assert_spin_locked(&wb->list_lock); 1113 list_splice_init(&wb->b_more_io, &wb->b_io); 1114 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); 1115 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, 1116 EXPIRE_DIRTY_ATIME, work); 1117 if (moved) 1118 wb_io_lists_populated(wb); 1119 trace_writeback_queue_io(wb, work, moved); 1120 } 1121 1122 static int write_inode(struct inode *inode, struct writeback_control *wbc) 1123 { 1124 int ret; 1125 1126 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) { 1127 trace_writeback_write_inode_start(inode, wbc); 1128 ret = inode->i_sb->s_op->write_inode(inode, wbc); 1129 trace_writeback_write_inode(inode, wbc); 1130 return ret; 1131 } 1132 return 0; 1133 } 1134 1135 /* 1136 * Wait for writeback on an inode to complete. Called with i_lock held. 1137 * Caller must make sure inode cannot go away when we drop i_lock. 1138 */ 1139 static void __inode_wait_for_writeback(struct inode *inode) 1140 __releases(inode->i_lock) 1141 __acquires(inode->i_lock) 1142 { 1143 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); 1144 wait_queue_head_t *wqh; 1145 1146 wqh = bit_waitqueue(&inode->i_state, __I_SYNC); 1147 while (inode->i_state & I_SYNC) { 1148 spin_unlock(&inode->i_lock); 1149 __wait_on_bit(wqh, &wq, bit_wait, 1150 TASK_UNINTERRUPTIBLE); 1151 spin_lock(&inode->i_lock); 1152 } 1153 } 1154 1155 /* 1156 * Wait for writeback on an inode to complete. Caller must have inode pinned. 1157 */ 1158 void inode_wait_for_writeback(struct inode *inode) 1159 { 1160 spin_lock(&inode->i_lock); 1161 __inode_wait_for_writeback(inode); 1162 spin_unlock(&inode->i_lock); 1163 } 1164 1165 /* 1166 * Sleep until I_SYNC is cleared. This function must be called with i_lock 1167 * held and drops it. It is aimed for callers not holding any inode reference 1168 * so once i_lock is dropped, inode can go away. 1169 */ 1170 static void inode_sleep_on_writeback(struct inode *inode) 1171 __releases(inode->i_lock) 1172 { 1173 DEFINE_WAIT(wait); 1174 wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC); 1175 int sleep; 1176 1177 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); 1178 sleep = inode->i_state & I_SYNC; 1179 spin_unlock(&inode->i_lock); 1180 if (sleep) 1181 schedule(); 1182 finish_wait(wqh, &wait); 1183 } 1184 1185 /* 1186 * Find proper writeback list for the inode depending on its current state and 1187 * possibly also change of its state while we were doing writeback. Here we 1188 * handle things such as livelock prevention or fairness of writeback among 1189 * inodes. This function can be called only by flusher thread - noone else 1190 * processes all inodes in writeback lists and requeueing inodes behind flusher 1191 * thread's back can have unexpected consequences. 1192 */ 1193 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, 1194 struct writeback_control *wbc) 1195 { 1196 if (inode->i_state & I_FREEING) 1197 return; 1198 1199 /* 1200 * Sync livelock prevention. Each inode is tagged and synced in one 1201 * shot. If still dirty, it will be redirty_tail()'ed below. Update 1202 * the dirty time to prevent enqueue and sync it again. 1203 */ 1204 if ((inode->i_state & I_DIRTY) && 1205 (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)) 1206 inode->dirtied_when = jiffies; 1207 1208 if (wbc->pages_skipped) { 1209 /* 1210 * writeback is not making progress due to locked 1211 * buffers. Skip this inode for now. 1212 */ 1213 redirty_tail(inode, wb); 1214 return; 1215 } 1216 1217 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) { 1218 /* 1219 * We didn't write back all the pages. nfs_writepages() 1220 * sometimes bales out without doing anything. 1221 */ 1222 if (wbc->nr_to_write <= 0) { 1223 /* Slice used up. Queue for next turn. */ 1224 requeue_io(inode, wb); 1225 } else { 1226 /* 1227 * Writeback blocked by something other than 1228 * congestion. Delay the inode for some time to 1229 * avoid spinning on the CPU (100% iowait) 1230 * retrying writeback of the dirty page/inode 1231 * that cannot be performed immediately. 1232 */ 1233 redirty_tail(inode, wb); 1234 } 1235 } else if (inode->i_state & I_DIRTY) { 1236 /* 1237 * Filesystems can dirty the inode during writeback operations, 1238 * such as delayed allocation during submission or metadata 1239 * updates after data IO completion. 1240 */ 1241 redirty_tail(inode, wb); 1242 } else if (inode->i_state & I_DIRTY_TIME) { 1243 inode->dirtied_when = jiffies; 1244 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); 1245 } else { 1246 /* The inode is clean. Remove from writeback lists. */ 1247 inode_io_list_del_locked(inode, wb); 1248 } 1249 } 1250 1251 /* 1252 * Write out an inode and its dirty pages. Do not update the writeback list 1253 * linkage. That is left to the caller. The caller is also responsible for 1254 * setting I_SYNC flag and calling inode_sync_complete() to clear it. 1255 */ 1256 static int 1257 __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) 1258 { 1259 struct address_space *mapping = inode->i_mapping; 1260 long nr_to_write = wbc->nr_to_write; 1261 unsigned dirty; 1262 int ret; 1263 1264 WARN_ON(!(inode->i_state & I_SYNC)); 1265 1266 trace_writeback_single_inode_start(inode, wbc, nr_to_write); 1267 1268 ret = do_writepages(mapping, wbc); 1269 1270 /* 1271 * Make sure to wait on the data before writing out the metadata. 1272 * This is important for filesystems that modify metadata on data 1273 * I/O completion. We don't do it for sync(2) writeback because it has a 1274 * separate, external IO completion path and ->sync_fs for guaranteeing 1275 * inode metadata is written back correctly. 1276 */ 1277 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) { 1278 int err = filemap_fdatawait(mapping); 1279 if (ret == 0) 1280 ret = err; 1281 } 1282 1283 /* 1284 * Some filesystems may redirty the inode during the writeback 1285 * due to delalloc, clear dirty metadata flags right before 1286 * write_inode() 1287 */ 1288 spin_lock(&inode->i_lock); 1289 1290 dirty = inode->i_state & I_DIRTY; 1291 if (inode->i_state & I_DIRTY_TIME) { 1292 if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) || 1293 unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || 1294 unlikely(time_after(jiffies, 1295 (inode->dirtied_time_when + 1296 dirtytime_expire_interval * HZ)))) { 1297 dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; 1298 trace_writeback_lazytime(inode); 1299 } 1300 } else 1301 inode->i_state &= ~I_DIRTY_TIME_EXPIRED; 1302 inode->i_state &= ~dirty; 1303 1304 /* 1305 * Paired with smp_mb() in __mark_inode_dirty(). This allows 1306 * __mark_inode_dirty() to test i_state without grabbing i_lock - 1307 * either they see the I_DIRTY bits cleared or we see the dirtied 1308 * inode. 1309 * 1310 * I_DIRTY_PAGES is always cleared together above even if @mapping 1311 * still has dirty pages. The flag is reinstated after smp_mb() if 1312 * necessary. This guarantees that either __mark_inode_dirty() 1313 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY. 1314 */ 1315 smp_mb(); 1316 1317 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 1318 inode->i_state |= I_DIRTY_PAGES; 1319 1320 spin_unlock(&inode->i_lock); 1321 1322 if (dirty & I_DIRTY_TIME) 1323 mark_inode_dirty_sync(inode); 1324 /* Don't write the inode if only I_DIRTY_PAGES was set */ 1325 if (dirty & ~I_DIRTY_PAGES) { 1326 int err = write_inode(inode, wbc); 1327 if (ret == 0) 1328 ret = err; 1329 } 1330 trace_writeback_single_inode(inode, wbc, nr_to_write); 1331 return ret; 1332 } 1333 1334 /* 1335 * Write out an inode's dirty pages. Either the caller has an active reference 1336 * on the inode or the inode has I_WILL_FREE set. 1337 * 1338 * This function is designed to be called for writing back one inode which 1339 * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode() 1340 * and does more profound writeback list handling in writeback_sb_inodes(). 1341 */ 1342 static int writeback_single_inode(struct inode *inode, 1343 struct writeback_control *wbc) 1344 { 1345 struct bdi_writeback *wb; 1346 int ret = 0; 1347 1348 spin_lock(&inode->i_lock); 1349 if (!atomic_read(&inode->i_count)) 1350 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); 1351 else 1352 WARN_ON(inode->i_state & I_WILL_FREE); 1353 1354 if (inode->i_state & I_SYNC) { 1355 if (wbc->sync_mode != WB_SYNC_ALL) 1356 goto out; 1357 /* 1358 * It's a data-integrity sync. We must wait. Since callers hold 1359 * inode reference or inode has I_WILL_FREE set, it cannot go 1360 * away under us. 1361 */ 1362 __inode_wait_for_writeback(inode); 1363 } 1364 WARN_ON(inode->i_state & I_SYNC); 1365 /* 1366 * Skip inode if it is clean and we have no outstanding writeback in 1367 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this 1368 * function since flusher thread may be doing for example sync in 1369 * parallel and if we move the inode, it could get skipped. So here we 1370 * make sure inode is on some writeback list and leave it there unless 1371 * we have completely cleaned the inode. 1372 */ 1373 if (!(inode->i_state & I_DIRTY_ALL) && 1374 (wbc->sync_mode != WB_SYNC_ALL || 1375 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) 1376 goto out; 1377 inode->i_state |= I_SYNC; 1378 wbc_attach_and_unlock_inode(wbc, inode); 1379 1380 ret = __writeback_single_inode(inode, wbc); 1381 1382 wbc_detach_inode(wbc); 1383 1384 wb = inode_to_wb_and_lock_list(inode); 1385 spin_lock(&inode->i_lock); 1386 /* 1387 * If inode is clean, remove it from writeback lists. Otherwise don't 1388 * touch it. See comment above for explanation. 1389 */ 1390 if (!(inode->i_state & I_DIRTY_ALL)) 1391 inode_io_list_del_locked(inode, wb); 1392 spin_unlock(&wb->list_lock); 1393 inode_sync_complete(inode); 1394 out: 1395 spin_unlock(&inode->i_lock); 1396 return ret; 1397 } 1398 1399 static long writeback_chunk_size(struct bdi_writeback *wb, 1400 struct wb_writeback_work *work) 1401 { 1402 long pages; 1403 1404 /* 1405 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty 1406 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX 1407 * here avoids calling into writeback_inodes_wb() more than once. 1408 * 1409 * The intended call sequence for WB_SYNC_ALL writeback is: 1410 * 1411 * wb_writeback() 1412 * writeback_sb_inodes() <== called only once 1413 * write_cache_pages() <== called once for each inode 1414 * (quickly) tag currently dirty pages 1415 * (maybe slowly) sync all tagged pages 1416 */ 1417 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) 1418 pages = LONG_MAX; 1419 else { 1420 pages = min(wb->avg_write_bandwidth / 2, 1421 global_wb_domain.dirty_limit / DIRTY_SCOPE); 1422 pages = min(pages, work->nr_pages); 1423 pages = round_down(pages + MIN_WRITEBACK_PAGES, 1424 MIN_WRITEBACK_PAGES); 1425 } 1426 1427 return pages; 1428 } 1429 1430 /* 1431 * Write a portion of b_io inodes which belong to @sb. 1432 * 1433 * Return the number of pages and/or inodes written. 1434 * 1435 * NOTE! This is called with wb->list_lock held, and will 1436 * unlock and relock that for each inode it ends up doing 1437 * IO for. 1438 */ 1439 static long writeback_sb_inodes(struct super_block *sb, 1440 struct bdi_writeback *wb, 1441 struct wb_writeback_work *work) 1442 { 1443 struct writeback_control wbc = { 1444 .sync_mode = work->sync_mode, 1445 .tagged_writepages = work->tagged_writepages, 1446 .for_kupdate = work->for_kupdate, 1447 .for_background = work->for_background, 1448 .for_sync = work->for_sync, 1449 .range_cyclic = work->range_cyclic, 1450 .range_start = 0, 1451 .range_end = LLONG_MAX, 1452 }; 1453 unsigned long start_time = jiffies; 1454 long write_chunk; 1455 long wrote = 0; /* count both pages and inodes */ 1456 1457 while (!list_empty(&wb->b_io)) { 1458 struct inode *inode = wb_inode(wb->b_io.prev); 1459 struct bdi_writeback *tmp_wb; 1460 1461 if (inode->i_sb != sb) { 1462 if (work->sb) { 1463 /* 1464 * We only want to write back data for this 1465 * superblock, move all inodes not belonging 1466 * to it back onto the dirty list. 1467 */ 1468 redirty_tail(inode, wb); 1469 continue; 1470 } 1471 1472 /* 1473 * The inode belongs to a different superblock. 1474 * Bounce back to the caller to unpin this and 1475 * pin the next superblock. 1476 */ 1477 break; 1478 } 1479 1480 /* 1481 * Don't bother with new inodes or inodes being freed, first 1482 * kind does not need periodic writeout yet, and for the latter 1483 * kind writeout is handled by the freer. 1484 */ 1485 spin_lock(&inode->i_lock); 1486 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 1487 spin_unlock(&inode->i_lock); 1488 redirty_tail(inode, wb); 1489 continue; 1490 } 1491 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { 1492 /* 1493 * If this inode is locked for writeback and we are not 1494 * doing writeback-for-data-integrity, move it to 1495 * b_more_io so that writeback can proceed with the 1496 * other inodes on s_io. 1497 * 1498 * We'll have another go at writing back this inode 1499 * when we completed a full scan of b_io. 1500 */ 1501 spin_unlock(&inode->i_lock); 1502 requeue_io(inode, wb); 1503 trace_writeback_sb_inodes_requeue(inode); 1504 continue; 1505 } 1506 spin_unlock(&wb->list_lock); 1507 1508 /* 1509 * We already requeued the inode if it had I_SYNC set and we 1510 * are doing WB_SYNC_NONE writeback. So this catches only the 1511 * WB_SYNC_ALL case. 1512 */ 1513 if (inode->i_state & I_SYNC) { 1514 /* Wait for I_SYNC. This function drops i_lock... */ 1515 inode_sleep_on_writeback(inode); 1516 /* Inode may be gone, start again */ 1517 spin_lock(&wb->list_lock); 1518 continue; 1519 } 1520 inode->i_state |= I_SYNC; 1521 wbc_attach_and_unlock_inode(&wbc, inode); 1522 1523 write_chunk = writeback_chunk_size(wb, work); 1524 wbc.nr_to_write = write_chunk; 1525 wbc.pages_skipped = 0; 1526 1527 /* 1528 * We use I_SYNC to pin the inode in memory. While it is set 1529 * evict_inode() will wait so the inode cannot be freed. 1530 */ 1531 __writeback_single_inode(inode, &wbc); 1532 1533 wbc_detach_inode(&wbc); 1534 work->nr_pages -= write_chunk - wbc.nr_to_write; 1535 wrote += write_chunk - wbc.nr_to_write; 1536 1537 if (need_resched()) { 1538 /* 1539 * We're trying to balance between building up a nice 1540 * long list of IOs to improve our merge rate, and 1541 * getting those IOs out quickly for anyone throttling 1542 * in balance_dirty_pages(). cond_resched() doesn't 1543 * unplug, so get our IOs out the door before we 1544 * give up the CPU. 1545 */ 1546 blk_flush_plug(current); 1547 cond_resched(); 1548 } 1549 1550 /* 1551 * Requeue @inode if still dirty. Be careful as @inode may 1552 * have been switched to another wb in the meantime. 1553 */ 1554 tmp_wb = inode_to_wb_and_lock_list(inode); 1555 spin_lock(&inode->i_lock); 1556 if (!(inode->i_state & I_DIRTY_ALL)) 1557 wrote++; 1558 requeue_inode(inode, tmp_wb, &wbc); 1559 inode_sync_complete(inode); 1560 spin_unlock(&inode->i_lock); 1561 1562 if (unlikely(tmp_wb != wb)) { 1563 spin_unlock(&tmp_wb->list_lock); 1564 spin_lock(&wb->list_lock); 1565 } 1566 1567 /* 1568 * bail out to wb_writeback() often enough to check 1569 * background threshold and other termination conditions. 1570 */ 1571 if (wrote) { 1572 if (time_is_before_jiffies(start_time + HZ / 10UL)) 1573 break; 1574 if (work->nr_pages <= 0) 1575 break; 1576 } 1577 } 1578 return wrote; 1579 } 1580 1581 static long __writeback_inodes_wb(struct bdi_writeback *wb, 1582 struct wb_writeback_work *work) 1583 { 1584 unsigned long start_time = jiffies; 1585 long wrote = 0; 1586 1587 while (!list_empty(&wb->b_io)) { 1588 struct inode *inode = wb_inode(wb->b_io.prev); 1589 struct super_block *sb = inode->i_sb; 1590 1591 if (!trylock_super(sb)) { 1592 /* 1593 * trylock_super() may fail consistently due to 1594 * s_umount being grabbed by someone else. Don't use 1595 * requeue_io() to avoid busy retrying the inode/sb. 1596 */ 1597 redirty_tail(inode, wb); 1598 continue; 1599 } 1600 wrote += writeback_sb_inodes(sb, wb, work); 1601 up_read(&sb->s_umount); 1602 1603 /* refer to the same tests at the end of writeback_sb_inodes */ 1604 if (wrote) { 1605 if (time_is_before_jiffies(start_time + HZ / 10UL)) 1606 break; 1607 if (work->nr_pages <= 0) 1608 break; 1609 } 1610 } 1611 /* Leave any unwritten inodes on b_io */ 1612 return wrote; 1613 } 1614 1615 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, 1616 enum wb_reason reason) 1617 { 1618 struct wb_writeback_work work = { 1619 .nr_pages = nr_pages, 1620 .sync_mode = WB_SYNC_NONE, 1621 .range_cyclic = 1, 1622 .reason = reason, 1623 }; 1624 struct blk_plug plug; 1625 1626 blk_start_plug(&plug); 1627 spin_lock(&wb->list_lock); 1628 if (list_empty(&wb->b_io)) 1629 queue_io(wb, &work); 1630 __writeback_inodes_wb(wb, &work); 1631 spin_unlock(&wb->list_lock); 1632 blk_finish_plug(&plug); 1633 1634 return nr_pages - work.nr_pages; 1635 } 1636 1637 /* 1638 * Explicit flushing or periodic writeback of "old" data. 1639 * 1640 * Define "old": the first time one of an inode's pages is dirtied, we mark the 1641 * dirtying-time in the inode's address_space. So this periodic writeback code 1642 * just walks the superblock inode list, writing back any inodes which are 1643 * older than a specific point in time. 1644 * 1645 * Try to run once per dirty_writeback_interval. But if a writeback event 1646 * takes longer than a dirty_writeback_interval interval, then leave a 1647 * one-second gap. 1648 * 1649 * older_than_this takes precedence over nr_to_write. So we'll only write back 1650 * all dirty pages if they are all attached to "old" mappings. 1651 */ 1652 static long wb_writeback(struct bdi_writeback *wb, 1653 struct wb_writeback_work *work) 1654 { 1655 unsigned long wb_start = jiffies; 1656 long nr_pages = work->nr_pages; 1657 unsigned long oldest_jif; 1658 struct inode *inode; 1659 long progress; 1660 struct blk_plug plug; 1661 1662 oldest_jif = jiffies; 1663 work->older_than_this = &oldest_jif; 1664 1665 blk_start_plug(&plug); 1666 spin_lock(&wb->list_lock); 1667 for (;;) { 1668 /* 1669 * Stop writeback when nr_pages has been consumed 1670 */ 1671 if (work->nr_pages <= 0) 1672 break; 1673 1674 /* 1675 * Background writeout and kupdate-style writeback may 1676 * run forever. Stop them if there is other work to do 1677 * so that e.g. sync can proceed. They'll be restarted 1678 * after the other works are all done. 1679 */ 1680 if ((work->for_background || work->for_kupdate) && 1681 !list_empty(&wb->work_list)) 1682 break; 1683 1684 /* 1685 * For background writeout, stop when we are below the 1686 * background dirty threshold 1687 */ 1688 if (work->for_background && !wb_over_bg_thresh(wb)) 1689 break; 1690 1691 /* 1692 * Kupdate and background works are special and we want to 1693 * include all inodes that need writing. Livelock avoidance is 1694 * handled by these works yielding to any other work so we are 1695 * safe. 1696 */ 1697 if (work->for_kupdate) { 1698 oldest_jif = jiffies - 1699 msecs_to_jiffies(dirty_expire_interval * 10); 1700 } else if (work->for_background) 1701 oldest_jif = jiffies; 1702 1703 trace_writeback_start(wb, work); 1704 if (list_empty(&wb->b_io)) 1705 queue_io(wb, work); 1706 if (work->sb) 1707 progress = writeback_sb_inodes(work->sb, wb, work); 1708 else 1709 progress = __writeback_inodes_wb(wb, work); 1710 trace_writeback_written(wb, work); 1711 1712 wb_update_bandwidth(wb, wb_start); 1713 1714 /* 1715 * Did we write something? Try for more 1716 * 1717 * Dirty inodes are moved to b_io for writeback in batches. 1718 * The completion of the current batch does not necessarily 1719 * mean the overall work is done. So we keep looping as long 1720 * as made some progress on cleaning pages or inodes. 1721 */ 1722 if (progress) 1723 continue; 1724 /* 1725 * No more inodes for IO, bail 1726 */ 1727 if (list_empty(&wb->b_more_io)) 1728 break; 1729 /* 1730 * Nothing written. Wait for some inode to 1731 * become available for writeback. Otherwise 1732 * we'll just busyloop. 1733 */ 1734 if (!list_empty(&wb->b_more_io)) { 1735 trace_writeback_wait(wb, work); 1736 inode = wb_inode(wb->b_more_io.prev); 1737 spin_lock(&inode->i_lock); 1738 spin_unlock(&wb->list_lock); 1739 /* This function drops i_lock... */ 1740 inode_sleep_on_writeback(inode); 1741 spin_lock(&wb->list_lock); 1742 } 1743 } 1744 spin_unlock(&wb->list_lock); 1745 blk_finish_plug(&plug); 1746 1747 return nr_pages - work->nr_pages; 1748 } 1749 1750 /* 1751 * Return the next wb_writeback_work struct that hasn't been processed yet. 1752 */ 1753 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb) 1754 { 1755 struct wb_writeback_work *work = NULL; 1756 1757 spin_lock_bh(&wb->work_lock); 1758 if (!list_empty(&wb->work_list)) { 1759 work = list_entry(wb->work_list.next, 1760 struct wb_writeback_work, list); 1761 list_del_init(&work->list); 1762 } 1763 spin_unlock_bh(&wb->work_lock); 1764 return work; 1765 } 1766 1767 /* 1768 * Add in the number of potentially dirty inodes, because each inode 1769 * write can dirty pagecache in the underlying blockdev. 1770 */ 1771 static unsigned long get_nr_dirty_pages(void) 1772 { 1773 return global_page_state(NR_FILE_DIRTY) + 1774 global_page_state(NR_UNSTABLE_NFS) + 1775 get_nr_dirty_inodes(); 1776 } 1777 1778 static long wb_check_background_flush(struct bdi_writeback *wb) 1779 { 1780 if (wb_over_bg_thresh(wb)) { 1781 1782 struct wb_writeback_work work = { 1783 .nr_pages = LONG_MAX, 1784 .sync_mode = WB_SYNC_NONE, 1785 .for_background = 1, 1786 .range_cyclic = 1, 1787 .reason = WB_REASON_BACKGROUND, 1788 }; 1789 1790 return wb_writeback(wb, &work); 1791 } 1792 1793 return 0; 1794 } 1795 1796 static long wb_check_old_data_flush(struct bdi_writeback *wb) 1797 { 1798 unsigned long expired; 1799 long nr_pages; 1800 1801 /* 1802 * When set to zero, disable periodic writeback 1803 */ 1804 if (!dirty_writeback_interval) 1805 return 0; 1806 1807 expired = wb->last_old_flush + 1808 msecs_to_jiffies(dirty_writeback_interval * 10); 1809 if (time_before(jiffies, expired)) 1810 return 0; 1811 1812 wb->last_old_flush = jiffies; 1813 nr_pages = get_nr_dirty_pages(); 1814 1815 if (nr_pages) { 1816 struct wb_writeback_work work = { 1817 .nr_pages = nr_pages, 1818 .sync_mode = WB_SYNC_NONE, 1819 .for_kupdate = 1, 1820 .range_cyclic = 1, 1821 .reason = WB_REASON_PERIODIC, 1822 }; 1823 1824 return wb_writeback(wb, &work); 1825 } 1826 1827 return 0; 1828 } 1829 1830 /* 1831 * Retrieve work items and do the writeback they describe 1832 */ 1833 static long wb_do_writeback(struct bdi_writeback *wb) 1834 { 1835 struct wb_writeback_work *work; 1836 long wrote = 0; 1837 1838 set_bit(WB_writeback_running, &wb->state); 1839 while ((work = get_next_work_item(wb)) != NULL) { 1840 struct wb_completion *done = work->done; 1841 1842 trace_writeback_exec(wb, work); 1843 1844 wrote += wb_writeback(wb, work); 1845 1846 if (work->auto_free) 1847 kfree(work); 1848 if (done && atomic_dec_and_test(&done->cnt)) 1849 wake_up_all(&wb->bdi->wb_waitq); 1850 } 1851 1852 /* 1853 * Check for periodic writeback, kupdated() style 1854 */ 1855 wrote += wb_check_old_data_flush(wb); 1856 wrote += wb_check_background_flush(wb); 1857 clear_bit(WB_writeback_running, &wb->state); 1858 1859 return wrote; 1860 } 1861 1862 /* 1863 * Handle writeback of dirty data for the device backed by this bdi. Also 1864 * reschedules periodically and does kupdated style flushing. 1865 */ 1866 void wb_workfn(struct work_struct *work) 1867 { 1868 struct bdi_writeback *wb = container_of(to_delayed_work(work), 1869 struct bdi_writeback, dwork); 1870 long pages_written; 1871 1872 set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); 1873 current->flags |= PF_SWAPWRITE; 1874 1875 if (likely(!current_is_workqueue_rescuer() || 1876 !test_bit(WB_registered, &wb->state))) { 1877 /* 1878 * The normal path. Keep writing back @wb until its 1879 * work_list is empty. Note that this path is also taken 1880 * if @wb is shutting down even when we're running off the 1881 * rescuer as work_list needs to be drained. 1882 */ 1883 do { 1884 pages_written = wb_do_writeback(wb); 1885 trace_writeback_pages_written(pages_written); 1886 } while (!list_empty(&wb->work_list)); 1887 } else { 1888 /* 1889 * bdi_wq can't get enough workers and we're running off 1890 * the emergency worker. Don't hog it. Hopefully, 1024 is 1891 * enough for efficient IO. 1892 */ 1893 pages_written = writeback_inodes_wb(wb, 1024, 1894 WB_REASON_FORKER_THREAD); 1895 trace_writeback_pages_written(pages_written); 1896 } 1897 1898 if (!list_empty(&wb->work_list)) 1899 mod_delayed_work(bdi_wq, &wb->dwork, 0); 1900 else if (wb_has_dirty_io(wb) && dirty_writeback_interval) 1901 wb_wakeup_delayed(wb); 1902 1903 current->flags &= ~PF_SWAPWRITE; 1904 } 1905 1906 /* 1907 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back 1908 * the whole world. 1909 */ 1910 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) 1911 { 1912 struct backing_dev_info *bdi; 1913 1914 if (!nr_pages) 1915 nr_pages = get_nr_dirty_pages(); 1916 1917 rcu_read_lock(); 1918 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1919 struct bdi_writeback *wb; 1920 1921 if (!bdi_has_dirty_io(bdi)) 1922 continue; 1923 1924 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) 1925 wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages), 1926 false, reason); 1927 } 1928 rcu_read_unlock(); 1929 } 1930 1931 /* 1932 * Wake up bdi's periodically to make sure dirtytime inodes gets 1933 * written back periodically. We deliberately do *not* check the 1934 * b_dirtytime list in wb_has_dirty_io(), since this would cause the 1935 * kernel to be constantly waking up once there are any dirtytime 1936 * inodes on the system. So instead we define a separate delayed work 1937 * function which gets called much more rarely. (By default, only 1938 * once every 12 hours.) 1939 * 1940 * If there is any other write activity going on in the file system, 1941 * this function won't be necessary. But if the only thing that has 1942 * happened on the file system is a dirtytime inode caused by an atime 1943 * update, we need this infrastructure below to make sure that inode 1944 * eventually gets pushed out to disk. 1945 */ 1946 static void wakeup_dirtytime_writeback(struct work_struct *w); 1947 static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback); 1948 1949 static void wakeup_dirtytime_writeback(struct work_struct *w) 1950 { 1951 struct backing_dev_info *bdi; 1952 1953 rcu_read_lock(); 1954 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1955 struct bdi_writeback *wb; 1956 1957 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) 1958 if (!list_empty(&wb->b_dirty_time)) 1959 wb_wakeup(wb); 1960 } 1961 rcu_read_unlock(); 1962 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 1963 } 1964 1965 static int __init start_dirtytime_writeback(void) 1966 { 1967 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); 1968 return 0; 1969 } 1970 __initcall(start_dirtytime_writeback); 1971 1972 int dirtytime_interval_handler(struct ctl_table *table, int write, 1973 void __user *buffer, size_t *lenp, loff_t *ppos) 1974 { 1975 int ret; 1976 1977 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 1978 if (ret == 0 && write) 1979 mod_delayed_work(system_wq, &dirtytime_work, 0); 1980 return ret; 1981 } 1982 1983 static noinline void block_dump___mark_inode_dirty(struct inode *inode) 1984 { 1985 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { 1986 struct dentry *dentry; 1987 const char *name = "?"; 1988 1989 dentry = d_find_alias(inode); 1990 if (dentry) { 1991 spin_lock(&dentry->d_lock); 1992 name = (const char *) dentry->d_name.name; 1993 } 1994 printk(KERN_DEBUG 1995 "%s(%d): dirtied inode %lu (%s) on %s\n", 1996 current->comm, task_pid_nr(current), inode->i_ino, 1997 name, inode->i_sb->s_id); 1998 if (dentry) { 1999 spin_unlock(&dentry->d_lock); 2000 dput(dentry); 2001 } 2002 } 2003 } 2004 2005 /** 2006 * __mark_inode_dirty - internal function 2007 * @inode: inode to mark 2008 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC) 2009 * Mark an inode as dirty. Callers should use mark_inode_dirty or 2010 * mark_inode_dirty_sync. 2011 * 2012 * Put the inode on the super block's dirty list. 2013 * 2014 * CAREFUL! We mark it dirty unconditionally, but move it onto the 2015 * dirty list only if it is hashed or if it refers to a blockdev. 2016 * If it was not hashed, it will never be added to the dirty list 2017 * even if it is later hashed, as it will have been marked dirty already. 2018 * 2019 * In short, make sure you hash any inodes _before_ you start marking 2020 * them dirty. 2021 * 2022 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of 2023 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of 2024 * the kernel-internal blockdev inode represents the dirtying time of the 2025 * blockdev's pages. This is why for I_DIRTY_PAGES we always use 2026 * page->mapping->host, so the page-dirtying time is recorded in the internal 2027 * blockdev inode. 2028 */ 2029 void __mark_inode_dirty(struct inode *inode, int flags) 2030 { 2031 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) 2032 struct super_block *sb = inode->i_sb; 2033 int dirtytime; 2034 2035 trace_writeback_mark_inode_dirty(inode, flags); 2036 2037 /* 2038 * Don't do this for I_DIRTY_PAGES - that doesn't actually 2039 * dirty the inode itself 2040 */ 2041 if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) { 2042 trace_writeback_dirty_inode_start(inode, flags); 2043 2044 if (sb->s_op->dirty_inode) 2045 sb->s_op->dirty_inode(inode, flags); 2046 2047 trace_writeback_dirty_inode(inode, flags); 2048 } 2049 if (flags & I_DIRTY_INODE) 2050 flags &= ~I_DIRTY_TIME; 2051 dirtytime = flags & I_DIRTY_TIME; 2052 2053 /* 2054 * Paired with smp_mb() in __writeback_single_inode() for the 2055 * following lockless i_state test. See there for details. 2056 */ 2057 smp_mb(); 2058 2059 if (((inode->i_state & flags) == flags) || 2060 (dirtytime && (inode->i_state & I_DIRTY_INODE))) 2061 return; 2062 2063 if (unlikely(block_dump)) 2064 block_dump___mark_inode_dirty(inode); 2065 2066 spin_lock(&inode->i_lock); 2067 if (dirtytime && (inode->i_state & I_DIRTY_INODE)) 2068 goto out_unlock_inode; 2069 if ((inode->i_state & flags) != flags) { 2070 const int was_dirty = inode->i_state & I_DIRTY; 2071 2072 inode_attach_wb(inode, NULL); 2073 2074 if (flags & I_DIRTY_INODE) 2075 inode->i_state &= ~I_DIRTY_TIME; 2076 inode->i_state |= flags; 2077 2078 /* 2079 * If the inode is being synced, just update its dirty state. 2080 * The unlocker will place the inode on the appropriate 2081 * superblock list, based upon its state. 2082 */ 2083 if (inode->i_state & I_SYNC) 2084 goto out_unlock_inode; 2085 2086 /* 2087 * Only add valid (hashed) inodes to the superblock's 2088 * dirty list. Add blockdev inodes as well. 2089 */ 2090 if (!S_ISBLK(inode->i_mode)) { 2091 if (inode_unhashed(inode)) 2092 goto out_unlock_inode; 2093 } 2094 if (inode->i_state & I_FREEING) 2095 goto out_unlock_inode; 2096 2097 /* 2098 * If the inode was already on b_dirty/b_io/b_more_io, don't 2099 * reposition it (that would break b_dirty time-ordering). 2100 */ 2101 if (!was_dirty) { 2102 struct bdi_writeback *wb; 2103 struct list_head *dirty_list; 2104 bool wakeup_bdi = false; 2105 2106 wb = locked_inode_to_wb_and_lock_list(inode); 2107 2108 WARN(bdi_cap_writeback_dirty(wb->bdi) && 2109 !test_bit(WB_registered, &wb->state), 2110 "bdi-%s not registered\n", wb->bdi->name); 2111 2112 inode->dirtied_when = jiffies; 2113 if (dirtytime) 2114 inode->dirtied_time_when = jiffies; 2115 2116 if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES)) 2117 dirty_list = &wb->b_dirty; 2118 else 2119 dirty_list = &wb->b_dirty_time; 2120 2121 wakeup_bdi = inode_io_list_move_locked(inode, wb, 2122 dirty_list); 2123 2124 spin_unlock(&wb->list_lock); 2125 trace_writeback_dirty_inode_enqueue(inode); 2126 2127 /* 2128 * If this is the first dirty inode for this bdi, 2129 * we have to wake-up the corresponding bdi thread 2130 * to make sure background write-back happens 2131 * later. 2132 */ 2133 if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi) 2134 wb_wakeup_delayed(wb); 2135 return; 2136 } 2137 } 2138 out_unlock_inode: 2139 spin_unlock(&inode->i_lock); 2140 2141 #undef I_DIRTY_INODE 2142 } 2143 EXPORT_SYMBOL(__mark_inode_dirty); 2144 2145 /* 2146 * The @s_sync_lock is used to serialise concurrent sync operations 2147 * to avoid lock contention problems with concurrent wait_sb_inodes() calls. 2148 * Concurrent callers will block on the s_sync_lock rather than doing contending 2149 * walks. The queueing maintains sync(2) required behaviour as all the IO that 2150 * has been issued up to the time this function is enter is guaranteed to be 2151 * completed by the time we have gained the lock and waited for all IO that is 2152 * in progress regardless of the order callers are granted the lock. 2153 */ 2154 static void wait_sb_inodes(struct super_block *sb) 2155 { 2156 struct inode *inode, *old_inode = NULL; 2157 2158 /* 2159 * We need to be protected against the filesystem going from 2160 * r/o to r/w or vice versa. 2161 */ 2162 WARN_ON(!rwsem_is_locked(&sb->s_umount)); 2163 2164 mutex_lock(&sb->s_sync_lock); 2165 spin_lock(&sb->s_inode_list_lock); 2166 2167 /* 2168 * Data integrity sync. Must wait for all pages under writeback, 2169 * because there may have been pages dirtied before our sync 2170 * call, but which had writeout started before we write it out. 2171 * In which case, the inode may not be on the dirty list, but 2172 * we still have to wait for that writeout. 2173 */ 2174 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 2175 struct address_space *mapping = inode->i_mapping; 2176 2177 spin_lock(&inode->i_lock); 2178 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || 2179 (mapping->nrpages == 0)) { 2180 spin_unlock(&inode->i_lock); 2181 continue; 2182 } 2183 __iget(inode); 2184 spin_unlock(&inode->i_lock); 2185 spin_unlock(&sb->s_inode_list_lock); 2186 2187 /* 2188 * We hold a reference to 'inode' so it couldn't have been 2189 * removed from s_inodes list while we dropped the 2190 * s_inode_list_lock. We cannot iput the inode now as we can 2191 * be holding the last reference and we cannot iput it under 2192 * s_inode_list_lock. So we keep the reference and iput it 2193 * later. 2194 */ 2195 iput(old_inode); 2196 old_inode = inode; 2197 2198 /* 2199 * We keep the error status of individual mapping so that 2200 * applications can catch the writeback error using fsync(2). 2201 * See filemap_fdatawait_keep_errors() for details. 2202 */ 2203 filemap_fdatawait_keep_errors(mapping); 2204 2205 cond_resched(); 2206 2207 spin_lock(&sb->s_inode_list_lock); 2208 } 2209 spin_unlock(&sb->s_inode_list_lock); 2210 iput(old_inode); 2211 mutex_unlock(&sb->s_sync_lock); 2212 } 2213 2214 static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr, 2215 enum wb_reason reason, bool skip_if_busy) 2216 { 2217 DEFINE_WB_COMPLETION_ONSTACK(done); 2218 struct wb_writeback_work work = { 2219 .sb = sb, 2220 .sync_mode = WB_SYNC_NONE, 2221 .tagged_writepages = 1, 2222 .done = &done, 2223 .nr_pages = nr, 2224 .reason = reason, 2225 }; 2226 struct backing_dev_info *bdi = sb->s_bdi; 2227 2228 if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info) 2229 return; 2230 WARN_ON(!rwsem_is_locked(&sb->s_umount)); 2231 2232 bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy); 2233 wb_wait_for_completion(bdi, &done); 2234 } 2235 2236 /** 2237 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block 2238 * @sb: the superblock 2239 * @nr: the number of pages to write 2240 * @reason: reason why some writeback work initiated 2241 * 2242 * Start writeback on some inodes on this super_block. No guarantees are made 2243 * on how many (if any) will be written, and this function does not wait 2244 * for IO completion of submitted IO. 2245 */ 2246 void writeback_inodes_sb_nr(struct super_block *sb, 2247 unsigned long nr, 2248 enum wb_reason reason) 2249 { 2250 __writeback_inodes_sb_nr(sb, nr, reason, false); 2251 } 2252 EXPORT_SYMBOL(writeback_inodes_sb_nr); 2253 2254 /** 2255 * writeback_inodes_sb - writeback dirty inodes from given super_block 2256 * @sb: the superblock 2257 * @reason: reason why some writeback work was initiated 2258 * 2259 * Start writeback on some inodes on this super_block. No guarantees are made 2260 * on how many (if any) will be written, and this function does not wait 2261 * for IO completion of submitted IO. 2262 */ 2263 void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason) 2264 { 2265 return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason); 2266 } 2267 EXPORT_SYMBOL(writeback_inodes_sb); 2268 2269 /** 2270 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway 2271 * @sb: the superblock 2272 * @nr: the number of pages to write 2273 * @reason: the reason of writeback 2274 * 2275 * Invoke writeback_inodes_sb_nr if no writeback is currently underway. 2276 * Returns 1 if writeback was started, 0 if not. 2277 */ 2278 bool try_to_writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr, 2279 enum wb_reason reason) 2280 { 2281 if (!down_read_trylock(&sb->s_umount)) 2282 return false; 2283 2284 __writeback_inodes_sb_nr(sb, nr, reason, true); 2285 up_read(&sb->s_umount); 2286 return true; 2287 } 2288 EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr); 2289 2290 /** 2291 * try_to_writeback_inodes_sb - try to start writeback if none underway 2292 * @sb: the superblock 2293 * @reason: reason why some writeback work was initiated 2294 * 2295 * Implement by try_to_writeback_inodes_sb_nr() 2296 * Returns 1 if writeback was started, 0 if not. 2297 */ 2298 bool try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason) 2299 { 2300 return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason); 2301 } 2302 EXPORT_SYMBOL(try_to_writeback_inodes_sb); 2303 2304 /** 2305 * sync_inodes_sb - sync sb inode pages 2306 * @sb: the superblock 2307 * 2308 * This function writes and waits on any dirty inode belonging to this 2309 * super_block. 2310 */ 2311 void sync_inodes_sb(struct super_block *sb) 2312 { 2313 DEFINE_WB_COMPLETION_ONSTACK(done); 2314 struct wb_writeback_work work = { 2315 .sb = sb, 2316 .sync_mode = WB_SYNC_ALL, 2317 .nr_pages = LONG_MAX, 2318 .range_cyclic = 0, 2319 .done = &done, 2320 .reason = WB_REASON_SYNC, 2321 .for_sync = 1, 2322 }; 2323 struct backing_dev_info *bdi = sb->s_bdi; 2324 2325 /* 2326 * Can't skip on !bdi_has_dirty() because we should wait for !dirty 2327 * inodes under writeback and I_DIRTY_TIME inodes ignored by 2328 * bdi_has_dirty() need to be written out too. 2329 */ 2330 if (bdi == &noop_backing_dev_info) 2331 return; 2332 WARN_ON(!rwsem_is_locked(&sb->s_umount)); 2333 2334 bdi_split_work_to_wbs(bdi, &work, false); 2335 wb_wait_for_completion(bdi, &done); 2336 2337 wait_sb_inodes(sb); 2338 } 2339 EXPORT_SYMBOL(sync_inodes_sb); 2340 2341 /** 2342 * write_inode_now - write an inode to disk 2343 * @inode: inode to write to disk 2344 * @sync: whether the write should be synchronous or not 2345 * 2346 * This function commits an inode to disk immediately if it is dirty. This is 2347 * primarily needed by knfsd. 2348 * 2349 * The caller must either have a ref on the inode or must have set I_WILL_FREE. 2350 */ 2351 int write_inode_now(struct inode *inode, int sync) 2352 { 2353 struct writeback_control wbc = { 2354 .nr_to_write = LONG_MAX, 2355 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE, 2356 .range_start = 0, 2357 .range_end = LLONG_MAX, 2358 }; 2359 2360 if (!mapping_cap_writeback_dirty(inode->i_mapping)) 2361 wbc.nr_to_write = 0; 2362 2363 might_sleep(); 2364 return writeback_single_inode(inode, &wbc); 2365 } 2366 EXPORT_SYMBOL(write_inode_now); 2367 2368 /** 2369 * sync_inode - write an inode and its pages to disk. 2370 * @inode: the inode to sync 2371 * @wbc: controls the writeback mode 2372 * 2373 * sync_inode() will write an inode and its pages to disk. It will also 2374 * correctly update the inode on its superblock's dirty inode lists and will 2375 * update inode->i_state. 2376 * 2377 * The caller must have a ref on the inode. 2378 */ 2379 int sync_inode(struct inode *inode, struct writeback_control *wbc) 2380 { 2381 return writeback_single_inode(inode, wbc); 2382 } 2383 EXPORT_SYMBOL(sync_inode); 2384 2385 /** 2386 * sync_inode_metadata - write an inode to disk 2387 * @inode: the inode to sync 2388 * @wait: wait for I/O to complete. 2389 * 2390 * Write an inode to disk and adjust its dirty state after completion. 2391 * 2392 * Note: only writes the actual inode, no associated data or other metadata. 2393 */ 2394 int sync_inode_metadata(struct inode *inode, int wait) 2395 { 2396 struct writeback_control wbc = { 2397 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE, 2398 .nr_to_write = 0, /* metadata-only */ 2399 }; 2400 2401 return sync_inode(inode, &wbc); 2402 } 2403 EXPORT_SYMBOL(sync_inode_metadata); 2404