fs-writeback.c (9022ca6b1129da44e3d5c4fa779b8bb9ceabe2ce, old) vs. fs-writeback.c (b93b016313b3ba8003c3b8bb71f569af91f19fc7, new); lines prefixed with - below are from the old revision, lines prefixed with + are from the new one
1 /*
2 * fs/fs-writeback.c
3 *
4 * Copyright (C) 2002, Linus Torvalds.
5 *
6 * Contains all the functions related to writing back and waiting
7 * upon dirty inodes against superblocks, and writing back dirty
8 * pages against inodes. ie: data writeback. Writeout of the

--- 333 unchanged lines hidden ---

342 struct radix_tree_iter iter;
343 bool switched = false;
344 void **slot;
345
346 /*
347 * By the time control reaches here, RCU grace period has passed
348 * since I_WB_SWITCH assertion and all wb stat update transactions
349 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
-350 * synchronizing against mapping->tree_lock.
+350 * synchronizing against the i_pages lock.
351 *
-352 * Grabbing old_wb->list_lock, inode->i_lock and mapping->tree_lock
+352 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
353 * gives us exclusion against all wb related operations on @inode
354 * including IO list manipulations and stat updates.
355 */
356 if (old_wb < new_wb) {
357 spin_lock(&old_wb->list_lock);
358 spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
359 } else {
360 spin_lock(&new_wb->list_lock);
361 spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
362 }
363 spin_lock(&inode->i_lock);
-364 spin_lock_irq(&mapping->tree_lock);
+364 xa_lock_irq(&mapping->i_pages);
365
366 /*
367 * Once I_FREEING is visible under i_lock, the eviction path owns
368 * the inode and we shouldn't modify ->i_io_list.
369 */
370 if (unlikely(inode->i_state & I_FREEING))
371 goto skip_switch;
372
373 /*
374 * Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
375 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
-376 * pages actually under underwriteback.
+376 * pages actually under writeback.
377 */
-378 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
+378 radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
379 PAGECACHE_TAG_DIRTY) {
380 struct page *page = radix_tree_deref_slot_protected(slot,
-381 &mapping->tree_lock);
+381 &mapping->i_pages.xa_lock);
382 if (likely(page) && PageDirty(page)) {
383 dec_wb_stat(old_wb, WB_RECLAIMABLE);
384 inc_wb_stat(new_wb, WB_RECLAIMABLE);
385 }
386 }
387
-388 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
+388 radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, 0,
389 PAGECACHE_TAG_WRITEBACK) {
390 struct page *page = radix_tree_deref_slot_protected(slot,
-391 &mapping->tree_lock);
+391 &mapping->i_pages.xa_lock);
392 if (likely(page)) {
393 WARN_ON_ONCE(!PageWriteback(page));
394 dec_wb_stat(old_wb, WB_WRITEBACK);
395 inc_wb_stat(new_wb, WB_WRITEBACK);
396 }
397 }
398
399 wb_get(new_wb);

--- 25 unchanged lines hidden ---

425 switched = true;
426 skip_switch:
427 /*
428 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
429 * ensures that the new wb is visible if they see !I_WB_SWITCH.
430 */
431 smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
432
-433 spin_unlock_irq(&mapping->tree_lock);
+433 xa_unlock_irq(&mapping->i_pages);
434 spin_unlock(&inode->i_lock);
435 spin_unlock(&new_wb->list_lock);
436 spin_unlock(&old_wb->list_lock);
437
438 if (switched) {
439 wb_wakeup(new_wb);
440 wb_put(old_wb);
441 }
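
The lock sequence at lines 356-362 above is the standard way to take two locks of the same class without risking an ABBA deadlock: whichever of the two bdi_writeback structures sits at the lower address has its list_lock taken first, and spin_lock_nested(..., SINGLE_DEPTH_NESTING) only tells lockdep that the second same-class acquisition is intentional. Below is a minimal userspace sketch of that pattern using pthread mutexes; it is not kernel code, and struct wb_ctx, wb_pair_lock() and wb_pair_unlock() are invented names for the illustration.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct wb_ctx {                         /* stand-in for struct bdi_writeback */
	pthread_mutex_t list_lock;
};

static void wb_pair_lock(struct wb_ctx *old_wb, struct wb_ctx *new_wb)
{
	/* Always take the lower-addressed lock first; every caller of a
	 * given pair therefore agrees on the order and cannot deadlock. */
	if ((uintptr_t)old_wb < (uintptr_t)new_wb) {
		pthread_mutex_lock(&old_wb->list_lock);
		pthread_mutex_lock(&new_wb->list_lock);
	} else {
		pthread_mutex_lock(&new_wb->list_lock);
		pthread_mutex_lock(&old_wb->list_lock);
	}
}

static void wb_pair_unlock(struct wb_ctx *old_wb, struct wb_ctx *new_wb)
{
	/* Unlock order is irrelevant for deadlock avoidance. */
	pthread_mutex_unlock(&old_wb->list_lock);
	pthread_mutex_unlock(&new_wb->list_lock);
}

int main(void)
{
	struct wb_ctx a, b;

	pthread_mutex_init(&a.list_lock, NULL);
	pthread_mutex_init(&b.list_lock, NULL);

	/* Two "switches" that use the same pair in opposite roles still
	 * acquire the locks in the same (address-derived) order. */
	wb_pair_lock(&a, &b);
	wb_pair_unlock(&a, &b);
	wb_pair_lock(&b, &a);
	wb_pair_unlock(&b, &a);

	puts("both orderings locked and unlocked without deadlock");
	return 0;
}

Build with something like cc -std=c11 -pthread. Because both orderings of the pair resolve to the same acquisition order, two concurrent switches can never each hold one of the locks while waiting for the other.
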

--- 59 unchanged lines hidden ---

501 spin_unlock(&inode->i_lock);
502
503 isw->inode = inode;
504
505 atomic_inc(&isw_nr_in_flight);
506
507 /*
508 * In addition to synchronizing among switchers, I_WB_SWITCH tells
-509 * the RCU protected stat update paths to grab the mapping's
-510 * tree_lock so that stat transfer can synchronize against them.
+509 * the RCU protected stat update paths to grab the i_page
+510 * lock so that stat transfer can synchronize against them.
511 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
512 */
513 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
514 return;
515
516 out_free:
517 if (isw->new_wb)
518 wb_put(isw->new_wb);

--- 1973 unchanged lines hidden ---
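
The comments around lines 427-430 and 507-512 describe the two ends of the I_WB_SWITCH handshake: the switch-over path publishes the new association and then clears I_WB_SWITCH with the smp_store_release() at line 431, while unlocked_inode_to_wb_begin() reads the flag with acquire semantics and falls back to the i_pages lock only while a switch is in flight. The sketch below models just that release/acquire pairing in userspace with C11 atomics; it is not kernel code, the names switching, current_wb, switch_fn() and reader_fn() are invented, the slow path is reduced to a spin where the kernel would take the i_pages lock, and the RCU grace period the real protocol also relies on is left out.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct wb { long nr_dirty; };                   /* stand-in for struct bdi_writeback */

static struct wb wb_a, wb_b;

static _Atomic int switching = 1;               /* models I_WB_SWITCH: a switch is in flight */
static struct wb *_Atomic current_wb = &wb_a;   /* models inode->i_wb */

/* Switch side: publish the new wb, then clear the flag with release
 * semantics (models the smp_store_release() at line 431). */
static void *switch_fn(void *arg)
{
	(void)arg;
	atomic_store_explicit(&current_wb, &wb_b, memory_order_relaxed);
	atomic_store_explicit(&switching, 0, memory_order_release);
	return NULL;
}

/* Stat-update side: models only the lockless fast path of
 * unlocked_inode_to_wb_begin().  The acquire load pairs with the release
 * store above: once we observe switching == 0 we are guaranteed to also
 * observe current_wb == &wb_b. */
static void *reader_fn(void *arg)
{
	(void)arg;
	while (atomic_load_explicit(&switching, memory_order_acquire))
		;                               /* the kernel takes the i_pages lock here instead of spinning */
	struct wb *w = atomic_load_explicit(&current_wb, memory_order_relaxed);
	w->nr_dirty++;                          /* safe: w is the fully published new wb */
	return NULL;
}

int main(void)
{
	pthread_t reader, switcher;

	pthread_create(&reader, NULL, reader_fn, NULL);
	pthread_create(&switcher, NULL, switch_fn, NULL);
	pthread_join(reader, NULL);
	pthread_join(switcher, NULL);

	printf("new wb saw %ld stat update(s)\n", wb_b.nr_dirty);
	return 0;
}

The point of the pairing is the guarantee inside reader_fn(): once the acquire load observes the flag cleared, the subsequent load of current_wb is guaranteed to return the pointer stored before the release, so the fast path can skip the lock entirely.
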