Lines matching refs: wb (fs/fs-writeback.c)
81 static bool wb_io_lists_populated(struct bdi_writeback *wb) in wb_io_lists_populated() argument
83 if (wb_has_dirty_io(wb)) { in wb_io_lists_populated()
86 set_bit(WB_has_dirty_io, &wb->state); in wb_io_lists_populated()
87 WARN_ON_ONCE(!wb->avg_write_bandwidth); in wb_io_lists_populated()
88 atomic_long_add(wb->avg_write_bandwidth, in wb_io_lists_populated()
89 &wb->bdi->tot_write_bandwidth); in wb_io_lists_populated()
94 static void wb_io_lists_depopulated(struct bdi_writeback *wb) in wb_io_lists_depopulated() argument
96 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) && in wb_io_lists_depopulated()
97 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) { in wb_io_lists_depopulated()
98 clear_bit(WB_has_dirty_io, &wb->state); in wb_io_lists_depopulated()
99 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth, in wb_io_lists_depopulated()
100 &wb->bdi->tot_write_bandwidth) < 0); in wb_io_lists_depopulated()
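
These first two helpers keep a per-bdi aggregate in step with a per-wb flag: the first time a wb gains dirty IO, WB_has_dirty_io is set and its avg_write_bandwidth is added to the bdi's tot_write_bandwidth; once b_dirty, b_io and b_more_io are all empty again, the flag is cleared and the bandwidth subtracted (the WARN_ON_ONCE guards catch a zero bandwidth or a negative total). A minimal user-space analogue of that bookkeeping, with hypothetical struct wb / struct bdi stand-ins, C11 atomics in place of atomic_long_t, and counters standing in for the three lists; like the originals, it assumes the caller already holds the wb's list lock:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical stand-ins for struct bdi_writeback / backing_dev_info. */
    struct bdi { atomic_long tot_write_bandwidth; };
    struct wb {
        struct bdi *bdi;
        bool        has_dirty_io;                 /* models the WB_has_dirty_io bit */
        long        avg_write_bandwidth;
        int         nr_dirty, nr_io, nr_more_io;  /* model the three dirty lists    */
    };

    /* Returns true only on the clean -> dirty transition, i.e. when the
     * caller should consider waking the flusher for this bdi. */
    static bool wb_lists_populated(struct wb *wb)
    {
        if (wb->has_dirty_io)
            return false;
        wb->has_dirty_io = true;
        atomic_fetch_add(&wb->bdi->tot_write_bandwidth, wb->avg_write_bandwidth);
        return true;
    }

    static void wb_lists_depopulated(struct wb *wb)
    {
        if (wb->has_dirty_io &&
            !wb->nr_dirty && !wb->nr_io && !wb->nr_more_io) {
            wb->has_dirty_io = false;
            atomic_fetch_sub(&wb->bdi->tot_write_bandwidth,
                             wb->avg_write_bandwidth);
        }
    }
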
115 struct bdi_writeback *wb, in inode_io_list_move_locked() argument
118 assert_spin_locked(&wb->list_lock); in inode_io_list_move_locked()
125 if (head != &wb->b_dirty_time) in inode_io_list_move_locked()
126 return wb_io_lists_populated(wb); in inode_io_list_move_locked()
128 wb_io_lists_depopulated(wb); in inode_io_list_move_locked()
132 static void wb_wakeup(struct bdi_writeback *wb) in wb_wakeup() argument
134 spin_lock_irq(&wb->work_lock); in wb_wakeup()
135 if (test_bit(WB_registered, &wb->state)) in wb_wakeup()
136 mod_delayed_work(bdi_wq, &wb->dwork, 0); in wb_wakeup()
137 spin_unlock_irq(&wb->work_lock); in wb_wakeup()
154 static void wb_wakeup_delayed(struct bdi_writeback *wb) in wb_wakeup_delayed() argument
159 spin_lock_irq(&wb->work_lock); in wb_wakeup_delayed()
160 if (test_bit(WB_registered, &wb->state)) in wb_wakeup_delayed()
161 queue_delayed_work(bdi_wq, &wb->dwork, timeout); in wb_wakeup_delayed()
162 spin_unlock_irq(&wb->work_lock); in wb_wakeup_delayed()
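
wb_wakeup() and wb_wakeup_delayed() share one discipline: take wb->work_lock with interrupts disabled, touch wb->dwork only while WB_registered is still set, then drop the lock; the delayed variant merely uses queue_delayed_work() with a timeout (derived from dirty_writeback_interval, on lines not matched here) where the immediate one uses mod_delayed_work(..., 0). A condensed kernel-style sketch of the shared pattern; wb_kick() is an illustrative name, not a kernel function:

    /* Sketch only: kick wb->dwork after @delay jiffies, but never for a wb
     * that has already been unregistered.  delay == 0 gives wb_wakeup()'s
     * behaviour, a non-zero delay gives wb_wakeup_delayed()'s. */
    static void wb_kick(struct bdi_writeback *wb, unsigned long delay)
    {
        spin_lock_irq(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state)) {
            if (delay)
                queue_delayed_work(bdi_wq, &wb->dwork, delay);
            else
                mod_delayed_work(bdi_wq, &wb->dwork, 0);
        }
        spin_unlock_irq(&wb->work_lock);
    }
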
180 static void wb_queue_work(struct bdi_writeback *wb, in wb_queue_work() argument
183 trace_writeback_queue(wb, work); in wb_queue_work()
188 spin_lock_irq(&wb->work_lock); in wb_queue_work()
190 if (test_bit(WB_registered, &wb->state)) { in wb_queue_work()
191 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
192 mod_delayed_work(bdi_wq, &wb->dwork, 0); in wb_queue_work()
196 spin_unlock_irq(&wb->work_lock); in wb_queue_work()
277 struct bdi_writeback *wb = NULL; in __inode_attach_wb() local
284 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); in __inode_attach_wb()
288 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); in __inode_attach_wb()
293 if (!wb) in __inode_attach_wb()
294 wb = &bdi->wb; in __inode_attach_wb()
300 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) in __inode_attach_wb()
301 wb_put(wb); in __inode_attach_wb()
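
The __inode_attach_wb() fragments show the lock-free attach: look up or create a cgroup wb (wb_get_create() under GFP_ATOMIC), fall back to the bdi's embedded wb when that fails, then publish the pointer with cmpxchg() so that of two racing attachers only one wins, and the loser drops the reference it took. A self-contained user-space analogue of the publish step, using C11 atomics instead of the kernel's cmpxchg(); struct wb, struct inode_like, attach_wb() and the toy refcount are all hypothetical:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Hypothetical stand-ins: a refcounted writeback handle and an inode. */
    struct wb { atomic_int ref; };
    struct inode_like { _Atomic(struct wb *) i_wb; };

    static void wb_put(struct wb *wb) { atomic_fetch_sub(&wb->ref, 1); }

    /* Publish @wb as @inode->i_wb unless a concurrent attacher beat us to
     * it; whoever loses the race releases the reference it was holding. */
    static void attach_wb(struct inode_like *inode, struct wb *wb)
    {
        struct wb *expected = NULL;

        if (!atomic_compare_exchange_strong(&inode->i_wb, &expected, wb))
            wb_put(wb);
    }
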
313 struct bdi_writeback *wb) in inode_cgwb_move_to_attached() argument
315 assert_spin_locked(&wb->list_lock); in inode_cgwb_move_to_attached()
320 if (wb != &wb->bdi->wb) in inode_cgwb_move_to_attached()
321 list_move(&inode->i_io_list, &wb->b_attached); in inode_cgwb_move_to_attached()
324 wb_io_lists_depopulated(wb); in inode_cgwb_move_to_attached()
338 __acquires(&wb->list_lock) in locked_inode_to_wb_and_lock_list()
341 struct bdi_writeback *wb = inode_to_wb(inode); in locked_inode_to_wb_and_lock_list() local
349 wb_get(wb); in locked_inode_to_wb_and_lock_list()
351 spin_lock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
354 if (likely(wb == inode->i_wb)) { in locked_inode_to_wb_and_lock_list()
355 wb_put(wb); /* @inode already has ref */ in locked_inode_to_wb_and_lock_list()
356 return wb; in locked_inode_to_wb_and_lock_list()
359 spin_unlock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
360 wb_put(wb); in locked_inode_to_wb_and_lock_list()
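
locked_inode_to_wb_and_lock_list() is the lock-ordering dance for cgroup writeback: the inode-to-wb association is stable only under both inode->i_lock and wb->list_lock, but list_lock nests outside i_lock, so the helper pins the wb with wb_get(), drops i_lock, takes list_lock, and re-checks inode->i_wb; if the inode switched wbs in the unlocked window it backs out and retries. Only the wb-touching lines appear in the matches; a kernel-style sketch of the whole loop (illustrative name lock_inode_wb(), not a verbatim copy of the source):

    static struct bdi_writeback *lock_inode_wb(struct inode *inode)
        __releases(&inode->i_lock)
        __acquires(&wb->list_lock)
    {
        while (true) {
            struct bdi_writeback *wb = inode_to_wb(inode);

            /* Pin @wb, then trade i_lock for the outer list_lock. */
            wb_get(wb);
            spin_unlock(&inode->i_lock);
            spin_lock(&wb->list_lock);

            /* @inode may have switched wbs while neither lock was held. */
            if (likely(wb == inode->i_wb)) {
                wb_put(wb);        /* @inode already holds its own ref */
                return wb;
            }

            spin_unlock(&wb->list_lock);
            wb_put(wb);
            spin_lock(&inode->i_lock);
        }
    }
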
374 __acquires(&wb->list_lock) in inode_to_wb_and_lock_list()
626 static void wb_queue_isw(struct bdi_writeback *wb, in wb_queue_isw() argument
629 if (llist_add(&isw->list, &wb->switch_wbs_ctxs)) in wb_queue_isw()
630 queue_work(isw_wq, &wb->switch_work); in wb_queue_isw()
719 bool cleanup_offline_cgwb(struct bdi_writeback *wb) in cleanup_offline_cgwb() argument
734 for (memcg_css = wb->memcg_css->parent; memcg_css; in cleanup_offline_cgwb()
736 new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL); in cleanup_offline_cgwb()
741 new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */ in cleanup_offline_cgwb()
744 spin_lock(&wb->list_lock); in cleanup_offline_cgwb()
753 restart = isw_prepare_wbs_switch(new_wb, isw, &wb->b_attached, &nr); in cleanup_offline_cgwb()
755 restart = isw_prepare_wbs_switch(new_wb, isw, &wb->b_dirty_time, in cleanup_offline_cgwb()
757 spin_unlock(&wb->list_lock); in cleanup_offline_cgwb()
767 trace_inode_switch_wbs_queue(wb, new_wb, nr); in cleanup_offline_cgwb()
792 wbc->wb = inode_to_wb(inode); in wbc_attach_and_unlock_inode()
795 wbc->wb_id = wbc->wb->memcg_css->id; in wbc_attach_and_unlock_inode()
802 wb_get(wbc->wb); in wbc_attach_and_unlock_inode()
812 if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css))) in wbc_attach_and_unlock_inode()
873 struct bdi_writeback *wb = wbc->wb; in wbc_detach_inode() local
879 if (!wb) in wbc_detach_inode()
906 wb->avg_write_bandwidth); in wbc_detach_inode()
952 wb_put(wbc->wb); in wbc_detach_inode()
953 wbc->wb = NULL; in wbc_detach_inode()
979 if (!wbc->wb || wbc->no_cgroup_owner) in wbc_account_cgroup_owner()
1016 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) in wb_split_bdi_pages() argument
1018 unsigned long this_bw = wb->avg_write_bandwidth; in wb_split_bdi_pages()
1019 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_split_bdi_pages()
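
wb_split_bdi_pages() turns a bdi-wide page target into a per-wb share proportional to that wb's slice of the total write bandwidth: roughly nr_pages * this_bw / tot_bw, rounded up, with LONG_MAX passed through unchanged and the full nr_pages used when the totals are degenerate (erring on the side of writing more). A runnable, self-contained illustration of just the arithmetic; split_pages() and div_round_up() are illustrative names and the bandwidth figures are made up:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Round-up division, like the kernel's DIV_ROUND_UP_ULL(). */
    static long div_round_up(uint64_t n, uint64_t d)
    {
        return (long)((n + d - 1) / d);
    }

    /* Give one wb a share of @nr_pages matching its share of the bdi's
     * total write bandwidth; degenerate cases fall back to all of it. */
    static long split_pages(long nr_pages, unsigned long this_bw,
                            unsigned long tot_bw)
    {
        if (nr_pages == LONG_MAX)
            return LONG_MAX;
        if (!tot_bw || this_bw >= tot_bw)
            return nr_pages;
        return div_round_up((uint64_t)nr_pages * this_bw, tot_bw);
    }

    int main(void)
    {
        /* A wb sustaining 100 MB/s on a bdi totalling 400 MB/s gets a
         * quarter of a 1024-page request: prints 256. */
        printf("%ld\n", split_pages(1024, 100, 400));
        return 0;
    }
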
1051 struct bdi_writeback *wb = list_entry(&bdi->wb_list, in bdi_split_work_to_wbs() local
1057 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { in bdi_split_work_to_wbs()
1069 if (!wb_has_dirty_io(wb) && in bdi_split_work_to_wbs()
1071 list_empty(&wb->b_dirty_time))) in bdi_split_work_to_wbs()
1073 if (skip_if_busy && writeback_in_progress(wb)) in bdi_split_work_to_wbs()
1076 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages); in bdi_split_work_to_wbs()
1083 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
1094 if (!wb_tryget(wb)) in bdi_split_work_to_wbs()
1104 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
1105 last_wb = wb; in bdi_split_work_to_wbs()
1132 struct bdi_writeback *wb; in cgroup_writeback_by_id() local
1156 wb = wb_get_lookup(bdi, memcg_css); in cgroup_writeback_by_id()
1157 if (!wb) { in cgroup_writeback_by_id()
1184 wb_queue_work(wb, work); in cgroup_writeback_by_id()
1190 wb_put(wb); in cgroup_writeback_by_id()
1246 struct bdi_writeback *wb) in inode_cgwb_move_to_attached() argument
1248 assert_spin_locked(&wb->list_lock); in inode_cgwb_move_to_attached()
1254 wb_io_lists_depopulated(wb); in inode_cgwb_move_to_attached()
1260 __acquires(&wb->list_lock) in locked_inode_to_wb_and_lock_list()
1262 struct bdi_writeback *wb = inode_to_wb(inode); in locked_inode_to_wb_and_lock_list() local
1265 spin_lock(&wb->list_lock); in locked_inode_to_wb_and_lock_list()
1266 return wb; in locked_inode_to_wb_and_lock_list()
1270 __acquires(&wb->list_lock) in inode_to_wb_and_lock_list()
1272 struct bdi_writeback *wb = inode_to_wb(inode); in inode_to_wb_and_lock_list() local
1274 spin_lock(&wb->list_lock); in inode_to_wb_and_lock_list()
1275 return wb; in inode_to_wb_and_lock_list()
1278 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages) in wb_split_bdi_pages() argument
1289 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) { in bdi_split_work_to_wbs()
1291 wb_queue_work(&bdi->wb, base_work); in bdi_split_work_to_wbs()
1314 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason) in wb_start_writeback() argument
1316 if (!wb_has_dirty_io(wb)) in wb_start_writeback()
1327 if (test_bit(WB_start_all, &wb->state) || in wb_start_writeback()
1328 test_and_set_bit(WB_start_all, &wb->state)) in wb_start_writeback()
1331 wb->start_all_reason = reason; in wb_start_writeback()
1332 wb_wakeup(wb); in wb_start_writeback()
1345 void wb_start_background_writeback(struct bdi_writeback *wb) in wb_start_background_writeback() argument
1351 trace_writeback_wake_background(wb); in wb_start_background_writeback()
1352 wb_wakeup(wb); in wb_start_background_writeback()
1360 struct bdi_writeback *wb; in inode_io_list_del() local
1369 wb = inode_to_wb_and_lock_list(inode); in inode_io_list_del()
1374 wb_io_lists_depopulated(wb); in inode_io_list_del()
1377 spin_unlock(&wb->list_lock); in inode_io_list_del()
1426 static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) in redirty_tail_locked() argument
1438 wb_io_lists_depopulated(wb); in redirty_tail_locked()
1441 if (!list_empty(&wb->b_dirty)) { in redirty_tail_locked()
1444 tail = wb_inode(wb->b_dirty.next); in redirty_tail_locked()
1448 inode_io_list_move_locked(inode, wb, &wb->b_dirty); in redirty_tail_locked()
1451 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) in redirty_tail() argument
1454 redirty_tail_locked(inode, wb); in redirty_tail()
1461 static void requeue_io(struct inode *inode, struct bdi_writeback *wb) in requeue_io() argument
1463 inode_io_list_move_locked(inode, wb, &wb->b_more_io); in requeue_io()
1558 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, in queue_io() argument
1564 assert_spin_locked(&wb->list_lock); in queue_io()
1565 list_splice_init(&wb->b_more_io, &wb->b_io); in queue_io()
1566 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before); in queue_io()
1569 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, in queue_io()
1572 wb_io_lists_populated(wb); in queue_io()
1573 trace_writeback_queue_io(wb, work, dirtied_before, moved); in queue_io()
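
queue_io() refills wb->b_io in a fixed order, which is what gives older inodes priority: leftovers from the previous pass (b_more_io) are spliced back first, then inodes from b_dirty whose dirtied_when is older than dirtied_before, then expired lazytime entries from b_dirty_time, and if anything moved the wb is marked populated. A condensed sketch of that flow; refill_b_io() is an illustrative name and the dirtytime_before cutoff is shown as a plain parameter rather than the interval-based value the kernel computes:

    /* Sketch: called under wb->list_lock, mirroring the order above. */
    static void refill_b_io(struct bdi_writeback *wb,
                            unsigned long dirtied_before,
                            unsigned long dirtytime_before)
    {
        int moved;

        list_splice_init(&wb->b_more_io, &wb->b_io);    /* retries first */
        moved  = move_expired_inodes(&wb->b_dirty, &wb->b_io,
                                     dirtied_before);
        moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
                                     dirtytime_before); /* lazytime inodes */
        if (moved)
            wb_io_lists_populated(wb);
    }
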
1648 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, in requeue_inode() argument
1672 redirty_tail_locked(inode, wb); in requeue_inode()
1674 inode_cgwb_move_to_attached(inode, wb); in requeue_inode()
1686 requeue_io(inode, wb); in requeue_inode()
1695 redirty_tail_locked(inode, wb); in requeue_inode()
1703 redirty_tail_locked(inode, wb); in requeue_inode()
1706 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); in requeue_inode()
1710 inode_cgwb_move_to_attached(inode, wb); in requeue_inode()
1823 struct bdi_writeback *wb; in writeback_single_inode() local
1862 wb = inode_to_wb_and_lock_list(inode); in writeback_single_inode()
1875 inode_cgwb_move_to_attached(inode, wb); in writeback_single_inode()
1878 redirty_tail_locked(inode, wb); in writeback_single_inode()
1882 wb, in writeback_single_inode()
1883 &wb->b_dirty_time); in writeback_single_inode()
1888 spin_unlock(&wb->list_lock); in writeback_single_inode()
1896 struct bdi_writeback *wb, struct wb_writeback_work *work) in writeback_chunk_size() argument
1916 pages = min(wb->avg_write_bandwidth / 2, in writeback_chunk_size()
1933 struct bdi_writeback *wb, in writeback_sb_inodes() argument
1955 while (!list_empty(&wb->b_io)) { in writeback_sb_inodes()
1956 struct inode *inode = wb_inode(wb->b_io.prev); in writeback_sb_inodes()
1967 redirty_tail(inode, wb); in writeback_sb_inodes()
1986 redirty_tail_locked(inode, wb); in writeback_sb_inodes()
2000 requeue_io(inode, wb); in writeback_sb_inodes()
2005 spin_unlock(&wb->list_lock); in writeback_sb_inodes()
2016 spin_lock(&wb->list_lock); in writeback_sb_inodes()
2022 write_chunk = writeback_chunk_size(inode->i_sb, wb, work); in writeback_sb_inodes()
2069 if (unlikely(tmp_wb != wb)) { in writeback_sb_inodes()
2071 spin_lock(&wb->list_lock); in writeback_sb_inodes()
2088 static long __writeback_inodes_wb(struct bdi_writeback *wb, in __writeback_inodes_wb() argument
2094 while (!list_empty(&wb->b_io)) { in __writeback_inodes_wb()
2095 struct inode *inode = wb_inode(wb->b_io.prev); in __writeback_inodes_wb()
2104 redirty_tail(inode, wb); in __writeback_inodes_wb()
2107 wrote += writeback_sb_inodes(sb, wb, work); in __writeback_inodes_wb()
2122 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, in writeback_inodes_wb() argument
2134 spin_lock(&wb->list_lock); in writeback_inodes_wb()
2135 if (list_empty(&wb->b_io)) in writeback_inodes_wb()
2136 queue_io(wb, &work, jiffies); in writeback_inodes_wb()
2137 __writeback_inodes_wb(wb, &work); in writeback_inodes_wb()
2138 spin_unlock(&wb->list_lock); in writeback_inodes_wb()
2159 static long wb_writeback(struct bdi_writeback *wb, in wb_writeback() argument
2184 !list_empty(&wb->work_list)) in wb_writeback()
2191 if (work->for_background && !wb_over_bg_thresh(wb)) in wb_writeback()
2195 spin_lock(&wb->list_lock); in wb_writeback()
2197 trace_writeback_start(wb, work); in wb_writeback()
2198 if (list_empty(&wb->b_io)) { in wb_writeback()
2212 queue_io(wb, work, dirtied_before); in wb_writeback()
2216 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
2218 progress = __writeback_inodes_wb(wb, work); in wb_writeback()
2219 trace_writeback_written(wb, work); in wb_writeback()
2230 spin_unlock(&wb->list_lock); in wb_writeback()
2237 if (list_empty(&wb->b_more_io)) { in wb_writeback()
2238 spin_unlock(&wb->list_lock); in wb_writeback()
2247 trace_writeback_wait(wb, work); in wb_writeback()
2248 inode = wb_inode(wb->b_more_io.prev); in wb_writeback()
2250 spin_unlock(&wb->list_lock); in wb_writeback()
2262 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb) in get_next_work_item() argument
2266 spin_lock_irq(&wb->work_lock); in get_next_work_item()
2267 if (!list_empty(&wb->work_list)) { in get_next_work_item()
2268 work = list_entry(wb->work_list.next, in get_next_work_item()
2272 spin_unlock_irq(&wb->work_lock); in get_next_work_item()
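
get_next_work_item() is the consumer side of the wb_queue_work() fragments above: both serialize on wb->work_lock, the producer appending to wb->work_list and kicking wb->dwork, the consumer popping the head until the list is empty. Only the wb-touching lines are matched; filling in the elided list handling, the helper plausibly reads as follows (a sketch, not a verbatim copy):

    static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
    {
        struct wb_writeback_work *work = NULL;

        spin_lock_irq(&wb->work_lock);
        if (!list_empty(&wb->work_list)) {
            work = list_entry(wb->work_list.next,
                              struct wb_writeback_work, list);
            list_del_init(&work->list);
        }
        spin_unlock_irq(&wb->work_lock);
        return work;
    }
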
2276 static long wb_check_background_flush(struct bdi_writeback *wb) in wb_check_background_flush() argument
2278 if (wb_over_bg_thresh(wb)) { in wb_check_background_flush()
2288 return wb_writeback(wb, &work); in wb_check_background_flush()
2294 static long wb_check_old_data_flush(struct bdi_writeback *wb) in wb_check_old_data_flush() argument
2305 expired = wb->last_old_flush + in wb_check_old_data_flush()
2310 wb->last_old_flush = jiffies; in wb_check_old_data_flush()
2322 return wb_writeback(wb, &work); in wb_check_old_data_flush()
2328 static long wb_check_start_all(struct bdi_writeback *wb) in wb_check_start_all() argument
2332 if (!test_bit(WB_start_all, &wb->state)) in wb_check_start_all()
2338 .nr_pages = wb_split_bdi_pages(wb, nr_pages), in wb_check_start_all()
2341 .reason = wb->start_all_reason, in wb_check_start_all()
2344 nr_pages = wb_writeback(wb, &work); in wb_check_start_all()
2347 clear_bit(WB_start_all, &wb->state); in wb_check_start_all()
2355 static long wb_do_writeback(struct bdi_writeback *wb) in wb_do_writeback() argument
2360 set_bit(WB_writeback_running, &wb->state); in wb_do_writeback()
2361 while ((work = get_next_work_item(wb)) != NULL) { in wb_do_writeback()
2362 trace_writeback_exec(wb, work); in wb_do_writeback()
2363 wrote += wb_writeback(wb, work); in wb_do_writeback()
2370 wrote += wb_check_start_all(wb); in wb_do_writeback()
2375 wrote += wb_check_old_data_flush(wb); in wb_do_writeback()
2376 wrote += wb_check_background_flush(wb); in wb_do_writeback()
2377 clear_bit(WB_writeback_running, &wb->state); in wb_do_writeback()
2388 struct bdi_writeback *wb = container_of(to_delayed_work(work), in wb_workfn() local
2392 set_worker_desc("flush-%s", bdi_dev_name(wb->bdi)); in wb_workfn()
2395 !test_bit(WB_registered, &wb->state))) { in wb_workfn()
2403 pages_written = wb_do_writeback(wb); in wb_workfn()
2405 } while (!list_empty(&wb->work_list)); in wb_workfn()
2412 pages_written = writeback_inodes_wb(wb, 1024, in wb_workfn()
2417 if (!list_empty(&wb->work_list)) in wb_workfn()
2418 wb_wakeup(wb); in wb_workfn()
2419 else if (wb_has_dirty_io(wb) && dirty_writeback_interval) in wb_workfn()
2420 wb_wakeup_delayed(wb); in wb_workfn()
2429 struct bdi_writeback *wb; in __wakeup_flusher_threads_bdi() local
2434 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) in __wakeup_flusher_threads_bdi()
2435 wb_start_writeback(wb, reason); in __wakeup_flusher_threads_bdi()
2488 struct bdi_writeback *wb; in wakeup_dirtytime_writeback() local
2490 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) in wakeup_dirtytime_writeback()
2491 if (!list_empty(&wb->b_dirty_time)) in wakeup_dirtytime_writeback()
2492 wb_wakeup(wb); in wakeup_dirtytime_writeback()
2559 struct bdi_writeback *wb = NULL; in __mark_inode_dirty() local
2627 wb = locked_inode_to_wb_and_lock_list(inode); in __mark_inode_dirty()
2664 dirty_list = &wb->b_dirty; in __mark_inode_dirty()
2666 dirty_list = &wb->b_dirty_time; in __mark_inode_dirty()
2668 wakeup_bdi = inode_io_list_move_locked(inode, wb, in __mark_inode_dirty()
2678 (wb->bdi->capabilities & BDI_CAP_WRITEBACK)) in __mark_inode_dirty()
2679 wb_wakeup_delayed(wb); in __mark_inode_dirty()
2681 spin_unlock(&wb->list_lock); in __mark_inode_dirty()
2689 if (wb) in __mark_inode_dirty()
2690 spin_unlock(&wb->list_lock); in __mark_inode_dirty()