Lines matching refs: wbc
103 struct writeback_control *wbc; member
716 if (bio_ctrl->wbc) { in alloc_new_bio()
734 wbc_init_bio(bio_ctrl->wbc, &bbio->bio); in alloc_new_bio()
785 if (bio_ctrl->wbc) in submit_extent_folio()
786 wbc_account_cgroup_owner(bio_ctrl->wbc, folio, in submit_extent_folio()
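The alloc_new_bio() and submit_extent_folio() hits show the two include/linux/writeback.h helpers used on the data path: wbc_init_bio() binds a freshly allocated bio to the cgroup that dirtied the inode, and wbc_account_cgroup_owner() charges the submitted bytes to it. A minimal sketch of that pairing, assuming a kernel where wbc_account_cgroup_owner() takes a folio (as the folio argument in the hit above suggests); the myfs_* names are hypothetical, not the btrfs functions:

#include <linux/bio.h>
#include <linux/writeback.h>

/* Sketch only: pair wbc_init_bio() with per-folio cgroup accounting. */
static struct bio *myfs_alloc_write_bio(struct block_device *bdev,
					struct writeback_control *wbc)
{
	struct bio *bio = bio_alloc(bdev, BIO_MAX_VECS,
				    REQ_OP_WRITE | wbc_to_write_flags(wbc),
				    GFP_NOFS);

	if (wbc)
		wbc_init_bio(wbc, bio);	/* bind bio to the dirtying cgroup */
	return bio;
}

static void myfs_add_folio(struct bio *bio, struct folio *folio, size_t len,
			   size_t off, struct writeback_control *wbc)
{
	bio_add_folio_nofail(bio, folio, len, off);
	if (wbc)
		wbc_account_cgroup_owner(wbc, folio, len);	/* charge bytes */
}
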
1159 struct writeback_control *wbc = bio_ctrl->wbc; in writepage_delalloc() local
1258 wbc); in writepage_delalloc()
1337 wbc->nr_to_write -= delalloc_to_write; in writepage_delalloc()
1341 if (wbc->nr_to_write < delalloc_to_write) { in writepage_delalloc()
1346 wbc->nr_to_write = min_t(u64, delalloc_to_write, in writepage_delalloc()
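The writepage_delalloc() hits adjust the writeback budget after delalloc has been kicked off: the pages queued for the whole delalloc range are subtracted from wbc->nr_to_write, and the budget is then topped back up so the range just submitted is not cut short by a small nr_to_write. A hedged sketch of that accounting pattern; the helper name, the long types, and the 8192 cap are illustrative:

#include <linux/minmax.h>
#include <linux/writeback.h>

/* Sketch only: charge a just-submitted delalloc range against the budget. */
static void myfs_account_delalloc(struct writeback_control *wbc,
				  long delalloc_to_write)
{
	wbc->nr_to_write -= delalloc_to_write;

	if (wbc->nr_to_write < delalloc_to_write) {
		long thresh = 8192;	/* illustrative cap on the top-up */

		if (delalloc_to_write < thresh * 2)
			thresh = delalloc_to_write;
		wbc->nr_to_write = min_t(long, delalloc_to_write, thresh);
	}
}
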
1447 folio_redirty_for_writepage(bio_ctrl->wbc, folio); in extent_writepage_io()
1534 trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc); in extent_writepage()
1575 bio_ctrl->wbc->nr_to_write--; in extent_writepage()
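The extent_writepage_io() and extent_writepage() hits show the two per-folio outcomes a writepages loop reports back through the wbc: a folio that cannot be written right now is handed back via folio_redirty_for_writepage(), while a submitted folio consumes one unit of wbc->nr_to_write. A small sketch of that pattern; myfs_folio_ready() and myfs_submit_folio() are hypothetical stand-ins defined elsewhere:

#include <linux/pagemap.h>
#include <linux/writeback.h>

bool myfs_folio_ready(struct folio *folio);			/* hypothetical */
void myfs_submit_folio(struct folio *folio,
		       struct writeback_control *wbc);		/* hypothetical */

static int myfs_write_one_folio(struct folio *folio,
				struct writeback_control *wbc)
{
	if (!myfs_folio_ready(folio)) {
		/* Keep it dirty; the flusher will retry on a later pass. */
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		return 0;
	}

	myfs_submit_folio(folio, wbc);
	wbc->nr_to_write--;	/* one folio's worth of budget consumed */
	return 0;
}
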
1597 struct writeback_control *wbc) in lock_extent_buffer_for_io() argument
1605 if (wbc->sync_mode != WB_SYNC_ALL) in lock_extent_buffer_for_io()
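lock_extent_buffer_for_io() consults wbc->sync_mode to decide how hard to try: anything other than WB_SYNC_ALL is best-effort writeback and may give up on a contended buffer, while integrity writeback must wait. The same pattern in a generic, hypothetical form (btrfs uses its own tree locks, not a mutex):

#include <linux/mutex.h>
#include <linux/writeback.h>

/* Sketch: trylock for best-effort writeback, block for WB_SYNC_ALL. */
static bool myfs_lock_for_io(struct mutex *lock, struct writeback_control *wbc)
{
	if (wbc->sync_mode != WB_SYNC_ALL)
		return mutex_trylock(lock);

	mutex_lock(lock);
	return true;
}
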
1780 struct writeback_control *wbc) in write_one_eb() argument
1788 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc), in write_one_eb()
1792 wbc_init_bio(wbc, &bbio->bio); in write_one_eb()
1804 wbc->nr_to_write--; in write_one_eb()
1809 wbc_account_cgroup_owner(wbc, folio, eb->len); in write_one_eb()
1823 wbc_account_cgroup_owner(wbc, folio, eb->folio_size); in write_one_eb()
1824 wbc->nr_to_write -= folio_nr_pages(folio); in write_one_eb()
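The write_one_eb() hits apply the same cgroup accounting to metadata bios (built with REQ_META on top of wbc_to_write_flags()), with one detail worth noting: for a large folio the budget is reduced by folio_nr_pages() rather than by one, so a multi-page metadata folio is charged fairly against wbc->nr_to_write. In sketch form, with a hypothetical helper name:

#include <linux/mm.h>
#include <linux/writeback.h>

/* Sketch: charge a whole (possibly large) folio against the writeback budget. */
static void myfs_charge_folio(struct writeback_control *wbc,
			      struct folio *folio, size_t bytes_written)
{
	wbc_account_cgroup_owner(wbc, folio, bytes_written);
	wbc->nr_to_write -= folio_nr_pages(folio);
}
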
1845 static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc) in submit_eb_subpage() argument
1897 if (lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_subpage()
1898 write_one_eb(eb, wbc); in submit_eb_subpage()
1928 struct writeback_control *wbc = ctx->wbc; in submit_eb_page() local
1937 return submit_eb_subpage(folio, wbc); in submit_eb_page()
1975 if (!lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_page()
1985 write_one_eb(eb, wbc); in submit_eb_page()
1991 struct writeback_control *wbc) in btree_write_cache_pages() argument
1993 struct btrfs_eb_write_context ctx = { .wbc = wbc }; in btree_write_cache_pages()
2006 if (wbc->range_cyclic) { in btree_write_cache_pages()
2015 index = wbc->range_start >> PAGE_SHIFT; in btree_write_cache_pages()
2016 end = wbc->range_end >> PAGE_SHIFT; in btree_write_cache_pages()
2019 if (wbc->sync_mode == WB_SYNC_ALL) in btree_write_cache_pages()
2025 if (wbc->sync_mode == WB_SYNC_ALL) in btree_write_cache_pages()
2048 nr_to_write_done = wbc->nr_to_write <= 0; in btree_write_cache_pages()
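btree_write_cache_pages() derives the scan range and the xarray lookup tag from the wbc before iterating folios: cyclic writeback resumes from mapping->writeback_index, an explicit range comes from range_start/range_end, and WB_SYNC_ALL passes tag the current dirty set with PAGECACHE_TAG_TOWRITE so pages dirtied during the scan are not chased indefinitely. A sketch of that setup, assuming the standard tag_pages_for_writeback() helper; the function name is hypothetical:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Sketch: derive the scan window and lookup tag from the wbc. */
static void myfs_writeback_range(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t *index, pgoff_t *end, xa_mark_t *tag)
{
	if (wbc->range_cyclic) {
		*index = mapping->writeback_index;	/* resume point */
		*end = -1;				/* scan to the end */
	} else {
		*index = wbc->range_start >> PAGE_SHIFT;
		*end = wbc->range_end >> PAGE_SHIFT;
	}

	if (wbc->sync_mode == WB_SYNC_ALL) {
		/* Snapshot today's dirty pages so the walk terminates. */
		tag_pages_for_writeback(mapping, *index, *end);
		*tag = PAGECACHE_TAG_TOWRITE;
	} else {
		*tag = PAGECACHE_TAG_DIRTY;
	}
}
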
2121 struct writeback_control *wbc = bio_ctrl->wbc; in extent_write_cache_pages() local
2148 if (wbc->range_cyclic) { in extent_write_cache_pages()
2157 index = wbc->range_start >> PAGE_SHIFT; in extent_write_cache_pages()
2158 end = wbc->range_end >> PAGE_SHIFT; in extent_write_cache_pages()
2159 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in extent_write_cache_pages()
2171 if (range_whole && wbc->nr_to_write == LONG_MAX && in extent_write_cache_pages()
2174 wbc->tagged_writepages = 1; in extent_write_cache_pages()
2176 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in extent_write_cache_pages()
2181 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in extent_write_cache_pages()
2235 if (wbc->sync_mode != WB_SYNC_NONE || in extent_write_cache_pages()
2259 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE && in extent_write_cache_pages()
2260 wbc->nr_to_write <= 0); in extent_write_cache_pages()
2283 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole)) in extent_write_cache_pages()
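The extent_write_cache_pages() hits show the data-path version of the same loop, plus the exit-path wbc interactions: only WB_SYNC_NONE writeback may stop early once nr_to_write is exhausted, and for cyclic or whole-range passes the resume index is stored back into the mapping for the next pass. A sketch of those two pieces, with hypothetical names:

#include <linux/fs.h>
#include <linux/writeback.h>

/* Sketch: background writeback may stop early; integrity writeback may not. */
static bool myfs_writeback_done(const struct writeback_control *wbc)
{
	return wbc->sync_mode == WB_SYNC_NONE && wbc->nr_to_write <= 0;
}

/* Sketch: remember where a cyclic or whole-range pass stopped. */
static void myfs_save_resume_index(struct address_space *mapping,
				   struct writeback_control *wbc,
				   pgoff_t next_index, bool range_whole)
{
	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
		mapping->writeback_index = next_index;
}
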
2296 u64 start, u64 end, struct writeback_control *wbc, in extent_write_locked_range() argument
2307 .wbc = wbc, in extent_write_locked_range()
2308 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), in extent_write_locked_range()
2311 if (wbc->no_cgroup_owner) in extent_write_locked_range()
2362 int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc) in btrfs_writepages() argument
2367 .wbc = wbc, in btrfs_writepages()
2368 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), in btrfs_writepages()
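btrfs_writepages() is where the wbc enters this file: the ->writepages() address_space operation receives it from the writeback machinery, and the pointer is stashed in a per-call control structure together with the base bio opf derived from wbc_to_write_flags() (which adds REQ_SYNC for WB_SYNC_ALL). A minimal, hypothetical sketch of that entry-point wiring; myfs_* and the context struct are illustrative, not the btrfs types:

#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/writeback.h>

struct myfs_write_ctx {
	struct writeback_control *wbc;	/* threaded through the whole pass */
	blk_opf_t opf;			/* base flags for every bio we build */
};

int myfs_write_cache_pages(struct address_space *mapping,
			   struct myfs_write_ctx *ctx);	/* hypothetical */

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct myfs_write_ctx ctx = {
		.wbc = wbc,
		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
	};

	return myfs_write_cache_pages(mapping, &ctx);
}

/* Hooked up via struct address_space_operations: .writepages = myfs_writepages */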