Lines matching refs:work: cross-reference hits for the identifier 'work' in the Linux kernel writeback code (apparently fs/fs-writeback.c). Each hit shows the source line number, the matching code, and the enclosing function, with the identifier's role (argument, local, member).
169 static void finish_writeback_work(struct wb_writeback_work *work) in finish_writeback_work() argument
171 struct wb_completion *done = work->done; in finish_writeback_work()
173 if (work->auto_free) in finish_writeback_work()
174 kfree(work); in finish_writeback_work()
185 struct wb_writeback_work *work) in wb_queue_work() argument
187 trace_writeback_queue(wb, work); in wb_queue_work()
189 if (work->done) in wb_queue_work()
190 atomic_inc(&work->done->cnt); in wb_queue_work()
195 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
198 finish_writeback_work(work); in wb_queue_work()
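The two fragments above (lines 169-198) show the full life cycle of a wb_writeback_work item: wb_queue_work() bumps the completion count and appends the item to wb->work_list, while finish_writeback_work() drops the count and frees items marked auto_free. A minimal single-threaded user-space sketch of the same ownership rules; the demo_* names are hypothetical, not kernel API, and a real implementation would need the locking and atomics the kernel uses:

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_completion { int cnt; };      /* stands in for wb_completion */

    struct demo_work {
        struct demo_completion *done;         /* optional: signalled on finish */
        int auto_free;                        /* item frees itself when done   */
        struct demo_work *next;               /* stands in for the list_head   */
    };

    static struct demo_work *queue_head;

    static void demo_finish_work(struct demo_work *work)
    {
        struct demo_completion *done = work->done;

        if (work->auto_free)                  /* save 'done' first: the        */
            free(work);                       /* completion outlives the item  */
        if (done && --done->cnt == 0)
            printf("all queued work finished\n");
    }

    static void demo_queue_work(struct demo_work *work)
    {
        if (work->done)
            work->done->cnt++;                /* atomic_inc(&done->cnt) analogue */
        work->next = queue_head;              /* the kernel appends with         */
        queue_head = work;                    /* list_add_tail() under a lock    */
    }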
371 struct rcu_work work; member
489 static void inode_switch_wbs_work_fn(struct work_struct *work) in inode_switch_wbs_work_fn() argument
492 container_of(to_rcu_work(work), struct inode_switch_wbs_context, work); in inode_switch_wbs_work_fn()
628 INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn); in inode_switch_wbs()
629 queue_rcu_work(isw_wq, &isw->work); in inode_switch_wbs()
717 INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn); in cleanup_offline_cgwb()
718 queue_rcu_work(isw_wq, &isw->work); in cleanup_offline_cgwb()
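Lines 371-718 show the deferred-switch pattern: inode_switch_wbs_context embeds a struct rcu_work (the 'work' member at line 371), both call sites arm it with INIT_RCU_WORK() and queue it with queue_rcu_work(), so the handler runs only after an RCU grace period, and inode_switch_wbs_work_fn() recovers its context with container_of(to_rcu_work(...)). A hedged kernel-context sketch of the same pattern (not a standalone program; my_ctx, my_work_fn, and my_defer are hypothetical, while the workqueue calls are real kernel API):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_ctx {
        struct rcu_work work;     /* embedded, like the member at line 371 */
        /* ... payload being retired ... */
    };

    static void my_work_fn(struct work_struct *work)
    {
        /* to_rcu_work() maps work_struct to rcu_work; container_of() then
         * recovers the embedding context, as inode_switch_wbs_work_fn() does */
        struct my_ctx *ctx = container_of(to_rcu_work(work),
                                          struct my_ctx, work);
        kfree(ctx);
    }

    static void my_defer(struct my_ctx *ctx, struct workqueue_struct *wq)
    {
        INIT_RCU_WORK(&ctx->work, my_work_fn);
        queue_rcu_work(wq, &ctx->work);   /* runs after an RCU grace period */
    }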
1010 struct wb_writeback_work *work; in bdi_split_work_to_wbs() local
1028 work = kmalloc(sizeof(*work), GFP_ATOMIC); in bdi_split_work_to_wbs()
1029 if (work) { in bdi_split_work_to_wbs()
1030 *work = *base_work; in bdi_split_work_to_wbs()
1031 work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
1032 work->auto_free = 1; in bdi_split_work_to_wbs()
1033 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
1048 work = &fallback_work; in bdi_split_work_to_wbs()
1049 *work = *base_work; in bdi_split_work_to_wbs()
1050 work->nr_pages = nr_pages; in bdi_split_work_to_wbs()
1051 work->auto_free = 0; in bdi_split_work_to_wbs()
1052 work->done = &fallback_work_done; in bdi_split_work_to_wbs()
1054 wb_queue_work(wb, work); in bdi_split_work_to_wbs()
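bdi_split_work_to_wbs() (lines 1010-1054) clones base_work into a fresh GFP_ATOMIC allocation for each member wb; when that allocation fails it falls back to a single on-stack item with auto_free = 0 and a local completion, which the caller must wait on before reusing. A self-contained user-space sketch of the allocate-or-fall-back shape (demo_* names are hypothetical):

    #include <stdlib.h>

    struct demo_work { long nr_pages; int auto_free; };

    /* Clone 'base' for one target; on allocation failure reuse a
     * caller-provided fallback item that must be drained before reuse. */
    static struct demo_work *demo_clone_or_fallback(const struct demo_work *base,
                                                    long nr_pages,
                                                    struct demo_work *fallback)
    {
        struct demo_work *work = malloc(sizeof(*work)); /* GFP_ATOMIC analogue */

        if (work) {
            *work = *base;           /* struct copy, as in '*work = *base_work' */
            work->auto_free = 1;     /* the consumer frees it                   */
        } else {
            work = fallback;         /* on-stack item, serialised reuse         */
            *work = *base;
            work->auto_free = 0;     /* caller owns it and must wait            */
        }
        work->nr_pages = nr_pages;
        return work;
    }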
1083 struct wb_writeback_work *work; in cgroup_writeback_by_id() local
1126 work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN); in cgroup_writeback_by_id()
1127 if (work) { in cgroup_writeback_by_id()
1128 work->nr_pages = dirty; in cgroup_writeback_by_id()
1129 work->sync_mode = WB_SYNC_NONE; in cgroup_writeback_by_id()
1130 work->range_cyclic = 1; in cgroup_writeback_by_id()
1131 work->reason = reason; in cgroup_writeback_by_id()
1132 work->done = done; in cgroup_writeback_by_id()
1133 work->auto_free = 1; in cgroup_writeback_by_id()
1134 wb_queue_work(wb, work); in cgroup_writeback_by_id()
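cgroup_writeback_by_id() (lines 1083-1134) uses the same clone-and-queue shape but allocates with GFP_NOWAIT | __GFP_NOWARN: a best-effort, non-blocking allocation whose failure is silently tolerated by skipping the writeback. A hedged kernel-context fragment of that allocation (demo_item and demo_try_alloc are hypothetical; the gfp flags are real):

    #include <linux/slab.h>

    struct demo_item { long nr_pages; };

    static struct demo_item *demo_try_alloc(void)
    {
        /* never blocks, never warns on failure; kzalloc() zero-fills,
         * so only nonzero fields need to be set afterwards */
        return kzalloc(sizeof(struct demo_item), GFP_NOWAIT | __GFP_NOWARN);
    }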
1501 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, in queue_io() argument
1510 if (!work->for_sync) in queue_io()
1516 trace_writeback_queue_io(wb, work, dirtied_before, moved); in queue_io()
1839 struct wb_writeback_work *work) in writeback_chunk_size() argument
1856 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) in writeback_chunk_size()
1861 pages = min(pages, work->nr_pages); in writeback_chunk_size()
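queue_io() (lines 1501-1516) moves expired dirty inodes onto the IO list, and writeback_chunk_size() (lines 1839-1861) decides how much one inode may write per pass: integrity writeback (WB_SYNC_ALL or tagged_writepages) gets an effectively unbounded LONG_MAX chunk so a sync cannot be livelocked by round-robin chunking, while other writeback is clamped to the work's remaining budget. A runnable sketch of that clamping (the demo_* names and the bandwidth_pages parameter are stand-ins):

    #include <limits.h>

    enum demo_sync_mode { DEMO_SYNC_NONE, DEMO_SYNC_ALL };

    struct demo_work {
        enum demo_sync_mode sync_mode;
        int tagged_writepages;
        long nr_pages;
    };

    /* bandwidth_pages stands in for the estimate the kernel derives
     * from wb->avg_write_bandwidth */
    static long demo_chunk_size(const struct demo_work *work, long bandwidth_pages)
    {
        if (work->sync_mode == DEMO_SYNC_ALL || work->tagged_writepages)
            return LONG_MAX;                 /* integrity: don't livelock */
        return bandwidth_pages < work->nr_pages
                ? bandwidth_pages : work->nr_pages;
    }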
1880 struct wb_writeback_work *work) in writeback_sb_inodes() argument
1883 .sync_mode = work->sync_mode, in writeback_sb_inodes()
1884 .tagged_writepages = work->tagged_writepages, in writeback_sb_inodes()
1885 .for_kupdate = work->for_kupdate, in writeback_sb_inodes()
1886 .for_background = work->for_background, in writeback_sb_inodes()
1887 .for_sync = work->for_sync, in writeback_sb_inodes()
1888 .range_cyclic = work->range_cyclic, in writeback_sb_inodes()
1897 if (work->for_kupdate) in writeback_sb_inodes()
1907 if (work->sb) { in writeback_sb_inodes()
1968 write_chunk = writeback_chunk_size(wb, work); in writeback_sb_inodes()
1979 work->nr_pages -= write_chunk - wbc.nr_to_write; in writeback_sb_inodes()
2021 if (work->nr_pages <= 0) in writeback_sb_inodes()
2029 struct wb_writeback_work *work) in __writeback_inodes_wb() argument
2047 wrote += writeback_sb_inodes(sb, wb, work); in __writeback_inodes_wb()
2054 if (work->nr_pages <= 0) in __writeback_inodes_wb()
2065 struct wb_writeback_work work = { in writeback_inodes_wb() local
2076 queue_io(wb, &work, jiffies); in writeback_inodes_wb()
2077 __writeback_inodes_wb(wb, &work); in writeback_inodes_wb()
2081 return nr_pages - work.nr_pages; in writeback_inodes_wb()
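writeback_sb_inodes() (lines 1880-2021) copies the per-work policy flags into a writeback_control, asks each inode for up to write_chunk pages, and charges only what was actually written: wbc.nr_to_write counts down from write_chunk, so write_chunk - wbc.nr_to_write is the number written, hence the update at line 1979. The same algebra makes writeback_inodes_wb() return nr_pages - work.nr_pages at line 2081. A runnable worked example of the accounting:

    #include <stdio.h>

    int main(void)
    {
        long work_nr_pages = 4096;  /* work->nr_pages: remaining budget      */
        long write_chunk   = 1024;  /* writeback_chunk_size() result         */
        long nr_to_write   = 200;   /* wbc.nr_to_write left after writing    */

        long written = write_chunk - nr_to_write;   /* 824 pages went out    */
        work_nr_pages -= written;                   /* 3272 pages remain     */

        printf("written=%ld remaining=%ld\n", written, work_nr_pages);
        return 0;
    }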
2100 struct wb_writeback_work *work) in wb_writeback() argument
2102 long nr_pages = work->nr_pages; in wb_writeback()
2114 if (work->nr_pages <= 0) in wb_writeback()
2123 if ((work->for_background || work->for_kupdate) && in wb_writeback()
2131 if (work->for_background && !wb_over_bg_thresh(wb)) in wb_writeback()
2137 trace_writeback_start(wb, work); in wb_writeback()
2145 if (work->for_kupdate) { in wb_writeback()
2149 } else if (work->for_background) in wb_writeback()
2152 queue_io(wb, work, dirtied_before); in wb_writeback()
2155 if (work->sb) in wb_writeback()
2156 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
2158 progress = __writeback_inodes_wb(wb, work); in wb_writeback()
2159 trace_writeback_written(wb, work); in wb_writeback()
2187 trace_writeback_wait(wb, work); in wb_writeback()
2196 return nr_pages - work->nr_pages; in wb_writeback()
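wb_writeback() (lines 2100-2196) is the main loop: it runs until the page budget is spent, yields early when background or kupdate work would starve an explicitly queued item, stops background work once the wb is under its threshold, refills the IO list with queue_io() when it runs dry, and on zero progress blocks on in-flight inodes instead of busy-looping; the return value is again pages written, initial budget minus what is left. A compilable control-flow skeleton under those assumptions (all demo_* names are stand-ins for the kernel helpers):

    #include <stdbool.h>

    struct demo_wb   { int other_work_pending, over_bg_thresh, io_list_empty; };
    struct demo_work { long nr_pages; int for_background, for_kupdate; };

    /* stubs standing in for list_empty(&wb->work_list), wb_over_bg_thresh(),
     * queue_io() and the inode writeback itself */
    static void demo_queue_expired(struct demo_wb *wb, struct demo_work *w)
    { (void)wb; (void)w; }
    static void demo_wait_for_inflight(struct demo_wb *wb) { (void)wb; }
    static long demo_write_some(struct demo_wb *wb, struct demo_work *w)
    { (void)wb; w->nr_pages -= 64; return 64; }   /* pretend 64 pages/pass */

    static long demo_writeback(struct demo_wb *wb, struct demo_work *work)
    {
        long initial = work->nr_pages;

        for (;;) {
            if (work->nr_pages <= 0)
                break;                             /* budget exhausted         */
            if ((work->for_background || work->for_kupdate) &&
                wb->other_work_pending)
                break;                             /* yield to queued items    */
            if (work->for_background && !wb->over_bg_thresh)
                break;                             /* background goal reached  */

            if (wb->io_list_empty)
                demo_queue_expired(wb, work);      /* queue_io() analogue      */
            if (!demo_write_some(wb, work))
                demo_wait_for_inflight(wb);        /* block, don't busy-loop   */
        }
        return initial - work->nr_pages;           /* pages written            */
    }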
2204 struct wb_writeback_work *work = NULL; in get_next_work_item() local
2208 work = list_entry(wb->work_list.next, in get_next_work_item()
2210 list_del_init(&work->list); in get_next_work_item()
2213 return work; in get_next_work_item()
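get_next_work_item() (lines 2204-2213) pops the head of wb->work_list: list_entry() is a container_of() wrapper that maps the embedded list node back to its wb_writeback_work, and list_del_init() detaches the node. A self-contained user-space analogue of the intrusive-list pop (demo_* names hypothetical):

    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    struct demo_work {
        long nr_pages;
        struct list_head list;   /* embedded node, like wb_writeback_work.list */
    };

    /* pop the first queued item, or NULL if the (circular) list is empty */
    static struct demo_work *demo_next_work(struct list_head *head)
    {
        struct demo_work *work = NULL;

        if (head->next != head) {                  /* !list_empty(head)        */
            work = list_entry(head->next, struct demo_work, list);
            work->list.prev->next = work->list.next;   /* list_del_init():     */
            work->list.next->prev = work->list.prev;   /* unlink, then point   */
            work->list.next = work->list.prev = &work->list; /* at itself      */
        }
        return work;
    }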
2220 struct wb_writeback_work work = { in wb_check_background_flush() local
2228 return wb_writeback(wb, &work); in wb_check_background_flush()
2254 struct wb_writeback_work work = { in wb_check_old_data_flush() local
2262 return wb_writeback(wb, &work); in wb_check_old_data_flush()
2277 struct wb_writeback_work work = { in wb_check_start_all() local
2284 nr_pages = wb_writeback(wb, &work); in wb_check_start_all()
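The three wb_check_* helpers (lines 2220-2284) build their work item on the stack with designated initializers and feed it straight to wb_writeback(); since the item never escapes the calling frame it needs neither a completion nor auto_free. The shape, reusing the demo_writeback() skeleton sketched above:

    static long demo_check_background_flush(struct demo_wb *wb)
    {
        struct demo_work work = {
            .nr_pages       = 1024,   /* e.g. pages over the bg threshold */
            .for_background = 1,
        };
        return demo_writeback(wb, &work);
    }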
2297 struct wb_writeback_work *work; in wb_do_writeback() local
2301 while ((work = get_next_work_item(wb)) != NULL) { in wb_do_writeback()
2302 trace_writeback_exec(wb, work); in wb_do_writeback()
2303 wrote += wb_writeback(wb, work); in wb_do_writeback()
2304 finish_writeback_work(work); in wb_do_writeback()
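wb_do_writeback() (lines 2297-2304) drains the queue: pop an item, execute it, then release it through finish_writeback_work() so completions fire and auto_free items are freed. A sketch of the drain loop, assuming a demo_work that merges the fields from the fragments above:

    static long demo_do_writeback(struct demo_wb *wb, struct list_head *queue)
    {
        struct demo_work *work;
        long wrote = 0;

        while ((work = demo_next_work(queue)) != NULL) {
            wrote += demo_writeback(wb, work);   /* trace + execute          */
            demo_finish_work(work);              /* drop ref / maybe free    */
        }
        return wrote;
    }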
2326 void wb_workfn(struct work_struct *work) in wb_workfn() argument
2328 struct bdi_writeback *wb = container_of(to_delayed_work(work), in wb_workfn()
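wb_workfn() (lines 2326-2328) is the workqueue entry point: the queue hands it the bare work_struct, to_delayed_work() maps that to the embedding delayed_work, and container_of() walks back to the surrounding bdi_writeback. A hedged kernel-context sketch of that recovery pattern (my_dev and my_workfn are hypothetical; the helpers are real kernel API):

    #include <linux/workqueue.h>

    struct my_dev {
        struct delayed_work dwork;   /* embedded, like bdi_writeback.dwork */
        long dirty_pages;
    };

    static void my_workfn(struct work_struct *work)
    {
        /* work_struct -> delayed_work -> embedding object, exactly the
         * chain wb_workfn() uses to recover its bdi_writeback */
        struct my_dev *dev = container_of(to_delayed_work(work),
                                          struct my_dev, dwork);
        dev->dirty_pages = 0;        /* ... perform the deferred work ... */
    }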
2721 struct wb_writeback_work work = { in __writeback_inodes_sb_nr() local
2734 bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy); in __writeback_inodes_sb_nr()
2799 struct wb_writeback_work work = { in sync_inodes_sb() local
2820 bdi_split_work_to_wbs(bdi, &work, false); in sync_inodes_sb()
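Both __writeback_inodes_sb_nr() (line 2721) and sync_inodes_sb() (line 2799) show the caller side of bdi_split_work_to_wbs(): an on-stack base work whose done field points at a wb_completion, split into per-wb clones, after which the caller waits so the stack item and completion stay valid. The caller-side shape, again with hypothetical demo_* helpers and a merged demo_work:

    struct demo_completion done = { 0 };
    struct demo_work base = {
        .nr_pages = 4096,            /* or LONG_MAX for a full sync         */
        .done     = &done,           /* every queued clone bumps done.cnt   */
    };

    demo_split_work(&base);          /* hypothetical: clone per member wb   */
    demo_wait_completion(&done);     /* must return before 'base' and       */
                                     /* 'done' go out of scope              */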