
Searched refs:work (Results 1 – 25 of 3254) sorted by relevance


/linux/fs/smb/server/
ksmbd_work.c
21 struct ksmbd_work *work = kmem_cache_zalloc(work_cache, KSMBD_DEFAULT_GFP); in ksmbd_alloc_work_struct() local
23 if (work) { in ksmbd_alloc_work_struct()
24 work->compound_fid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
25 work->compound_pfid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
26 INIT_LIST_HEAD(&work->request_entry); in ksmbd_alloc_work_struct()
27 INIT_LIST_HEAD(&work->async_request_entry); in ksmbd_alloc_work_struct()
28 INIT_LIST_HEAD(&work->fp_entry); in ksmbd_alloc_work_struct()
29 INIT_LIST_HEAD(&work->interim_entry); in ksmbd_alloc_work_struct()
30 INIT_LIST_HEAD(&work->aux_read_list); in ksmbd_alloc_work_struct()
31 work->iov_alloc_cnt = 4; in ksmbd_alloc_work_struct()
[all …]
server.c
92 static inline int check_conn_state(struct ksmbd_work *work) in check_conn_state() argument
96 if (ksmbd_conn_exiting(work->conn) || in check_conn_state()
97 ksmbd_conn_need_reconnect(work->conn)) { in check_conn_state()
98 rsp_hdr = work->response_buf; in check_conn_state()
108 static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn, in __process_request() argument
115 if (check_conn_state(work)) in __process_request()
118 if (ksmbd_verify_smb_message(work)) { in __process_request()
119 conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); in __process_request()
123 command = conn->ops->get_cmd_val(work); in __process_request()
128 conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); in __process_request()
[all …]
smb2pdu.h
437 bool is_smb2_neg_cmd(struct ksmbd_work *work);
438 bool is_smb2_rsp(struct ksmbd_work *work);
440 u16 get_smb2_cmd_val(struct ksmbd_work *work);
441 void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
442 int init_smb2_rsp_hdr(struct ksmbd_work *work);
443 int smb2_allocate_rsp_buf(struct ksmbd_work *work);
444 bool is_chained_smb2_message(struct ksmbd_work *work);
445 int init_smb2_neg_rsp(struct ksmbd_work *work);
446 void smb2_set_err_rsp(struct ksmbd_work *work);
447 int smb2_check_user_session(struct ksmbd_work *work);
[all …]
smb2pdu.c
42 static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) in __wbuf() argument
44 if (work->next_smb2_rcv_hdr_off) { in __wbuf()
45 *req = ksmbd_req_buf_next(work); in __wbuf()
46 *rsp = ksmbd_resp_buf_next(work); in __wbuf()
48 *req = smb2_get_msg(work->request_buf); in __wbuf()
49 *rsp = smb2_get_msg(work->response_buf); in __wbuf()
90 int smb2_get_ksmbd_tcon(struct ksmbd_work *work) in smb2_get_ksmbd_tcon() argument
92 struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); in smb2_get_ksmbd_tcon()
103 if (xa_empty(&work->sess->tree_conns)) { in smb2_get_ksmbd_tcon()
114 if (work->next_smb2_rcv_hdr_off) { in smb2_get_ksmbd_tcon()
[all …]
ksmbd_work.h
86 struct work_struct work; member
99 static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work) in ksmbd_resp_buf_next() argument
101 return work->response_buf + work->next_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_next()
108 static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work) in ksmbd_resp_buf_curr() argument
110 return work->response_buf + work->curr_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_curr()
117 static inline void *ksmbd_req_buf_next(struct ksmbd_work *work) in ksmbd_req_buf_next() argument
119 return work->request_buf + work->next_smb2_rcv_hdr_off + 4; in ksmbd_req_buf_next()
123 void ksmbd_free_work_struct(struct ksmbd_work *work);
130 bool ksmbd_queue_work(struct ksmbd_work *work);
131 int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
[all …]
smb_common.c
135 int ksmbd_verify_smb_message(struct ksmbd_work *work) in ksmbd_verify_smb_message() argument
137 struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work); in ksmbd_verify_smb_message()
141 return ksmbd_smb2_check_message(work); in ksmbd_verify_smb_message()
143 hdr = work->request_buf; in ksmbd_verify_smb_message()
146 work->conn->outstanding_credits++; in ksmbd_verify_smb_message()
310 static u16 get_smb1_cmd_val(struct ksmbd_work *work) in get_smb1_cmd_val() argument
321 static int init_smb1_rsp_hdr(struct ksmbd_work *work) in init_smb1_rsp_hdr() argument
323 struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf; in init_smb1_rsp_hdr()
324 struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf; in init_smb1_rsp_hdr()
342 static int smb1_check_user_session(struct ksmbd_work *work) in smb1_check_user_session() argument
[all …]
/linux/virt/kvm/
async_pf.c
45 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument
48 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
99 static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work) in kvm_flush_and_free_async_pf_work() argument
113 if (work->wakeup_all) in kvm_flush_and_free_async_pf_work()
114 WARN_ON_ONCE(work->work.func); in kvm_flush_and_free_async_pf_work()
116 flush_work(&work->work); in kvm_flush_and_free_async_pf_work()
117 kmem_cache_free(async_pf_cache, work); in kvm_flush_and_free_async_pf_work()
124 struct kvm_async_pf *work = in kvm_clear_async_pf_completion_queue() local
126 typeof(*work), queue); in kvm_clear_async_pf_completion_queue()
127 list_del(&work->queue); in kvm_clear_async_pf_completion_queue()
[all …]
/linux/drivers/gpu/drm/
drm_flip_work.c
46 static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task) in drm_flip_work_queue_task() argument
50 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_queue_task()
51 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
52 spin_unlock_irqrestore(&work->lock, flags); in drm_flip_work_queue_task()
63 void drm_flip_work_queue(struct drm_flip_work *work, void *val) in drm_flip_work_queue() argument
70 drm_flip_work_queue_task(work, task); in drm_flip_work_queue()
72 DRM_ERROR("%s could not allocate task!\n", work->name); in drm_flip_work_queue()
73 work->func(work, val); in drm_flip_work_queue()
88 void drm_flip_work_commit(struct drm_flip_work *work, in drm_flip_work_commit() argument
93 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_commit()
[all …]
drm_vblank_work.c
48 struct drm_vblank_work *work, *next; in drm_handle_vblank_works() local
54 list_for_each_entry_safe(work, next, &vblank->pending_work, node) { in drm_handle_vblank_works()
55 if (!drm_vblank_passed(count, work->count)) in drm_handle_vblank_works()
58 list_del_init(&work->node); in drm_handle_vblank_works()
60 kthread_queue_work(vblank->worker, &work->base); in drm_handle_vblank_works()
72 struct drm_vblank_work *work, *next; in drm_vblank_cancel_pending_works() local
79 list_for_each_entry_safe(work, next, &vblank->pending_work, node) { in drm_vblank_cancel_pending_works()
80 list_del_init(&work->node); in drm_vblank_cancel_pending_works()
109 int drm_vblank_work_schedule(struct drm_vblank_work *work, in drm_vblank_work_schedule() argument
112 struct drm_vblank_crtc *vblank = work->vblank; in drm_vblank_work_schedule()
[all …]
/linux/include/trace/events/
workqueue.h
14 * workqueue_queue_work - called when a work gets queued
17 * @work: pointer to struct work_struct
19 * This event occurs when a work is queued immediately or once a
20 * delayed work is actually queued on a workqueue (ie: once the delay
26 struct work_struct *work),
28 TP_ARGS(req_cpu, pwq, work),
31 __field( void *, work )
39 __entry->work = work;
40 __entry->function = work
[all …]
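
The workqueue.h comment above describes when the workqueue_queue_work tracepoint fires: immediately when a work item is queued, or, for delayed work, only once the delay has elapsed and the item is actually placed on a workqueue. A minimal sketch of both paths, assuming a hypothetical module whose handler names (example_work_fn, example_dwork_fn) are made up for illustration:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical handlers; names are illustrative only. */
static void example_work_fn(struct work_struct *work)
{
        pr_info("immediate work ran\n");
}

static void example_dwork_fn(struct work_struct *work)
{
        pr_info("delayed work ran\n");
}

static DECLARE_WORK(example_work, example_work_fn);
static DECLARE_DELAYED_WORK(example_dwork, example_dwork_fn);

static int __init example_init(void)
{
        /* The workqueue_queue_work event should fire here, at queue time. */
        schedule_work(&example_work);

        /*
         * For delayed work, the event is expected only once the 100 ms delay
         * expires and the item is actually queued on the workqueue.
         */
        schedule_delayed_work(&example_dwork, msecs_to_jiffies(100));
        return 0;
}

static void __exit example_exit(void)
{
        cancel_work_sync(&example_work);
        cancel_delayed_work_sync(&example_dwork);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
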
/linux/kernel/
irq_work.c
57 static bool irq_work_claim(struct irq_work *work) in irq_work_claim() argument
61 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags); in irq_work_claim()
79 static __always_inline void irq_work_raise(struct irq_work *work) in irq_work_raise() argument
82 trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func); in irq_work_raise()
88 static void __irq_work_queue_local(struct irq_work *work) in __irq_work_queue_local() argument
95 work_flags = atomic_read(&work->node.a_flags); in __irq_work_queue_local()
107 if (!llist_add(&work->node.llist, list)) in __irq_work_queue_local()
112 irq_work_raise(work); in __irq_work_queue_local()
116 bool irq_work_queue(struct irq_work *work) in irq_work_queue() argument
119 if (!irq_work_claim(work)) in irq_work_queue()
[all …]
task_work.c
54 int task_work_add(struct task_struct *task, struct callback_head *work, in task_work_add() argument
75 kasan_record_aux_stack_noalloc(work); in task_work_add()
77 kasan_record_aux_stack(work); in task_work_add()
84 work->next = head; in task_work_add()
85 } while (!try_cmpxchg(&task->task_works, &head, work)); in task_work_add()
127 struct callback_head *work; in task_work_cancel_match() local
139 work = READ_ONCE(*pprev); in task_work_cancel_match()
140 while (work) { in task_work_cancel_match()
141 if (!match(work, data)) { in task_work_cancel_match()
142 pprev = &work->next; in task_work_cancel_match()
[all …]
kthread.c
807 struct kthread_work *work; in kthread_worker_fn() local
830 work = NULL; in kthread_worker_fn()
833 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
835 list_del_init(&work->node); in kthread_worker_fn()
837 worker->current_work = work; in kthread_worker_fn()
840 if (work) { in kthread_worker_fn()
841 kthread_work_func_t func = work->func; in kthread_worker_fn()
843 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
844 work->func(work); in kthread_worker_fn()
849 trace_sched_kthread_work_execute_end(work, func); in kthread_worker_fn()
[all …]
/linux/include/linux/
completion.h
35 #define COMPLETION_INITIALIZER(work) \ argument
36 { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
38 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ argument
39 (*({ init_completion_map(&(work), &(map)); &(work); }))
41 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument
42 (*({ init_completion(&work); &work; }))
52 #define DECLARE_COMPLETION(work) \ argument
53 struct completion work = COMPLETION_INITIALIZER(work)
68 # define DECLARE_COMPLETION_ONSTACK(work) \ argument
69 struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
[all …]
workqueue.h
23 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) argument
114 struct work_struct work; member
123 struct work_struct work; member
211 static inline struct delayed_work *to_delayed_work(struct work_struct *work) in to_delayed_work() argument
213 return container_of(work, struct delayed_work, work); in to_delayed_work()
216 static inline struct rcu_work *to_rcu_work(struct work_struct *work) in to_rcu_work() argument
218 return container_of(work, struct rcu_work, work); in to_rcu_work()
222 struct work_struct work; member
245 .work = __WORK_INITIALIZER((n).work, (f)), \
260 extern void __init_work(struct work_struct *work, int onstack);
[all …]
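
to_delayed_work() above is a container_of() wrapper; the usual pattern is to recover the embedding delayed_work (and from it the driver structure) inside the handler. A minimal sketch with hypothetical names (struct poll_dev, poll_fn):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct poll_dev {                        /* hypothetical driver structure */
        struct delayed_work poll_work;
        int poll_count;
};

static void poll_fn(struct work_struct *work)
{
        /* work points at poll_work.work; unwrap both container layers */
        struct delayed_work *dwork = to_delayed_work(work);
        struct poll_dev *dev = container_of(dwork, struct poll_dev, poll_work);

        dev->poll_count++;
        schedule_delayed_work(&dev->poll_work, HZ);   /* re-arm, one second later */
}

static struct poll_dev *poll_dev_create(void)
{
        struct poll_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return NULL;
        INIT_DELAYED_WORK(&dev->poll_work, poll_fn);
        schedule_delayed_work(&dev->poll_work, HZ);
        return dev;
}
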
kthread.h
116 typedef void (*kthread_work_func_t)(struct kthread_work *work);
141 struct kthread_work work; member
145 #define KTHREAD_WORK_INIT(work, fn) { \ argument
146 .node = LIST_HEAD_INIT((work).node), \
151 .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
156 #define DEFINE_KTHREAD_WORK(work, fn) \ argument
157 struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
172 #define kthread_init_work(work, fn) \ argument
174 memset((work), 0, sizeof(struct kthread_work)); \
175 INIT_LIST_HEAD(&(work)->node); \
[all …]
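
DEFINE_KTHREAD_WORK() and kthread_init_work() above prepare items for a dedicated kthread worker rather than the shared workqueues. A minimal sketch of the worker lifecycle, with hypothetical names (kwork_fn, example_worker):

#include <linux/kthread.h>
#include <linux/err.h>

static void kwork_fn(struct kthread_work *work)   /* hypothetical handler */
{
        pr_info("kthread work executed\n");
}

/* DEFINE_KTHREAD_WORK expands to the KTHREAD_WORK_INIT shown above. */
static DEFINE_KTHREAD_WORK(example_kwork, kwork_fn);

static int run_on_kthread_worker(void)
{
        struct kthread_worker *worker;

        worker = kthread_create_worker(0, "example_worker");
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        kthread_queue_work(worker, &example_kwork);   /* runs kwork_fn on that thread */
        kthread_flush_work(&example_kwork);           /* wait for it to finish */

        kthread_destroy_worker(worker);
        return 0;
}
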
jump_label_ratelimit.h
12 struct delayed_work work; member
18 struct delayed_work work; member
24 struct delayed_work work; member
28 __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
30 __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
33 __static_key_deferred_flush((x), &(x)->work)
37 struct delayed_work *work,
39 extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
43 extern void jump_label_update_timeout(struct work_struct *work);
49 .work = __DELAYED_WORK_INITIALIZER((name).work, \
[all …]
/linux/tools/perf/
builtin-kwork.c
314 struct kwork_work *work; in work_search() local
318 work = container_of(node, struct kwork_work, node); in work_search()
319 cmp = work_cmp(sort_list, key, work); in work_search()
325 if (work->name == NULL) in work_search()
326 work->name = key->name; in work_search()
327 return work; in work_search()
361 struct kwork_work *work = zalloc(sizeof(*work)); in work_new() local
363 if (work == NULL) { in work_new()
364 pr_err("Failed to zalloc kwork work\ in work_new()
382 struct kwork_work *work = work_search(root, key, sort_list); work_findnew() local
408 profile_name_match(struct perf_kwork * kwork,struct kwork_work * work) profile_name_match() argument
419 profile_event_match(struct perf_kwork * kwork,struct kwork_work * work,struct perf_sample * sample) profile_event_match() argument
457 struct kwork_work *work, key; work_push_atom() local
515 struct kwork_work *work, key; work_pop_atom() local
550 struct kwork_work *work; find_work_by_id() local
578 report_update_exit_event(struct kwork_work * work,struct kwork_atom * atom,struct perf_sample * sample) report_update_exit_event() argument
617 struct kwork_work *work = NULL; report_exit_event() local
633 latency_update_entry_event(struct kwork_work * work,struct kwork_atom * atom,struct perf_sample * sample) latency_update_entry_event() argument
672 struct kwork_work *work = NULL; latency_entry_event() local
738 timehist_print_event(struct perf_kwork * kwork,struct kwork_work * work,struct kwork_atom * atom,struct perf_sample * sample,struct addr_location * al) timehist_print_event() argument
831 struct kwork_work *work = NULL; timehist_entry_event() local
852 struct kwork_work *work = NULL; timehist_exit_event() local
882 top_update_runtime(struct kwork_work * work,struct kwork_atom * atom,struct perf_sample * sample) top_update_runtime() argument
913 struct kwork_work *work, *sched_work; top_exit_event() local
944 struct kwork_work *work; top_sched_switch_event() local
1006 irq_work_init(struct perf_kwork * kwork,struct kwork_class * class,struct kwork_work * work,enum kwork_trace_type src_type __maybe_unused,struct evsel * evsel,struct perf_sample * sample,struct machine * machine __maybe_unused) irq_work_init() argument
1024 irq_work_name(struct kwork_work * work,char * buf,int len) irq_work_name() argument
1133 softirq_work_init(struct perf_kwork * kwork,struct kwork_class * class,struct kwork_work * work,enum kwork_trace_type src_type __maybe_unused,struct evsel * evsel,struct perf_sample * sample,struct machine * machine __maybe_unused) softirq_work_init() argument
1154 softirq_work_name(struct kwork_work * work,char * buf,int len) softirq_work_name() argument
1233 workqueue_work_init(struct perf_kwork * kwork __maybe_unused,struct kwork_class * class,struct kwork_work * work,enum kwork_trace_type src_type __maybe_unused,struct evsel * evsel,struct perf_sample * sample,struct machine * machine) workqueue_work_init() argument
1250 workqueue_work_name(struct kwork_work * work,char * buf,int len) workqueue_work_name() argument
1301 sched_work_init(struct perf_kwork * kwork __maybe_unused,struct kwork_class * class,struct kwork_work * work,enum kwork_trace_type src_type,struct evsel * evsel,struct perf_sample * sample,struct machine * machine __maybe_unused) sched_work_init() argument
1319 sched_work_name(struct kwork_work * work,char * buf,int len) sched_work_name() argument
1346 report_print_work(struct perf_kwork * kwork,struct kwork_work * work) report_print_work() argument
1637 top_print_work(struct perf_kwork * kwork __maybe_unused,struct kwork_work * work) top_print_work() argument
1837 process_skipped_events(struct perf_kwork * kwork,struct kwork_work * work) process_skipped_events() argument
1853 struct kwork_work *work = NULL; perf_kwork_add_work() local
1905 struct kwork_work *work; perf_kwork__report() local
1998 struct kwork_work *work; top_calc_total_runtime() local
2017 top_calc_idle_time(struct perf_kwork * kwork,struct kwork_work * work) top_calc_idle_time() argument
2029 top_calc_irq_runtime(struct perf_kwork * kwork,enum kwork_class_type type,struct kwork_work * work) top_calc_irq_runtime() argument
2043 top_subtract_irq_runtime(struct perf_kwork * kwork,struct kwork_work * work) top_subtract_irq_runtime() argument
2070 struct kwork_work *work; top_calc_cpu_usage() local
2099 top_calc_load_runtime(struct perf_kwork * kwork,struct kwork_work * work) top_calc_load_runtime() argument
2150 struct kwork_work *work; perf_kwork__top_report() local
[all …]
/linux/fs/btrfs/
async-thread.c
55 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
57 return work->wq->fs_info; in btrfs_work_owner()
214 struct btrfs_work *work; in run_ordered_work() local
223 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
225 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
241 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) in run_ordered_work()
243 trace_btrfs_ordered_sched(work); in run_ordered_work()
245 work->ordered_func(work, false); in run_ordered_work()
249 list_del(&work->ordered_list); in run_ordered_work()
252 if (work == self) { in run_ordered_work()
[all …]
/linux/drivers/staging/octeon/
ethernet-rx.c
63 static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work) in cvm_oct_check_rcv_error() argument
68 port = work->word0.pip.cn68xx.pknd; in cvm_oct_check_rcv_error()
70 port = work->word1.cn38xx.ipprt; in cvm_oct_check_rcv_error()
72 if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) in cvm_oct_check_rcv_error()
81 if (work->word2.snoip.err_code == 5 || in cvm_oct_check_rcv_error()
82 work->word2.snoip.err_code == 7) { in cvm_oct_check_rcv_error()
99 cvmx_phys_to_ptr(work->packet_ptr.s.addr); in cvm_oct_check_rcv_error()
102 while (i < work->word1.len - 1) { in cvm_oct_check_rcv_error()
111 work->packet_ptr.s.addr += i + 1; in cvm_oct_check_rcv_error()
112 work->word1.len -= i + 5; in cvm_oct_check_rcv_error()
[all …]
/linux/net/wireless/
debugfs.c
115 struct wiphy_work work; member
131 struct wiphy_work *work) in wiphy_locked_debugfs_read_work() argument
133 struct debugfs_read_work *w = container_of(work, typeof(*w), work); in wiphy_locked_debugfs_read_work()
144 wiphy_work_cancel(w->wiphy, &w->work); in wiphy_locked_debugfs_read_cancel()
159 struct debugfs_read_work work = { in wiphy_locked_debugfs_read() local
167 .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), in wiphy_locked_debugfs_read()
171 .cancel_data = &work, in wiphy_locked_debugfs_read()
177 wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work); in wiphy_locked_debugfs_read()
178 wiphy_work_queue(wiphy, &work.work); in wiphy_locked_debugfs_read()
181 wait_for_completion(&work.completion); in wiphy_locked_debugfs_read()
[all …]
/linux/drivers/accessibility/speakup/
selection.c
20 struct work_struct work; member
25 static void __speakup_set_selection(struct work_struct *work) in __speakup_set_selection() argument
28 container_of(work, struct speakup_selection_work, work); in __speakup_set_selection()
58 .work = __WORK_INITIALIZER(speakup_sel_work.work,
87 schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work); in speakup_set_selection()
96 cancel_work_sync(&speakup_sel_work.work); in speakup_cancel_selection()
106 static void __speakup_paste_selection(struct work_struct *work) in __speakup_paste_selection() argument
109 container_of(work, struct speakup_selection_work, work); in __speakup_paste_selection()
117 .work = __WORK_INITIALIZER(speakup_paste_work.work,
129 schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work); in speakup_paste_selection()
[all …]
/linux/io_uring/
io-wq.c
67 struct work_struct work; member
163 struct io_wq_work *work) in io_work_get_acct() argument
165 return io_get_acct(wq, !(atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND)); in io_work_get_acct()
455 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
457 return atomic_read(&work->flags) >> IO_WQ_HASH_SHIFT; in io_get_work_hash()
482 struct io_wq_work *work, *tail; in io_get_next_work() local
489 work = container_of(node, struct io_wq_work, list); in io_get_next_work()
492 if (!io_wq_is_hashed(work)) { in io_get_next_work()
494 return work; in io_get_next_work()
497 hash = io_get_work_hash(work); in io_get_next_work()
[all …]
/linux/drivers/net/wireless/st/cw1200/
sta.h
59 void cw1200_event_handler(struct work_struct *work);
60 void cw1200_bss_loss_work(struct work_struct *work);
61 void cw1200_bss_params_work(struct work_struct *work);
62 void cw1200_keep_alive_work(struct work_struct *work);
63 void cw1200_tx_failure_work(struct work_struct *work);
79 void cw1200_join_timeout(struct work_struct *work);
80 void cw1200_unjoin_work(struct work_struct *work);
81 void cw1200_join_complete_work(struct work_struct *work);
82 void cw1200_wep_key_work(struct work_struct *work);
85 void cw1200_update_filtering_work(struct work_struct *work);
[all …]
/linux/fs/
fs-writeback.c
169 static void finish_writeback_work(struct wb_writeback_work *work) in finish_writeback_work() argument
171 struct wb_completion *done = work->done; in finish_writeback_work()
173 if (work->auto_free) in finish_writeback_work()
174 kfree(work); in finish_writeback_work()
185 struct wb_writeback_work *work) in wb_queue_work() argument
187 trace_writeback_queue(wb, work); in wb_queue_work()
189 if (work->done) in wb_queue_work()
190 atomic_inc(&work->done->cnt); in wb_queue_work()
195 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
198 finish_writeback_work(work); in wb_queue_work()
[all …]
