Lines matching references to "work" in the kernel workqueue implementation (kernel/workqueue.c). Each entry gives the source line number, the matching line, and the enclosing function; "local", "argument", and "member" indicate how the identifier is used at that site.
613 struct work_struct *work = addr; in work_is_static_object() local
615 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); in work_is_static_object()
624 struct work_struct *work = addr; in work_fixup_init() local
628 cancel_work_sync(work); in work_fixup_init()
629 debug_object_init(work, &work_debug_descr); in work_fixup_init()
642 struct work_struct *work = addr; in work_fixup_free() local
646 cancel_work_sync(work); in work_fixup_free()
647 debug_object_free(work, &work_debug_descr); in work_fixup_free()
662 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
664 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
667 static inline void debug_work_deactivate(struct work_struct *work) in debug_work_deactivate() argument
669 debug_object_deactivate(work, &work_debug_descr); in debug_work_deactivate()
672 void __init_work(struct work_struct *work, int onstack) in __init_work() argument
675 debug_object_init_on_stack(work, &work_debug_descr); in __init_work()
677 debug_object_init(work, &work_debug_descr); in __init_work()
681 void destroy_work_on_stack(struct work_struct *work) in destroy_work_on_stack() argument
683 debug_object_free(work, &work_debug_descr); in destroy_work_on_stack()
687 void destroy_delayed_work_on_stack(struct delayed_work *work) in destroy_delayed_work_on_stack() argument
689 timer_destroy_on_stack(&work->timer); in destroy_delayed_work_on_stack()
690 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
695 static inline void debug_work_activate(struct work_struct *work) { } in debug_work_activate() argument
696 static inline void debug_work_deactivate(struct work_struct *work) { } in debug_work_deactivate() argument
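The block above covers the debugobjects hooks and the on-stack helpers (__init_work() with onstack set, destroy_work_on_stack(), destroy_delayed_work_on_stack()). A minimal, hedged sketch of how callers normally pair these: initialize the stack item with INIT_WORK_ONSTACK(), wait for it to finish, then call destroy_work_on_stack() so debugobjects does not treat the dying stack slot as a leaked object. The context struct and function names below are hypothetical.

#include <linux/completion.h>
#include <linux/workqueue.h>

/* Hypothetical on-stack context: the work item plus a completion to wait on. */
struct onstack_ctx {
	struct work_struct work;
	struct completion done;
};

static void onstack_fn(struct work_struct *work)
{
	struct onstack_ctx *ctx = container_of(work, struct onstack_ctx, work);

	/* ... the deferred part runs here ... */
	complete(&ctx->done);
}

static void run_onstack_work(void)
{
	struct onstack_ctx ctx;

	INIT_WORK_ONSTACK(&ctx.work, onstack_fn);	/* debug_object_init_on_stack() */
	init_completion(&ctx.done);

	schedule_work(&ctx.work);
	wait_for_completion(&ctx.done);

	destroy_work_on_stack(&ctx.work);		/* pairs with INIT_WORK_ONSTACK() */
}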
786 static inline void set_work_data(struct work_struct *work, unsigned long data) in set_work_data() argument
788 WARN_ON_ONCE(!work_pending(work)); in set_work_data()
789 atomic_long_set(&work->data, data | work_static(work)); in set_work_data()
792 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
795 set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING | in set_work_pwq()
799 static void set_work_pool_and_keep_pending(struct work_struct *work, in set_work_pool_and_keep_pending() argument
802 set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | in set_work_pool_and_keep_pending()
806 static void set_work_pool_and_clear_pending(struct work_struct *work, in set_work_pool_and_clear_pending() argument
816 set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | in set_work_pool_and_clear_pending()
854 static struct pool_workqueue *get_work_pwq(struct work_struct *work) in get_work_pwq() argument
856 unsigned long data = atomic_long_read(&work->data); in get_work_pwq()
879 static struct worker_pool *get_work_pool(struct work_struct *work) in get_work_pool() argument
881 unsigned long data = atomic_long_read(&work->data); in get_work_pool()
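set_work_data() and friends above multiplex the single atomic work->data word: while the item is queued, set_work_pwq() stores a pool_workqueue pointer together with flag bits such as WORK_STRUCT_PENDING, and once it leaves the queue, set_work_pool_and_keep_pending()/..._clear_pending() store the last pool's ID shifted by WORK_OFFQ_POOL_SHIFT instead; get_work_pwq() and get_work_pool() read the word back. The sketch below only illustrates that dual encoding; EXAMPLE_PWQ_FLAG, EXAMPLE_FLAG_MASK and EXAMPLE_POOL_SHIFT are made-up stand-ins, not the kernel's real WORK_STRUCT_*/WORK_OFFQ_* constants.

/*
 * Illustrative decoder for the dual encoding of work->data.  The constants
 * here are hypothetical; the kernel derives its own flag bits, masks and
 * shifts, and struct pool_workqueue is private to kernel/workqueue.c.
 */
struct pool_workqueue;

#define EXAMPLE_PWQ_FLAG	(1UL << 2)		/* "word holds a pwq pointer" */
#define EXAMPLE_FLAG_MASK	((1UL << 8) - 1)	/* low bits reserved for flags */
#define EXAMPLE_POOL_SHIFT	8			/* stand-in for WORK_OFFQ_POOL_SHIFT */

static void example_decode_work_data(unsigned long data)
{
	if (data & EXAMPLE_PWQ_FLAG) {
		/* On a queue: pointer to the pool_workqueue plus flag bits. */
		struct pool_workqueue *pwq = (void *)(data & ~EXAMPLE_FLAG_MASK);

		(void)pwq;
	} else {
		/* Off queue: the word records the ID of the last pool. */
		unsigned long pool_id = data >> EXAMPLE_POOL_SHIFT;

		(void)pool_id;
	}
}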
1110 struct work_struct *work) in find_worker_executing_work() argument
1115 (unsigned long)work) in find_worker_executing_work()
1116 if (worker->current_work == work && in find_worker_executing_work()
1117 worker->current_func == work->func) in find_worker_executing_work()
1137 static void move_linked_works(struct work_struct *work, struct list_head *head, in move_linked_works() argument
1146 list_for_each_entry_safe_from(work, n, NULL, entry) { in move_linked_works()
1147 list_move_tail(&work->entry, head); in move_linked_works()
1148 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) in move_linked_works()
1177 static bool assign_work(struct work_struct *work, struct worker *worker, in assign_work() argument
1193 collision = find_worker_executing_work(pool, work); in assign_work()
1195 move_linked_works(work, &collision->scheduled, nextp); in assign_work()
1199 move_linked_works(work, &worker->scheduled, nextp); in assign_work()
1269 struct work_struct *work = list_first_entry(&pool->worklist, in kick_pool() local
1275 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; in kick_pool()
1674 struct work_struct *work) in __pwq_activate_work() argument
1676 unsigned long *wdb = work_data_bits(work); in __pwq_activate_work()
1679 trace_workqueue_activate_work(work); in __pwq_activate_work()
1682 move_linked_works(work, &pwq->pool->worklist, NULL); in __pwq_activate_work()
1787 struct work_struct *work = in pwq_activate_first_inactive() local
1791 if (work && pwq_tryinc_nr_active(pwq, fill)) { in pwq_activate_first_inactive()
1792 __pwq_activate_work(pwq, work); in pwq_activate_first_inactive()
1852 struct work_struct *work; in node_activate_pending_pwq() local
1884 work = list_first_entry_or_null(&pwq->inactive_works, in node_activate_pending_pwq()
1886 if (!work) { in node_activate_pending_pwq()
1900 __pwq_activate_work(pwq, work); in node_activate_pending_pwq()
2044 static int try_to_grab_pending(struct work_struct *work, u32 cflags, in try_to_grab_pending() argument
2054 struct delayed_work *dwork = to_delayed_work(work); in try_to_grab_pending()
2066 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) in try_to_grab_pending()
2074 pool = get_work_pool(work); in try_to_grab_pending()
2087 pwq = get_work_pwq(work); in try_to_grab_pending()
2089 unsigned long work_data = *work_data_bits(work); in try_to_grab_pending()
2091 debug_work_deactivate(work); in try_to_grab_pending()
2108 move_linked_works(work, &pwq->pool->worklist, NULL); in try_to_grab_pending()
2110 list_del_init(&work->entry); in try_to_grab_pending()
2116 set_work_pool_and_keep_pending(work, pool->id, in try_to_grab_pending()
2148 static bool work_grab_pending(struct work_struct *work, u32 cflags, in work_grab_pending() argument
2154 ret = try_to_grab_pending(work, cflags, irq_flags); in work_grab_pending()
2174 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
2177 debug_work_activate(work); in insert_work()
2180 kasan_record_aux_stack(work); in insert_work()
2183 set_work_pwq(work, pwq, extra_flags); in insert_work()
2184 list_add_tail(&work->entry, head); in insert_work()
2230 struct work_struct *work) in __queue_work() argument
2252 work->func, wq->name))) { in __queue_work()
2277 last_pool = get_work_pool(work); in __queue_work()
2283 worker = find_worker_executing_work(last_pool, work); in __queue_work()
2317 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
2319 if (WARN_ON(!list_empty(&work->entry))) in __queue_work()
2334 trace_workqueue_activate_work(work); in __queue_work()
2335 insert_work(pwq, work, &pool->worklist, work_flags); in __queue_work()
2339 insert_work(pwq, work, &pwq->inactive_works, work_flags); in __queue_work()
2347 static bool clear_pending_if_disabled(struct work_struct *work) in clear_pending_if_disabled() argument
2349 unsigned long data = *work_data_bits(work); in clear_pending_if_disabled()
2357 set_work_pool_and_clear_pending(work, offqd.pool_id, in clear_pending_if_disabled()
2377 struct work_struct *work) in queue_work_on() argument
2384 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && in queue_work_on()
2385 !clear_pending_if_disabled(work)) { in queue_work_on()
2386 __queue_work(cpu, wq, work); in queue_work_on()
2445 struct work_struct *work) in queue_work_node() argument
2463 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && in queue_work_node()
2464 !clear_pending_if_disabled(work)) { in queue_work_node()
2467 __queue_work(cpu, wq, work); in queue_work_node()
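Both queue_work_on() and queue_work_node() above test-and-set WORK_STRUCT_PENDING_BIT before calling __queue_work(), which is what makes queueing an already-pending item a cheap no-op, and both back off via clear_pending_if_disabled() when the item is disabled. A hedged usage sketch with a hypothetical driver context:

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Hypothetical device state carrying its work item. */
struct my_dev {
	struct work_struct irq_work;
};

static void my_irq_workfn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, irq_work);

	/* ... bottom-half processing for dev ... */
	(void)dev;
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->irq_work, my_irq_workfn);
}

static void my_dev_kick(struct my_dev *dev, int cpu)
{
	/*
	 * Returns false when the item was already pending; the earlier
	 * queueing still stands, so callers usually ignore the result.
	 */
	if (!queue_work_on(cpu, system_wq, &dev->irq_work))
		pr_debug("irq_work already pending\n");
}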
2481 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
2489 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
2494 WARN_ON_ONCE(!list_empty(&work->entry)); in __queue_delayed_work()
2503 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
2546 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
2553 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && in queue_delayed_work_on()
2554 !clear_pending_if_disabled(work)) { in queue_delayed_work_on()
2588 ret = work_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, &irq_flags); in mod_delayed_work_on()
2590 if (!clear_pending_if_disabled(&dwork->work)) in mod_delayed_work_on()
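queue_delayed_work_on() above arms dwork->timer so that delayed_work_timer_fn() later hands the item to __queue_work(), while mod_delayed_work_on() first grabs the pending state with work_grab_pending() so it can re-arm an item that is already queued or still waiting on its timer. A hedged sketch of the usual self-rearming poller pattern; the struct, interval, and names are hypothetical:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical periodic poller built on delayed work. */
struct my_poller {
	struct delayed_work dwork;
};

static void my_poll_fn(struct work_struct *work)
{
	struct my_poller *p = container_of(to_delayed_work(work),
					   struct my_poller, dwork);

	/* ... poll the hardware ... then re-arm for the next round. */
	queue_delayed_work(system_wq, &p->dwork, msecs_to_jiffies(500));
}

static void my_poller_start(struct my_poller *p)
{
	INIT_DELAYED_WORK(&p->dwork, my_poll_fn);
	queue_delayed_work(system_wq, &p->dwork, msecs_to_jiffies(500));
}

static void my_poller_poke(struct my_poller *p)
{
	/* Make the next poll happen immediately, even if one is queued. */
	mod_delayed_work(system_wq, &p->dwork, 0);
}

static void my_poller_stop(struct my_poller *p)
{
	cancel_delayed_work_sync(&p->dwork);
}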
2604 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); in rcu_work_rcufn()
2620 struct work_struct *work = &rwork->work; in queue_rcu_work() local
2626 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && in queue_rcu_work()
2627 !WARN_ON_ONCE(clear_pending_if_disabled(work))) { in queue_rcu_work()
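queue_rcu_work() above sets the pending bit immediately but defers the real __queue_work() call to rcu_work_rcufn(), i.e. until after an RCU grace period. A hedged sketch of the typical use, releasing an RCU-protected object whose cleanup must run in process context; the object is hypothetical:

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical RCU-protected object whose cleanup needs process context. */
struct my_obj {
	struct rcu_work rwork;
	/* ... payload ... */
};

static void my_obj_free_fn(struct work_struct *work)
{
	struct my_obj *obj = container_of(to_rcu_work(work), struct my_obj, rwork);

	/* Runs in a worker, after a full RCU grace period has elapsed. */
	kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, my_obj_free_fn);
	queue_rcu_work(system_wq, &obj->rwork);
}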
2943 static void idle_cull_fn(struct work_struct *work) in idle_cull_fn() argument
2945 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); in idle_cull_fn()
2979 static void send_mayday(struct work_struct *work) in send_mayday() argument
2981 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday()
3006 struct work_struct *work; in pool_mayday_timeout() local
3018 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
3019 send_mayday(work); in pool_mayday_timeout()
3156 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
3160 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work()
3175 lockdep_copy_map(&lockdep_map, &work->lockdep_map); in process_one_work()
3182 debug_work_deactivate(work); in process_one_work()
3183 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
3184 worker->current_work = work; in process_one_work()
3185 worker->current_func = work->func; in process_one_work()
3189 work_data = *work_data_bits(work); in process_one_work()
3198 list_del_init(&work->entry); in process_one_work()
3223 set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool)); in process_one_work()
3256 trace_workqueue_execute_start(work); in process_one_work()
3257 worker->current_func(work); in process_one_work()
3262 trace_workqueue_execute_end(work, worker->current_func); in process_one_work()
3331 struct work_struct *work; in process_scheduled_works() local
3334 while ((work = list_first_entry_or_null(&worker->scheduled, in process_scheduled_works()
3340 process_one_work(worker, work); in process_scheduled_works()
3416 struct work_struct *work = in worker_thread() local
3420 if (assign_work(work, worker, NULL)) in worker_thread()
3443 struct work_struct *work, *n; in assign_rescuer_work() local
3453 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in assign_rescuer_work()
3454 if (get_work_pwq(work) == pwq && assign_work(work, rescuer, &n)) in assign_rescuer_work()
3606 struct work_struct *work = in bh_worker() local
3610 if (assign_work(work, worker, NULL)) in bh_worker()
3643 struct work_struct work; member
3648 static void drain_dead_softirq_workfn(struct work_struct *work) in drain_dead_softirq_workfn() argument
3651 container_of(work, struct wq_drain_dead_softirq_work, work); in drain_dead_softirq_workfn()
3680 queue_work(system_bh_highpri_wq, work); in drain_dead_softirq_workfn()
3682 queue_work(system_bh_wq, work); in drain_dead_softirq_workfn()
3707 INIT_WORK_ONSTACK(&dead_work.work, drain_dead_softirq_workfn); in workqueue_softirq_dead()
3712 queue_work(system_bh_highpri_wq, &dead_work.work); in workqueue_softirq_dead()
3714 queue_work(system_bh_wq, &dead_work.work); in workqueue_softirq_dead()
3717 destroy_work_on_stack(&dead_work.work); in workqueue_softirq_dead()
3758 struct work_struct work; member
3763 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
3765 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
3812 INIT_WORK_ONSTACK_KEY(&barr->work, wq_barrier_func, in insert_wq_barrier()
3814 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); in insert_wq_barrier()
3843 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
3948 static void touch_work_lockdep_map(struct work_struct *work, in touch_work_lockdep_map() argument
3955 lock_map_acquire(&work->lockdep_map); in touch_work_lockdep_map()
3956 lock_map_release(&work->lockdep_map); in touch_work_lockdep_map()
4175 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, in start_flush_work() argument
4184 pool = get_work_pool(work); in start_flush_work()
4192 pwq = get_work_pwq(work); in start_flush_work()
4197 worker = find_worker_executing_work(pool, work); in start_flush_work()
4204 check_flush_dependency(wq, work, from_cancel); in start_flush_work()
4206 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
4209 touch_work_lockdep_map(work, wq); in start_flush_work()
4231 static bool __flush_work(struct work_struct *work, bool from_cancel) in __flush_work() argument
4238 if (WARN_ON(!work->func)) in __flush_work()
4241 if (!start_flush_work(work, &barr, from_cancel)) in __flush_work()
4252 unsigned long data = *work_data_bits(work); in __flush_work()
4266 pool = get_work_pool(work); in __flush_work()
4280 destroy_work_on_stack(&barr.work); in __flush_work()
4295 bool flush_work(struct work_struct *work) in flush_work() argument
4298 return __flush_work(work, false); in flush_work()
4318 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
4320 return flush_work(&dwork->work); in flush_delayed_work()
4334 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { in flush_rcu_work()
4336 flush_work(&rwork->work); in flush_rcu_work()
4339 return flush_work(&rwork->work); in flush_rcu_work()
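__flush_work() above waits by inserting an on-stack wq_barrier behind the target (insert_wq_barrier()); flush_delayed_work() first kicks a still-pending timer so there is something to flush, and flush_rcu_work() waits out the grace period before flushing. A hedged sketch of a caller that wants completion rather than cancellation; names hypothetical:

#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical statistics object updated from a work item. */
struct my_stats {
	struct work_struct update_work;
	u64 snapshot;
};

static u64 my_stats_read(struct my_stats *st)
{
	/*
	 * Wait for any already-queued or running update to finish before
	 * reading.  Unlike cancel_work_sync(), a pending update is allowed
	 * to run rather than being discarded.
	 */
	flush_work(&st->update_work);
	return st->snapshot;
}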
4362 static bool __cancel_work(struct work_struct *work, u32 cflags) in __cancel_work() argument
4368 ret = work_grab_pending(work, cflags, &irq_flags); in __cancel_work()
4370 work_offqd_unpack(&offqd, *work_data_bits(work)); in __cancel_work()
4375 set_work_pool_and_clear_pending(work, offqd.pool_id, in __cancel_work()
4381 static bool __cancel_work_sync(struct work_struct *work, u32 cflags) in __cancel_work_sync() argument
4385 ret = __cancel_work(work, cflags | WORK_CANCEL_DISABLE); in __cancel_work_sync()
4387 if (*work_data_bits(work) & WORK_OFFQ_BH) in __cancel_work_sync()
4397 __flush_work(work, true); in __cancel_work_sync()
4400 enable_work(work); in __cancel_work_sync()
4408 bool cancel_work(struct work_struct *work) in cancel_work() argument
4410 return __cancel_work(work, 0); in cancel_work()
4432 bool cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
4434 return __cancel_work_sync(work, 0); in cancel_work_sync()
4456 return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED); in cancel_delayed_work()
4471 return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED); in cancel_delayed_work_sync()
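__cancel_work_sync() above first cancels with WORK_CANCEL_DISABLE so the item cannot be re-queued behind its back, then uses __flush_work(..., true) to wait for any in-flight execution before re-enabling. A hedged sketch of the teardown ordering that relies on this; names hypothetical:

#include <linux/workqueue.h>

/* Hypothetical device with one plain and one delayed work item. */
struct my_netdev {
	struct work_struct event_work;
	struct delayed_work retry_dwork;
};

static void my_netdev_remove(struct my_netdev *dev)
{
	/*
	 * Quiesce the sources of new queueing (IRQs, timers, ...) first,
	 * then cancel synchronously so neither handler can still be running
	 * once the backing memory is freed.
	 */
	cancel_work_sync(&dev->event_work);
	cancel_delayed_work_sync(&dev->retry_dwork);
}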
4487 bool disable_work(struct work_struct *work) in disable_work() argument
4489 return __cancel_work(work, WORK_CANCEL_DISABLE); in disable_work()
4506 bool disable_work_sync(struct work_struct *work) in disable_work_sync() argument
4508 return __cancel_work_sync(work, WORK_CANCEL_DISABLE); in disable_work_sync()
4522 bool enable_work(struct work_struct *work) in enable_work() argument
4527 work_grab_pending(work, 0, &irq_flags); in enable_work()
4529 work_offqd_unpack(&offqd, *work_data_bits(work)); in enable_work()
4531 set_work_pool_and_clear_pending(work, offqd.pool_id, in enable_work()
4547 return __cancel_work(&dwork->work, in disable_delayed_work()
4560 return __cancel_work_sync(&dwork->work, in disable_delayed_work_sync()
4573 return enable_work(&dwork->work); in enable_delayed_work()
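disable_work()/enable_work() above layer a disable count on top of cancellation: while an item is disabled, queueing attempts are silently rejected by clear_pending_if_disabled(), and the _sync variants also wait for a running instance to finish. A hedged sketch, assuming the disable/enable interface referenced in this listing is available; names hypothetical:

#include <linux/workqueue.h>

/* Hypothetical device that must not run its refresh work while suspended. */
struct my_sensor {
	struct work_struct refresh_work;
};

static void my_sensor_suspend(struct my_sensor *s)
{
	/* Cancel, wait for a running instance, and block further queueing. */
	disable_work_sync(&s->refresh_work);
}

static void my_sensor_resume(struct my_sensor *s)
{
	/* Drop the disable count; queueing works again once it hits zero. */
	enable_work(&s->refresh_work);
}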
4600 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
4602 INIT_WORK(work, func); in schedule_on_each_cpu()
4603 schedule_work_on(cpu, work); in schedule_on_each_cpu()
4629 fn(&ew->work); in execute_in_process_context()
4633 INIT_WORK(&ew->work, fn); in execute_in_process_context()
4634 schedule_work(&ew->work); in execute_in_process_context()
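schedule_on_each_cpu() above allocates a per-CPU array of work items, queues one on every online CPU with schedule_work_on(), and flushes them all before returning, while execute_in_process_context() either calls the function directly or defers it through schedule_work(). A hedged usage sketch for the per-CPU case; the callback is hypothetical:

#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_percpu_sync_fn(struct work_struct *work)
{
	/* Runs once in each online CPU's kworker. */
	pr_info("synced on CPU %d\n", raw_smp_processor_id());
}

static int my_percpu_sync(void)
{
	/* May sleep; returns once the callback has completed on every CPU. */
	return schedule_on_each_cpu(my_percpu_sync_fn);
}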
5100 static void pwq_release_workfn(struct kthread_work *work) in pwq_release_workfn() argument
5102 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_release_workfn()
6102 unsigned int work_busy(struct work_struct *work) in work_busy() argument
6108 if (work_pending(work)) in work_busy()
6112 pool = get_work_pool(work); in work_busy()
6115 if (find_worker_executing_work(pool, work)) in work_busy()
6249 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) in pr_cont_work() argument
6251 if (work->func == wq_barrier_func) { in pr_cont_work()
6254 barr = container_of(work, struct wq_barrier, work); in pr_cont_work()
6262 pr_cont_work_flush(comma, work->func, pcwsp); in pr_cont_work()
6270 struct work_struct *work; in show_pwq() local
6299 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
6300 pr_cont_work(false, work, &pcws); in show_pwq()
6307 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
6308 if (get_work_pwq(work) == pwq) { in show_pwq()
6317 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
6318 if (get_work_pwq(work) != pwq) in show_pwq()
6321 pr_cont_work(comma, work, &pcws); in show_pwq()
6322 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
6332 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
6333 pr_cont_work(comma, work, &pcws); in show_pwq()
6334 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
6760 struct work_struct work; member
6766 static void work_for_cpu_fn(struct work_struct *work) in work_for_cpu_fn() argument
6768 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); in work_for_cpu_fn()
6790 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); in work_on_cpu_key()
6791 schedule_work_on(cpu, &wfc.work); in work_on_cpu_key()
6792 flush_work(&wfc.work); in work_on_cpu_key()
6793 destroy_work_on_stack(&wfc.work); in work_on_cpu_key()
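work_on_cpu_key() above wraps the callback in an on-stack work item (INIT_WORK_ONSTACK_KEY), schedules it on the requested CPU, flushes it, and returns the callback's long result; callers normally go through the work_on_cpu() wrapper. A hedged sketch; the callback and its argument are hypothetical:

#include <linux/workqueue.h>

/* Hypothetical: read some CPU-local state from the CPU that owns it. */
static long my_read_local_state(void *arg)
{
	unsigned long *out = arg;

	*out = 42;	/* stand-in for a genuinely CPU-local read */
	return 0;
}

static long my_query_cpu(int cpu, unsigned long *out)
{
	/* Sleeps: runs my_read_local_state() on @cpu and waits for it. */
	return work_on_cpu(cpu, my_read_local_state, out);
}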