Lines matching refs:worker (identifier cross-reference; all hits are from kernel/workqueue.c)
Each entry gives the source line number, the matching line, and the enclosing function where applicable; the trailing "argument" and "local" tags mark the kind of declaration matched.

217 struct worker *manager; /* L: purely informational */
350 struct worker *rescuer; /* MD: rescue worker */
581 #define for_each_pool_worker(worker, pool) \ argument
582 list_for_each_entry((worker), &(pool)->workers, node) \
972 static inline void worker_set_flags(struct worker *worker, unsigned int flags) in worker_set_flags() argument
974 struct worker_pool *pool = worker->pool; in worker_set_flags()
980 !(worker->flags & WORKER_NOT_RUNNING)) { in worker_set_flags()
984 worker->flags |= flags; in worker_set_flags()
994 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) in worker_clr_flags() argument
996 struct worker_pool *pool = worker->pool; in worker_clr_flags()
997 unsigned int oflags = worker->flags; in worker_clr_flags()
1001 worker->flags &= ~flags; in worker_clr_flags()
1009 if (!(worker->flags & WORKER_NOT_RUNNING)) in worker_clr_flags()
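
The worker_set_flags()/worker_clr_flags() hits above (972-1009) are the pool concurrency bookkeeping: nr_running drops when a worker picks up its first WORKER_NOT_RUNNING bit and rises again when it sheds the last one. A minimal standalone C sketch of that pattern, with illustrative bit values and an abbreviated NOT_RUNNING mask (the kernel's also covers PREP, UNBOUND and REBOUND); locking is elided:

#include <stdio.h>

#define WORKER_IDLE		(1 << 1)	/* illustrative values */
#define WORKER_CPU_INTENSIVE	(1 << 2)
#define WORKER_NOT_RUNNING	(WORKER_IDLE | WORKER_CPU_INTENSIVE)

struct pool   { int nr_running; };
struct worker { unsigned int flags; struct pool *pool; };

static void worker_set_flags(struct worker *w, unsigned int flags)
{
	/* First NOT_RUNNING bit going in: one fewer runnable worker. */
	if ((flags & WORKER_NOT_RUNNING) && !(w->flags & WORKER_NOT_RUNNING))
		w->pool->nr_running--;
	w->flags |= flags;
}

static void worker_clr_flags(struct worker *w, unsigned int flags)
{
	unsigned int oflags = w->flags;

	w->flags &= ~flags;
	/* Count the worker as running again only once every
	 * NOT_RUNNING bit is gone. */
	if ((oflags & WORKER_NOT_RUNNING) && !(w->flags & WORKER_NOT_RUNNING))
		w->pool->nr_running++;
}

int main(void)
{
	struct pool p = { .nr_running = 1 };
	struct worker w = { .flags = 0, .pool = &p };

	worker_set_flags(&w, WORKER_CPU_INTENSIVE);	/* 1 -> 0 */
	worker_clr_flags(&w, WORKER_CPU_INTENSIVE);	/* 0 -> 1 */
	printf("nr_running = %d\n", p.nr_running);
	return 0;
}
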
1014 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker()
1019 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
1032 static void worker_enter_idle(struct worker *worker) in worker_enter_idle() argument
1034 struct worker_pool *pool = worker->pool; in worker_enter_idle()
1036 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || in worker_enter_idle()
1037 WARN_ON_ONCE(!list_empty(&worker->entry) && in worker_enter_idle()
1038 (worker->hentry.next || worker->hentry.pprev))) in worker_enter_idle()
1042 worker->flags |= WORKER_IDLE; in worker_enter_idle()
1044 worker->last_active = jiffies; in worker_enter_idle()
1047 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1065 static void worker_leave_idle(struct worker *worker) in worker_leave_idle() argument
1067 struct worker_pool *pool = worker->pool; in worker_leave_idle()
1069 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) in worker_leave_idle()
1071 worker_clr_flags(worker, WORKER_IDLE); in worker_leave_idle()
1073 list_del_init(&worker->entry); in worker_leave_idle()
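
The idle-list hits (1014-1073) show LIFO idle management: worker_enter_idle() stamps last_active and pushes at the head, so first_idle_worker() wakes the most recently idled (cache-warmest) worker while the tail ages toward the idle cull. A standalone sketch with a hand-rolled list standing in for <linux/list.h>; nothing here is kernel code:

#include <stdio.h>
#include <stddef.h>
#include <time.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct worker { struct list_head entry; time_t last_active; int id; };
struct pool   { struct list_head idle_list; int nr_idle; };

/* LIFO: the head is the most recently idled worker (cheapest to wake),
 * the tail is the longest-idle one (first candidate for culling). */
static void worker_enter_idle(struct pool *p, struct worker *w)
{
	w->last_active = time(NULL);
	p->nr_idle++;
	list_add(&w->entry, &p->idle_list);
}

static void worker_leave_idle(struct pool *p, struct worker *w)
{
	p->nr_idle--;
	list_del_init(&w->entry);
}

static struct worker *first_idle_worker(struct pool *p)
{
	if (list_empty(&p->idle_list))
		return NULL;
	return container_of(p->idle_list.next, struct worker, entry);
}

int main(void)
{
	struct pool p = { .nr_idle = 0 };
	struct worker a = { .id = 1 }, b = { .id = 2 };

	INIT_LIST_HEAD(&p.idle_list);
	worker_enter_idle(&p, &a);
	worker_enter_idle(&p, &b);
	printf("first idle: %d\n", first_idle_worker(&p)->id);	/* 2 */
	worker_leave_idle(&p, &b);
	printf("first idle: %d, nr_idle: %d\n",
	       first_idle_worker(&p)->id, p.nr_idle);		/* 1, 1 */
	return 0;
}
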
1109 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work()
1112 struct worker *worker; in find_worker_executing_work() local
1114 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1116 if (worker->current_work == work && in find_worker_executing_work()
1117 worker->current_func == work->func) in find_worker_executing_work()
1118 return worker; in find_worker_executing_work()
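
find_worker_executing_work() (1109-1118) matches on both the work pointer and the function: a work item may be freed and another allocated at the same address, so the pointer alone could name the wrong executor; requiring current_func to match too filters most recycled items. A sketch of the double test, with a linear scan standing in for the kernel's busy_hash:

#include <stdio.h>

struct work   { void (*func)(void); };
struct worker {
	struct work *current_work;
	void (*current_func)(void);
};

static struct worker *find_worker_executing_work(struct worker *busy, int n,
						 const struct work *work)
{
	for (int i = 0; i < n; i++)
		if (busy[i].current_work == work &&
		    busy[i].current_func == work->func)
			return &busy[i];
	return NULL;
}

static void fn(void) {}

int main(void)
{
	struct work item = { .func = fn };
	struct worker busy[1] = { { .current_work = &item, .current_func = fn } };

	printf("found: %p\n",
	       (void *)find_worker_executing_work(busy, 1, &item));
	return 0;
}
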
1177 static bool assign_work(struct work_struct *work, struct worker *worker, in assign_work() argument
1180 struct worker_pool *pool = worker->pool; in assign_work()
1181 struct worker *collision; in assign_work()
1199 move_linked_works(work, &worker->scheduled, nextp); in assign_work()
1235 struct worker *worker = first_idle_worker(pool); in kick_pool() local
1240 if (!need_more_worker(pool) || !worker) in kick_pool()
1248 p = worker->task; in kick_pool()
1387 struct worker *worker = kthread_data(task); in wq_worker_running() local
1389 if (!READ_ONCE(worker->sleeping)) in wq_worker_running()
1399 if (!(worker->flags & WORKER_NOT_RUNNING)) in wq_worker_running()
1400 worker->pool->nr_running++; in wq_worker_running()
1407 worker->current_at = worker->task->se.sum_exec_runtime; in wq_worker_running()
1409 WRITE_ONCE(worker->sleeping, 0); in wq_worker_running()
1421 struct worker *worker = kthread_data(task); in wq_worker_sleeping() local
1429 if (worker->flags & WORKER_NOT_RUNNING) in wq_worker_sleeping()
1432 pool = worker->pool; in wq_worker_sleeping()
1435 if (READ_ONCE(worker->sleeping)) in wq_worker_sleeping()
1438 WRITE_ONCE(worker->sleeping, 1); in wq_worker_sleeping()
1446 if (worker->flags & WORKER_NOT_RUNNING) { in wq_worker_sleeping()
1453 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; in wq_worker_sleeping()
1467 struct worker *worker = kthread_data(task); in wq_worker_tick() local
1468 struct pool_workqueue *pwq = worker->current_pwq; in wq_worker_tick()
1469 struct worker_pool *pool = worker->pool; in wq_worker_tick()
1491 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || in wq_worker_tick()
1492 worker->task->se.sum_exec_runtime - worker->current_at < in wq_worker_tick()
1498 worker_set_flags(worker, WORKER_CPU_INTENSIVE); in wq_worker_tick()
1499 wq_cpu_intensive_report(worker->current_func); in wq_worker_tick()
1534 struct worker *worker = kthread_data(task); in wq_worker_last_func() local
1536 return worker->last_func; in wq_worker_last_func()
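
The scheduler hooks (1387-1536) let the core kernel tell workqueues when a worker runs, sleeps or ticks; wq_worker_tick() compares sum_exec_runtime against current_at (snapshotted when the work item started) to auto-flag CPU hogs as WORKER_CPU_INTENSIVE. A sketch of just that threshold test, with an illustrative threshold (the kernel tunable is wq_cpu_intensive_thresh_us); the real hook also checks sleeping/NOT_RUNNING state and kicks the pool, omitted here:

#include <stdio.h>
#include <stdint.h>

#define WORKER_CPU_INTENSIVE	(1 << 2)		/* illustrative */
#define CPU_INTENSIVE_THRESH_NS	(10ULL * 1000 * 1000)	/* 10ms, illustrative */

struct worker {
	unsigned int flags;
	uint64_t current_at;	/* task runtime when the work item started */
};

/* Called from the (simulated) tick with the task's cumulative runtime. */
static void wq_worker_tick(struct worker *w, uint64_t sum_exec_runtime)
{
	if (w->flags & WORKER_CPU_INTENSIVE)
		return;
	if (sum_exec_runtime - w->current_at < CPU_INTENSIVE_THRESH_NS)
		return;
	/* The work item has hogged the CPU: stop counting this worker
	 * toward pool concurrency so another worker can be started. */
	w->flags |= WORKER_CPU_INTENSIVE;
}

int main(void)
{
	struct worker w = { .flags = 0, .current_at = 0 };

	wq_worker_tick(&w, 5 * 1000 * 1000ULL);		/* 5ms: below threshold */
	wq_worker_tick(&w, 15 * 1000 * 1000ULL);	/* 15ms: flagged */
	printf("cpu intensive: %s\n",
	       w.flags & WORKER_CPU_INTENSIVE ? "yes" : "no");
	return 0;
}
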
2194 struct worker *worker; in is_chained_work() local
2196 worker = current_wq_worker(); in is_chained_work()
2201 return worker && worker->current_pwq->wq == wq; in is_chained_work()
2279 struct worker *worker; in __queue_work() local
2283 worker = find_worker_executing_work(last_pool, work); in __queue_work()
2285 if (worker && worker->current_pwq->wq == wq) { in __queue_work()
2286 pwq = worker->current_pwq; in __queue_work()
2637 static struct worker *alloc_worker(int node) in alloc_worker()
2639 struct worker *worker; in alloc_worker() local
2641 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); in alloc_worker()
2642 if (worker) { in alloc_worker()
2643 INIT_LIST_HEAD(&worker->entry); in alloc_worker()
2644 INIT_LIST_HEAD(&worker->scheduled); in alloc_worker()
2645 INIT_LIST_HEAD(&worker->node); in alloc_worker()
2647 worker->flags = WORKER_PREP; in alloc_worker()
2649 return worker; in alloc_worker()
2669 static void worker_attach_to_pool(struct worker *worker, in worker_attach_to_pool() argument
2680 worker->flags |= WORKER_UNBOUND; in worker_attach_to_pool()
2683 kthread_set_per_cpu(worker->task, pool->cpu); in worker_attach_to_pool()
2686 if (worker->rescue_wq) in worker_attach_to_pool()
2687 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); in worker_attach_to_pool()
2689 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
2690 worker->pool = pool; in worker_attach_to_pool()
2695 static void unbind_worker(struct worker *worker) in unbind_worker() argument
2699 kthread_set_per_cpu(worker->task, -1); in unbind_worker()
2701 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); in unbind_worker()
2703 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); in unbind_worker()
2707 static void detach_worker(struct worker *worker) in detach_worker() argument
2711 unbind_worker(worker); in detach_worker()
2712 list_del(&worker->node); in detach_worker()
2723 static void worker_detach_from_pool(struct worker *worker) in worker_detach_from_pool() argument
2725 struct worker_pool *pool = worker->pool; in worker_detach_from_pool()
2731 detach_worker(worker); in worker_detach_from_pool()
2732 worker->pool = NULL; in worker_detach_from_pool()
2736 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); in worker_detach_from_pool()
2739 static int format_worker_id(char *buf, size_t size, struct worker *worker, in format_worker_id() argument
2742 if (worker->rescue_wq) in format_worker_id()
2744 worker->rescue_wq->name); in format_worker_id()
2749 pool->cpu, worker->id, in format_worker_id()
2753 pool->id, worker->id); in format_worker_id()
2771 static struct worker *create_worker(struct worker_pool *pool) in create_worker()
2773 struct worker *worker; in create_worker() local
2784 worker = alloc_worker(pool->node); in create_worker()
2785 if (!worker) { in create_worker()
2790 worker->id = id; in create_worker()
2795 format_worker_id(id_buf, sizeof(id_buf), worker, pool); in create_worker()
2796 worker->task = kthread_create_on_node(worker_thread, worker, in create_worker()
2798 if (IS_ERR(worker->task)) { in create_worker()
2799 if (PTR_ERR(worker->task) == -EINTR) { in create_worker()
2804 worker->task); in create_worker()
2809 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
2810 kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); in create_worker()
2814 worker_attach_to_pool(worker, pool); in create_worker()
2819 worker->pool->nr_workers++; in create_worker()
2820 worker_enter_idle(worker); in create_worker()
2827 if (worker->task) in create_worker()
2828 wake_up_process(worker->task); in create_worker()
2832 return worker; in create_worker()
2836 kfree(worker); in create_worker()
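
create_worker() (2771-2836) allocates, spawns the kthread, attaches the worker to its pool, enters idle, and only then wakes the thread. A loose pthread analogue of that sequence; pthreads cannot create a stopped thread, so creation and wakeup are fused here and the attach/idle steps are reduced to a counter. Error unwinding beyond allocation/creation failure is elided:

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct pool   { int nr_workers; };
struct worker { pthread_t task; struct pool *pool; int id; };

static void *worker_thread(void *arg)
{
	struct worker *w = arg;

	printf("worker %d running\n", w->id);
	return NULL;
}

static struct worker *create_worker(struct pool *pool, int id)
{
	struct worker *w = calloc(1, sizeof(*w));	/* kzalloc_node() analogue */

	if (!w)
		return NULL;
	w->id = id;
	w->pool = pool;
	/* kthread_create_on_node() analogue; the kernel creates the thread
	 * stopped and wakes it only after attach + enter-idle. */
	if (pthread_create(&w->task, NULL, worker_thread, w)) {
		free(w);
		return NULL;
	}
	pool->nr_workers++;	/* stands in for attach + worker_enter_idle() */
	return w;
}

int main(void)
{
	struct pool p = { 0 };
	struct worker *w = create_worker(&p, 0);

	if (w) {
		pthread_join(w->task, NULL);
		free(w);
	}
	return 0;
}
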
2842 struct worker *worker; in detach_dying_workers() local
2844 list_for_each_entry(worker, cull_list, entry) in detach_dying_workers()
2845 detach_worker(worker); in detach_dying_workers()
2850 struct worker *worker, *tmp; in reap_dying_workers() local
2852 list_for_each_entry_safe(worker, tmp, cull_list, entry) { in reap_dying_workers()
2853 list_del_init(&worker->entry); in reap_dying_workers()
2854 kthread_stop_put(worker->task); in reap_dying_workers()
2855 kfree(worker); in reap_dying_workers()
2870 static void set_worker_dying(struct worker *worker, struct list_head *list) in set_worker_dying() argument
2872 struct worker_pool *pool = worker->pool; in set_worker_dying()
2878 if (WARN_ON(worker->current_work) || in set_worker_dying()
2879 WARN_ON(!list_empty(&worker->scheduled)) || in set_worker_dying()
2880 WARN_ON(!(worker->flags & WORKER_IDLE))) in set_worker_dying()
2886 worker->flags |= WORKER_DIE; in set_worker_dying()
2888 list_move(&worker->entry, list); in set_worker_dying()
2891 get_task_struct(worker->task); in set_worker_dying()
2915 struct worker *worker; in idle_worker_timeout() local
2919 worker = list_last_entry(&pool->idle_list, struct worker, entry); in idle_worker_timeout()
2920 expires = worker->last_active + IDLE_WORKER_TIMEOUT; in idle_worker_timeout()
2958 struct worker *worker; in idle_cull_fn() local
2961 worker = list_last_entry(&pool->idle_list, struct worker, entry); in idle_cull_fn()
2962 expires = worker->last_active + IDLE_WORKER_TIMEOUT; in idle_cull_fn()
2969 set_worker_dying(worker, &cull_list); in idle_cull_fn()
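
idle_worker_timeout() and idle_cull_fn() (2915-2969) both test the tail of idle_list, the longest-idle worker, against last_active + IDLE_WORKER_TIMEOUT (300 * HZ, i.e. five minutes, in the kernel). The expiry test, modeled with plain seconds instead of jiffies:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define IDLE_WORKER_TIMEOUT	300	/* five minutes, in seconds here */

struct worker { time_t last_active; };

/* The tail of idle_list has been idle the longest; if even it has not
 * expired, no idle worker has. */
static bool worker_expired(const struct worker *oldest, time_t now)
{
	return now >= oldest->last_active + IDLE_WORKER_TIMEOUT;
}

int main(void)
{
	time_t now = time(NULL);
	struct worker oldest = { .last_active = now - 400 };

	printf("cull? %s\n", worker_expired(&oldest, now) ? "yes" : "no");
	return 0;
}
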
3124 static bool manage_workers(struct worker *worker) in manage_workers() argument
3126 struct worker_pool *pool = worker->pool; in manage_workers()
3132 pool->manager = worker; in manage_workers()
3156 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
3161 struct worker_pool *pool = worker->pool; in process_one_work()
3183 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
3184 worker->current_work = work; in process_one_work()
3185 worker->current_func = work->func; in process_one_work()
3186 worker->current_pwq = pwq; in process_one_work()
3187 if (worker->task) in process_one_work()
3188 worker->current_at = worker->task->se.sum_exec_runtime; in process_one_work()
3190 worker->current_color = get_work_color(work_data); in process_one_work()
3196 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
3207 worker_set_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
3257 worker->current_func(work); in process_one_work()
3262 trace_workqueue_execute_end(work, worker->current_func); in process_one_work()
3268 if (unlikely((worker->task && in_atomic()) || in process_one_work()
3276 worker->current_func); in process_one_work()
3289 if (worker->task) in process_one_work()
3301 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
3304 worker->last_func = worker->current_func; in process_one_work()
3307 hash_del(&worker->hentry); in process_one_work()
3308 worker->current_work = NULL; in process_one_work()
3309 worker->current_func = NULL; in process_one_work()
3310 worker->current_pwq = NULL; in process_one_work()
3311 worker->current_color = INT_MAX; in process_one_work()
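
process_one_work() (3156-3311) brackets the callback with bookkeeping: publish current_work/current_func (plus the busy_hash entry) before calling, clear them after, and keep last_func for wq_worker_last_func() diagnostics. A sketch of those brackets with hashing, colors, flags and locking elided:

#include <stdio.h>

struct work   { void (*func)(struct work *); };
struct worker {
	struct work *current_work;
	void (*current_func)(struct work *);
	void (*last_func)(struct work *);
};

static void process_one_work(struct worker *w, struct work *work)
{
	w->current_work = work;		/* visible to flushers/debuggers */
	w->current_func = work->func;

	w->current_func(work);		/* the actual callback */

	w->last_func = w->current_func;	/* kept for idle diagnostics */
	w->current_work = NULL;
	w->current_func = NULL;
}

static void say_hi(struct work *work) { (void)work; printf("hi\n"); }

int main(void)
{
	struct worker w = { 0 };
	struct work item = { .func = say_hi };

	process_one_work(&w, &item);
	return 0;
}
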
3329 static void process_scheduled_works(struct worker *worker) in process_scheduled_works() argument
3334 while ((work = list_first_entry_or_null(&worker->scheduled, in process_scheduled_works()
3337 worker->pool->watchdog_ts = jiffies; in process_scheduled_works()
3340 process_one_work(worker, work); in process_scheduled_works()
3368 struct worker *worker = __worker; in worker_thread() local
3369 struct worker_pool *pool = worker->pool; in worker_thread()
3377 if (unlikely(worker->flags & WORKER_DIE)) { in worker_thread()
3384 worker->pool = NULL; in worker_thread()
3385 ida_free(&pool->worker_ida, worker->id); in worker_thread()
3389 worker_leave_idle(worker); in worker_thread()
3396 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
3404 WARN_ON_ONCE(!list_empty(&worker->scheduled)); in worker_thread()
3413 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); in worker_thread()
3420 if (assign_work(work, worker, NULL)) in worker_thread()
3421 process_scheduled_works(worker); in worker_thread()
3424 worker_set_flags(worker, WORKER_PREP); in worker_thread()
3433 worker_enter_idle(worker); in worker_thread()
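
worker_thread() (3368-3433) and process_scheduled_works() (3329-3340) form the main loop: leave idle, drop WORKER_PREP, drain everything assign_work() put on worker->scheduled, then set PREP again and re-enter idle. One iteration of that loop, with a singly linked queue standing in for the kernel lists and all locking omitted:

#include <stdio.h>

#define WORKER_PREP	(1 << 3)	/* illustrative value */

struct work   { struct work *next; void (*func)(void); };
struct worker { unsigned int flags; struct work *scheduled; };

static struct work *pop(struct work **q)
{
	struct work *w = *q;

	if (w)
		*q = w->next;
	return w;
}

static void process_scheduled_works(struct worker *w)
{
	struct work *work;

	while ((work = pop(&w->scheduled)))
		work->func();
}

static void worker_loop_once(struct worker *w, struct work *pending)
{
	w->flags &= ~WORKER_PREP;	/* now counted as running */
	w->scheduled = pending;		/* assign_work() analogue */
	process_scheduled_works(w);
	w->flags |= WORKER_PREP;	/* about to go idle again */
}

static void f1(void) { printf("one\n"); }
static void f2(void) { printf("two\n"); }

int main(void)
{
	struct work b = { NULL, f2 }, a = { &b, f1 };
	struct worker w = { .flags = WORKER_PREP, .scheduled = NULL };

	worker_loop_once(&w, &a);
	return 0;
}
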
3440 static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescuer) in assign_rescuer_work()
3484 struct worker *rescuer = __rescuer; in rescuer_thread()
3585 static void bh_worker(struct worker *worker) in bh_worker() argument
3587 struct worker_pool *pool = worker->pool; in bh_worker()
3593 worker_leave_idle(worker); in bh_worker()
3602 WARN_ON_ONCE(!list_empty(&worker->scheduled)); in bh_worker()
3603 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); in bh_worker()
3610 if (assign_work(work, worker, NULL)) in bh_worker()
3611 process_scheduled_works(worker); in bh_worker()
3615 worker_set_flags(worker, WORKER_PREP); in bh_worker()
3617 worker_enter_idle(worker); in bh_worker()
3639 bh_worker(list_first_entry(&pool->workers, struct worker, node)); in workqueue_softirq_action()
3666 bh_worker(list_first_entry(&pool->workers, struct worker, node)); in drain_dead_softirq_workfn()
3739 struct worker *worker; in check_flush_dependency() local
3744 worker = current_wq_worker(); in check_flush_dependency()
3750 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & in check_flush_dependency()
3753 worker->current_pwq->wq->name, worker->current_func, in check_flush_dependency()
3795 struct work_struct *target, struct worker *worker) in insert_wq_barrier() argument
3827 if (worker) { in insert_wq_barrier()
3828 head = worker->scheduled.next; in insert_wq_barrier()
3829 work_color = worker->current_color; in insert_wq_barrier()
4178 struct worker *worker = NULL; in start_flush_work() local
4197 worker = find_worker_executing_work(pool, work); in start_flush_work()
4198 if (!worker) in start_flush_work()
4200 pwq = worker->current_pwq; in start_flush_work()
4206 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
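
insert_wq_barrier() and start_flush_work() (3795-4206) implement flushing by queueing a barrier work exactly where it will run as soon as the target finishes; when the target is currently executing, that spot is the head of the executing worker's scheduled list (worker->scheduled.next). A pointer-level sketch of that placement, with the completion the flusher sleeps on omitted:

#include <stdio.h>

struct work   { struct work *next; const char *name; };
struct worker { struct work *scheduled; };

/* Push the barrier at the head of the executing worker's scheduled list
 * so it runs immediately after the in-flight work item returns. */
static void insert_barrier(struct worker *worker, struct work *barr)
{
	barr->next = worker->scheduled;
	worker->scheduled = barr;
}

int main(void)
{
	struct work later = { NULL, "later" };
	struct work barr = { NULL, "barrier" };
	struct worker w = { .scheduled = &later };

	insert_barrier(&w, &barr);
	for (struct work *p = w.scheduled; p; p = p->next)
		printf("%s\n", p->name);	/* barrier, then later */
	return 0;
}
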
4965 struct worker *worker; in put_unbound_pool() local
5011 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
5012 set_worker_dying(worker, &cull_list); in put_unbound_pool()
5587 struct worker *rescuer; in init_rescuer()
6032 struct worker *worker = current_wq_worker(); in current_work() local
6034 return worker ? worker->current_work : NULL; in current_work()
6048 struct worker *worker = current_wq_worker(); in current_is_workqueue_rescuer() local
6050 return worker && worker->rescue_wq; in current_is_workqueue_rescuer()
6137 struct worker *worker = current_wq_worker(); in set_worker_desc() local
6140 if (worker) { in set_worker_desc()
6142 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); in set_worker_desc()
6168 struct worker *worker; in print_worker_info() local
6177 worker = kthread_probe_data(task); in print_worker_info()
6183 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); in print_worker_info()
6184 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
6187 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); in print_worker_info()
6210 static void pr_cont_worker_id(struct worker *worker) in pr_cont_worker_id() argument
6212 struct worker_pool *pool = worker->pool; in pr_cont_worker_id()
6218 pr_cont("%d%s", task_pid_nr(worker->task), in pr_cont_worker_id()
6219 worker->rescue_wq ? "(RESCUER)" : ""); in pr_cont_worker_id()
6271 struct worker *worker; in show_pwq() local
6282 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
6283 if (worker->current_pwq == pwq) { in show_pwq()
6292 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
6293 if (worker->current_pwq != pwq) in show_pwq()
6297 pr_cont_worker_id(worker); in show_pwq()
6298 pr_cont(":%ps", worker->current_func); in show_pwq()
6299 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
6391 struct worker *worker; in show_one_worker_pool() local
6416 list_for_each_entry(worker, &pool->idle_list, entry) { in show_one_worker_pool()
6418 pr_cont_worker_id(worker); in show_one_worker_pool()
6488 struct worker *worker = kthread_data(task); in wq_worker_comm() local
6489 struct worker_pool *pool = worker->pool; in wq_worker_comm()
6492 off = format_worker_id(buf, size, worker, pool); in wq_worker_comm()
6501 if (worker->desc[0] != '\0') { in wq_worker_comm()
6502 if (worker->current_work) in wq_worker_comm()
6504 worker->desc); in wq_worker_comm()
6507 worker->desc); in wq_worker_comm()
6538 struct worker *worker; in unbind_workers() local
6552 for_each_pool_worker(worker, pool) in unbind_workers()
6553 worker->flags |= WORKER_UNBOUND; in unbind_workers()
6576 for_each_pool_worker(worker, pool) in unbind_workers()
6577 unbind_worker(worker); in unbind_workers()
6591 struct worker *worker; in rebind_workers() local
6602 for_each_pool_worker(worker, pool) { in rebind_workers()
6603 kthread_set_per_cpu(worker->task, pool->cpu); in rebind_workers()
6604 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, in rebind_workers()
6612 for_each_pool_worker(worker, pool) { in rebind_workers()
6613 unsigned int worker_flags = worker->flags; in rebind_workers()
6633 WRITE_ONCE(worker->flags, worker_flags); in rebind_workers()
6652 struct worker *worker; in restore_unbound_workers_cpumask() local
6663 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
6664 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); in restore_unbound_workers_cpumask()
6938 struct worker *worker; in workqueue_apply_unbound_cpumask() local
6952 for_each_pool_worker(worker, pool) in workqueue_apply_unbound_cpumask()
6953 unbind_worker(worker); in workqueue_apply_unbound_cpumask()
7519 struct worker *worker; in show_cpu_pool_hog() local
7525 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_cpu_pool_hog()
7526 if (task_is_running(worker->task)) { in show_cpu_pool_hog()
7535 sched_show_task(worker->task); in show_cpu_pool_hog()