Lines Matching refs:pool

257 	struct worker_pool	*pool;		/* I: the associated pool */  member
528 static void show_one_worker_pool(struct worker_pool *pool);
544 #define for_each_bh_worker_pool(pool, cpu) \ argument
545 for ((pool) = &per_cpu(bh_worker_pools, cpu)[0]; \
546 (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
547 (pool)++)
549 #define for_each_cpu_worker_pool(pool, cpu) \ argument
550 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
551 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
552 (pool)++)
566 #define for_each_pool(pool, pi) \ argument
567 idr_for_each_entry(&worker_pool_idr, pool, pi) \
581 #define for_each_pool_worker(worker, pool) \ argument
582 list_for_each_entry((worker), &(pool)->workers, node) \
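
for_each_bh_worker_pool() and for_each_cpu_worker_pool() walk a fixed-size per-CPU array of standard pools, while for_each_pool() and for_each_pool_worker() walk the pool IDR and a pool's worker list. Below is a minimal user-space sketch of the per-CPU array pattern; NR_CPUS, the struct layout and the backing array are stand-ins, not the kernel definitions.

/*
 * User-space model of the per-CPU pool iteration pattern: one small array of
 * standard pools per CPU, walked with a for_each-style macro.
 */
#include <stdio.h>

#define NR_CPUS			4
#define NR_STD_WORKER_POOLS	2	/* normal and highpri, as in the kernel */

struct worker_pool {
	int cpu;
	int nice;
};

static struct worker_pool cpu_worker_pools[NR_CPUS][NR_STD_WORKER_POOLS];

/* mirrors the shape of for_each_cpu_worker_pool() above */
#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &cpu_worker_pools[cpu][0];			\
	     (pool) < &cpu_worker_pools[cpu][NR_STD_WORKER_POOLS];	\
	     (pool)++)

int main(void)
{
	struct worker_pool *pool;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for_each_cpu_worker_pool(pool, cpu)
			printf("cpu%d pool %d\n", cpu,
			       (int)(pool - cpu_worker_pools[cpu]));
	return 0;
}
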
706 static int worker_pool_assign_id(struct worker_pool *pool) in worker_pool_assign_id() argument
712 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, in worker_pool_assign_id()
715 pool->id = ret; in worker_pool_assign_id()
748 return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask; in unbound_effective_cpumask()
767 static unsigned long pool_offq_flags(struct worker_pool *pool) in pool_offq_flags() argument
769 return (pool->flags & POOL_BH) ? WORK_OFFQ_BH : 0; in pool_offq_flags()
887 return work_struct_pwq(data)->pool; in get_work_pool()
932 static bool need_more_worker(struct worker_pool *pool) in need_more_worker() argument
934 return !list_empty(&pool->worklist) && !pool->nr_running; in need_more_worker()
938 static bool may_start_working(struct worker_pool *pool) in may_start_working() argument
940 return pool->nr_idle; in may_start_working()
944 static bool keep_working(struct worker_pool *pool) in keep_working() argument
946 return !list_empty(&pool->worklist) && (pool->nr_running <= 1); in keep_working()
950 static bool need_to_create_worker(struct worker_pool *pool) in need_to_create_worker() argument
952 return need_more_worker(pool) && !may_start_working(pool); in need_to_create_worker()
956 static bool too_many_workers(struct worker_pool *pool) in too_many_workers() argument
958 bool managing = pool->flags & POOL_MANAGER_ACTIVE; in too_many_workers()
959 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ in too_many_workers()
960 int nr_busy = pool->nr_workers - nr_idle; in too_many_workers()
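
need_more_worker(), may_start_working(), keep_working(), need_to_create_worker() and too_many_workers() are the predicates that drive the pool's concurrency management off nr_running, nr_idle and nr_workers. The stand-alone model below mirrors those relations; the field names follow the kernel but nothing here is kernel code, and worklist_len stands in for !list_empty(&pool->worklist).

/* Stand-alone model of the pool bookkeeping predicates listed above. */
#include <assert.h>
#include <stdbool.h>

struct worker_pool {
	int nr_workers;		/* total workers attached to the pool */
	int nr_idle;		/* workers sitting on idle_list */
	int nr_running;		/* workers currently burning CPU */
	int worklist_len;	/* stand-in for a non-empty worklist */
	bool manager_active;	/* POOL_MANAGER_ACTIVE */
};

static bool need_more_worker(struct worker_pool *p)
{
	return p->worklist_len && !p->nr_running;
}

static bool may_start_working(struct worker_pool *p)
{
	return p->nr_idle;
}

static bool keep_working(struct worker_pool *p)
{
	return p->worklist_len && p->nr_running <= 1;
}

static bool need_to_create_worker(struct worker_pool *p)
{
	return need_more_worker(p) && !may_start_working(p);
}

static bool too_many_workers(struct worker_pool *p)
{
	bool managing = p->manager_active;
	int nr_idle = p->nr_idle + managing;	/* manager counts as idle */
	int nr_busy = p->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * 4 >= nr_busy;	/* 4:1 idle ratio */
}

int main(void)
{
	struct worker_pool p = { .nr_workers = 1, .worklist_len = 3 };

	assert(need_more_worker(&p));		/* work pending, nobody running */
	assert(need_to_create_worker(&p));	/* ...and nobody idle to wake up */
	p.nr_idle = 1;
	assert(!need_to_create_worker(&p));	/* an idle worker can be kicked instead */
	assert(keep_working(&p) && !too_many_workers(&p));
	return 0;
}
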
974 struct worker_pool *pool = worker->pool; in worker_set_flags() local
976 lockdep_assert_held(&pool->lock); in worker_set_flags()
981 pool->nr_running--; in worker_set_flags()
996 struct worker_pool *pool = worker->pool; in worker_clr_flags() local
999 lockdep_assert_held(&pool->lock); in worker_clr_flags()
1010 pool->nr_running++; in worker_clr_flags()
1014 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker() argument
1016 if (unlikely(list_empty(&pool->idle_list))) in first_idle_worker()
1019 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
1034 struct worker_pool *pool = worker->pool; in worker_enter_idle() local
1043 pool->nr_idle++; in worker_enter_idle()
1047 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1049 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) in worker_enter_idle()
1050 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); in worker_enter_idle()
1053 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running); in worker_enter_idle()
1067 struct worker_pool *pool = worker->pool; in worker_leave_idle() local
1072 pool->nr_idle--; in worker_leave_idle()
1109 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work() argument
1114 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1180 struct worker_pool *pool = worker->pool; in assign_work() local
1183 lockdep_assert_held(&pool->lock); in assign_work()
1193 collision = find_worker_executing_work(pool, work); in assign_work()
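
assign_work() uses find_worker_executing_work() to keep one work item from running on two workers of the same pool at once: if another worker is already executing it, the item is handed to that worker instead. The kernel keys busy_hash on the work address (and also re-checks the work function to guard against recycled items); the purely illustrative sketch below replaces the hash with a flat array.

/* Toy model of the non-reentrancy lookup performed before dispatching work. */
#include <stdio.h>

struct work { int id; };
struct worker { struct work *current_work; };

static struct worker busy[4];

static struct worker *find_worker_executing_work(struct work *w)
{
	for (int i = 0; i < 4; i++)
		if (busy[i].current_work == w)
			return &busy[i];
	return NULL;			/* nobody is running it; safe to dispatch */
}

int main(void)
{
	struct work a = { 1 };

	busy[2].current_work = &a;	/* worker 2 is already running 'a' */

	struct worker *collision = find_worker_executing_work(&a);
	printf("collision with worker %ld\n",
	       collision ? (long)(collision - busy) : -1L);
	return 0;
}
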
1203 static struct irq_work *bh_pool_irq_work(struct worker_pool *pool) in bh_pool_irq_work() argument
1205 int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0; in bh_pool_irq_work()
1207 return &per_cpu(bh_pool_irq_works, pool->cpu)[high]; in bh_pool_irq_work()
1210 static void kick_bh_pool(struct worker_pool *pool) in kick_bh_pool() argument
1214 if (unlikely(pool->cpu != smp_processor_id() && in kick_bh_pool()
1215 !(pool->flags & POOL_BH_DRAINING))) { in kick_bh_pool()
1216 irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); in kick_bh_pool()
1220 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in kick_bh_pool()
1233 static bool kick_pool(struct worker_pool *pool) in kick_pool() argument
1235 struct worker *worker = first_idle_worker(pool); in kick_pool()
1238 lockdep_assert_held(&pool->lock); in kick_pool()
1240 if (!need_more_worker(pool) || !worker) in kick_pool()
1243 if (pool->flags & POOL_BH) { in kick_pool()
1244 kick_bh_pool(pool); in kick_pool()
1267 if (!pool->attrs->affn_strict && in kick_pool()
1268 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { in kick_pool()
1269 struct work_struct *work = list_first_entry(&pool->worklist, in kick_pool()
1271 int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask, in kick_pool()
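
kick_pool() wakes the first idle worker and, for pools whose affinity scope is not strict, steers that worker's wake CPU into the pod cpumask of the pending work. The toy version below uses plain bitmasks in place of cpumasks and simply picks the first eligible bit, whereas the kernel uses cpumask_any_and_distribute() to spread wakeups.

/* Approximate model of the wake-CPU nudge in kick_pool(). */
#include <stdio.h>

static int any_set_cpu(unsigned long mask)
{
	for (int cpu = 0; cpu < 64; cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return -1;
}

int main(void)
{
	unsigned long pod_cpumask = 0x0cUL;	/* CPUs 2-3 form the pod */
	int wake_cpu = 0;			/* worker last ran on CPU 0 */
	int affn_strict = 0;

	if (!affn_strict && !(pod_cpumask & (1UL << wake_cpu)))
		wake_cpu = any_set_cpu(pod_cpumask);

	printf("wake the idle worker on CPU %d\n", wake_cpu);
	return 0;
}
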
1400 worker->pool->nr_running++; in wq_worker_running()
1422 struct worker_pool *pool; in wq_worker_sleeping() local
1432 pool = worker->pool; in wq_worker_sleeping()
1439 raw_spin_lock_irq(&pool->lock); in wq_worker_sleeping()
1447 raw_spin_unlock_irq(&pool->lock); in wq_worker_sleeping()
1451 pool->nr_running--; in wq_worker_sleeping()
1452 if (kick_pool(pool)) in wq_worker_sleeping()
1455 raw_spin_unlock_irq(&pool->lock); in wq_worker_sleeping()
1469 struct worker_pool *pool = worker->pool; in wq_worker_tick() local
1496 raw_spin_lock(&pool->lock); in wq_worker_tick()
1502 if (kick_pool(pool)) in wq_worker_tick()
1505 raw_spin_unlock(&pool->lock); in wq_worker_tick()
1625 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1639 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1662 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1664 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1680 if (list_empty(&pwq->pool->worklist)) in __pwq_activate_work()
1681 pwq->pool->watchdog_ts = jiffies; in __pwq_activate_work()
1682 move_linked_works(work, &pwq->pool->worklist, NULL); in __pwq_activate_work()
1713 struct worker_pool *pool = pwq->pool; in pwq_tryinc_nr_active() local
1714 struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node); in pwq_tryinc_nr_active()
1717 lockdep_assert_held(&pool->lock); in pwq_tryinc_nr_active()
1833 raw_spin_lock_irq(&pwq->pool->lock); in unplug_oldest_pwq()
1837 kick_pool(pwq->pool); in unplug_oldest_pwq()
1839 raw_spin_unlock_irq(&pwq->pool->lock); in unplug_oldest_pwq()
1872 if (pwq->pool != locked_pool) { in node_activate_pending_pwq()
1874 locked_pool = pwq->pool; in node_activate_pending_pwq()
1911 if (pwq->pool != caller_pool) in node_activate_pending_pwq()
1912 kick_pool(pwq->pool); in node_activate_pending_pwq()
1932 struct worker_pool *pool = pwq->pool; in pwq_dec_nr_active() local
1933 struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node); in pwq_dec_nr_active()
1935 lockdep_assert_held(&pool->lock); in pwq_dec_nr_active()
1970 node_activate_pending_pwq(nna, pool); in pwq_dec_nr_active()
2050 struct worker_pool *pool; in try_to_grab_pending() local
2077 pool = get_work_pool(work); in try_to_grab_pending()
2078 if (!pool) in try_to_grab_pending()
2081 raw_spin_lock(&pool->lock); in try_to_grab_pending()
2091 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
2111 move_linked_works(work, &pwq->pool->worklist, NULL); in try_to_grab_pending()
2119 set_work_pool_and_keep_pending(work, pool->id, in try_to_grab_pending()
2120 pool_offq_flags(pool)); in try_to_grab_pending()
2125 raw_spin_unlock(&pool->lock); in try_to_grab_pending()
2129 raw_spin_unlock(&pool->lock); in try_to_grab_pending()
2239 struct worker_pool *last_pool, *pool; in __queue_work() local
2272 pool = pwq->pool; in __queue_work()
2284 if (last_pool && last_pool != pool && !(wq->flags & __WQ_ORDERED)) { in __queue_work()
2293 pool = pwq->pool; in __queue_work()
2294 WARN_ON_ONCE(pool != last_pool); in __queue_work()
2298 raw_spin_lock(&pool->lock); in __queue_work()
2301 raw_spin_lock(&pool->lock); in __queue_work()
2313 raw_spin_unlock(&pool->lock); in __queue_work()
2337 if (list_empty(&pool->worklist)) in __queue_work()
2338 pool->watchdog_ts = jiffies; in __queue_work()
2341 insert_work(pwq, work, &pool->worklist, work_flags); in __queue_work()
2342 kick_pool(pool); in __queue_work()
2349 raw_spin_unlock(&pool->lock); in __queue_work()
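
__queue_work() normally queues on the pwq selected for the submitting CPU, but if the work item is still executing in the pool it last ran on, it is queued there instead so the item never runs concurrently with itself; the check is skipped for ordered workqueues, which funnel everything through a single pwq anyway. The hypothetical helper below condenses that decision, with pool identities reduced to plain integers.

/* Illustrative decision helper mirroring the last_pool check in __queue_work(). */
#include <stdbool.h>
#include <stdio.h>

static int pick_pool(int default_pool, int last_pool, bool still_running_there,
		     bool ordered_wq)
{
	if (last_pool >= 0 && last_pool != default_pool &&
	    !ordered_wq && still_running_there)
		return last_pool;	/* follow the in-flight execution */
	return default_pool;		/* otherwise use the selected pwq's pool */
}

int main(void)
{
	printf("%d\n", pick_pool(1, 3, true, false));	/* 3: work still running there */
	printf("%d\n", pick_pool(1, 3, false, false));	/* 1: stick to the default */
	return 0;
}
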
2658 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) in pool_allowed_cpus() argument
2660 if (pool->cpu < 0 && pool->attrs->affn_strict) in pool_allowed_cpus()
2661 return pool->attrs->__pod_cpumask; in pool_allowed_cpus()
2663 return pool->attrs->cpumask; in pool_allowed_cpus()
2676 struct worker_pool *pool) in worker_attach_to_pool() argument
2685 if (pool->flags & POOL_DISASSOCIATED) { in worker_attach_to_pool()
2688 WARN_ON_ONCE(pool->flags & POOL_BH); in worker_attach_to_pool()
2689 kthread_set_per_cpu(worker->task, pool->cpu); in worker_attach_to_pool()
2693 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); in worker_attach_to_pool()
2695 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
2696 worker->pool = pool; in worker_attach_to_pool()
2731 struct worker_pool *pool = worker->pool; in worker_detach_from_pool() local
2734 WARN_ON_ONCE(pool->flags & POOL_BH); in worker_detach_from_pool()
2738 worker->pool = NULL; in worker_detach_from_pool()
2746 struct worker_pool *pool) in format_worker_id() argument
2752 if (pool) { in format_worker_id()
2753 if (pool->cpu >= 0) in format_worker_id()
2755 pool->cpu, worker->id, in format_worker_id()
2756 pool->attrs->nice < 0 ? "H" : ""); in format_worker_id()
2759 pool->id, worker->id); in format_worker_id()
2777 static struct worker *create_worker(struct worker_pool *pool) in create_worker() argument
2783 id = ida_alloc(&pool->worker_ida, GFP_KERNEL); in create_worker()
2790 worker = alloc_worker(pool->node); in create_worker()
2798 if (!(pool->flags & POOL_BH)) { in create_worker()
2801 format_worker_id(id_buf, sizeof(id_buf), worker, pool); in create_worker()
2803 pool->node, "%s", id_buf); in create_worker()
2815 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
2816 kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); in create_worker()
2820 worker_attach_to_pool(worker, pool); in create_worker()
2823 raw_spin_lock_irq(&pool->lock); in create_worker()
2825 worker->pool->nr_workers++; in create_worker()
2836 raw_spin_unlock_irq(&pool->lock); in create_worker()
2841 ida_free(&pool->worker_ida, id); in create_worker()
2878 struct worker_pool *pool = worker->pool; in set_worker_dying() local
2880 lockdep_assert_held(&pool->lock); in set_worker_dying()
2889 pool->nr_workers--; in set_worker_dying()
2890 pool->nr_idle--; in set_worker_dying()
2912 struct worker_pool *pool = from_timer(pool, t, idle_timer); in idle_worker_timeout() local
2915 if (work_pending(&pool->idle_cull_work)) in idle_worker_timeout()
2918 raw_spin_lock_irq(&pool->lock); in idle_worker_timeout()
2920 if (too_many_workers(pool)) { in idle_worker_timeout()
2925 worker = list_last_entry(&pool->idle_list, struct worker, entry); in idle_worker_timeout()
2930 mod_timer(&pool->idle_timer, expires); in idle_worker_timeout()
2932 raw_spin_unlock_irq(&pool->lock); in idle_worker_timeout()
2935 queue_work(system_unbound_wq, &pool->idle_cull_work); in idle_worker_timeout()
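
idle_worker_timeout() only looks at the oldest entry on idle_list: if that worker has been idle past IDLE_WORKER_TIMEOUT and the pool still has too many workers, culling is punted to idle_cull_work; otherwise the timer is re-armed for the moment that worker would expire. A sketch of just that timing decision follows, with an integer time base and made-up values.

/* Sketch of the idle-timer decision; constants and timestamps are stand-ins. */
#include <stdio.h>

#define IDLE_WORKER_TIMEOUT	300	/* stand-in "jiffies" value */

int main(void)
{
	unsigned long now = 1000;
	unsigned long oldest_last_active = 650;	/* oldest idle worker's timestamp */
	unsigned long expires = oldest_last_active + IDLE_WORKER_TIMEOUT;

	if (expires <= now)
		printf("cull: schedule idle_cull_work\n");
	else
		printf("re-arm idle_timer for t=%lu\n", expires);
	return 0;
}
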
2951 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); in idle_cull_fn() local
2961 raw_spin_lock_irq(&pool->lock); in idle_cull_fn()
2963 while (too_many_workers(pool)) { in idle_cull_fn()
2967 worker = list_last_entry(&pool->idle_list, struct worker, entry); in idle_cull_fn()
2971 mod_timer(&pool->idle_timer, expires); in idle_cull_fn()
2978 raw_spin_unlock_irq(&pool->lock); in idle_cull_fn()
3011 struct worker_pool *pool = from_timer(pool, t, mayday_timer); in pool_mayday_timeout() local
3014 raw_spin_lock_irq(&pool->lock); in pool_mayday_timeout()
3017 if (need_to_create_worker(pool)) { in pool_mayday_timeout()
3024 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
3029 raw_spin_unlock_irq(&pool->lock); in pool_mayday_timeout()
3031 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); in pool_mayday_timeout()
3052 static void maybe_create_worker(struct worker_pool *pool) in maybe_create_worker() argument
3053 __releases(&pool->lock) in maybe_create_worker()
3054 __acquires(&pool->lock) in maybe_create_worker()
3057 raw_spin_unlock_irq(&pool->lock); in maybe_create_worker()
3060 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); in maybe_create_worker()
3063 if (create_worker(pool) || !need_to_create_worker(pool)) in maybe_create_worker()
3068 if (!need_to_create_worker(pool)) in maybe_create_worker()
3072 del_timer_sync(&pool->mayday_timer); in maybe_create_worker()
3073 raw_spin_lock_irq(&pool->lock); in maybe_create_worker()
3079 if (need_to_create_worker(pool)) in maybe_create_worker()
3107 struct worker_pool *pool = worker->pool; in manage_workers() local
3109 if (pool->flags & POOL_MANAGER_ACTIVE) in manage_workers()
3112 pool->flags |= POOL_MANAGER_ACTIVE; in manage_workers()
3113 pool->manager = worker; in manage_workers()
3115 maybe_create_worker(pool); in manage_workers()
3117 pool->manager = NULL; in manage_workers()
3118 pool->flags &= ~POOL_MANAGER_ACTIVE; in manage_workers()
3138 __releases(&pool->lock) in process_one_work()
3139 __acquires(&pool->lock) in process_one_work()
3142 struct worker_pool *pool = worker->pool; in process_one_work() local
3145 bool bh_draining = pool->flags & POOL_BH_DRAINING; in process_one_work()
3159 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in process_one_work()
3160 raw_smp_processor_id() != pool->cpu); in process_one_work()
3164 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
3196 kick_pool(pool); in process_one_work()
3204 set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool)); in process_one_work()
3207 raw_spin_unlock_irq(&pool->lock); in process_one_work()
3273 raw_spin_lock_irq(&pool->lock); in process_one_work()
3316 worker->pool->watchdog_ts = jiffies; in process_scheduled_works()
3348 struct worker_pool *pool = worker->pool; in worker_thread() local
3353 raw_spin_lock_irq(&pool->lock); in worker_thread()
3357 raw_spin_unlock_irq(&pool->lock); in worker_thread()
3363 worker->pool = NULL; in worker_thread()
3364 ida_free(&pool->worker_ida, worker->id); in worker_thread()
3371 if (!need_more_worker(pool)) in worker_thread()
3375 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
3396 list_first_entry(&pool->worklist, in worker_thread()
3401 } while (keep_working(pool)); in worker_thread()
3414 raw_spin_unlock_irq(&pool->lock); in worker_thread()
3472 struct worker_pool *pool = pwq->pool; in rescuer_thread() local
3480 worker_attach_to_pool(rescuer, pool); in rescuer_thread()
3482 raw_spin_lock_irq(&pool->lock); in rescuer_thread()
3489 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in rescuer_thread()
3507 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
3525 kick_pool(pool); in rescuer_thread()
3527 raw_spin_unlock_irq(&pool->lock); in rescuer_thread()
3556 struct worker_pool *pool = worker->pool; in bh_worker() local
3560 raw_spin_lock_irq(&pool->lock); in bh_worker()
3567 if (!need_more_worker(pool)) in bh_worker()
3575 list_first_entry(&pool->worklist, in bh_worker()
3580 } while (keep_working(pool) && in bh_worker()
3586 kick_pool(pool); in bh_worker()
3587 raw_spin_unlock_irq(&pool->lock); in bh_worker()
3603 struct worker_pool *pool = in workqueue_softirq_action() local
3605 if (need_more_worker(pool)) in workqueue_softirq_action()
3606 bh_worker(list_first_entry(&pool->workers, struct worker, node)); in workqueue_softirq_action()
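
BH pools have no kthreads; workqueue_softirq_action() services them from the softirq handler by calling bh_worker() on the pool's first attached worker, which drains the worklist under a restart budget so a single softirq invocation stays bounded. Below is a model of that bounded drain loop; BH_WORKER_RESTARTS and the counts are stand-ins.

/* Model of a bounded drain loop: process pending items until the budget runs out. */
#include <stdio.h>

#define BH_WORKER_RESTARTS	10

int main(void)
{
	int pending = 25, nr_restarts = BH_WORKER_RESTARTS;

	do {
		pending--;				/* process one work item */
	} while (pending > 0 && --nr_restarts);

	printf("left for the next round: %d\n", pending);
	return 0;
}
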
3611 struct worker_pool *pool; member
3619 struct worker_pool *pool = dead_work->pool; in drain_dead_softirq_workfn() local
3629 raw_spin_lock_irq(&pool->lock); in drain_dead_softirq_workfn()
3630 pool->flags |= POOL_BH_DRAINING; in drain_dead_softirq_workfn()
3631 raw_spin_unlock_irq(&pool->lock); in drain_dead_softirq_workfn()
3633 bh_worker(list_first_entry(&pool->workers, struct worker, node)); in drain_dead_softirq_workfn()
3635 raw_spin_lock_irq(&pool->lock); in drain_dead_softirq_workfn()
3636 pool->flags &= ~POOL_BH_DRAINING; in drain_dead_softirq_workfn()
3637 repeat = need_more_worker(pool); in drain_dead_softirq_workfn()
3638 raw_spin_unlock_irq(&pool->lock); in drain_dead_softirq_workfn()
3646 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in drain_dead_softirq_workfn()
3668 struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i]; in workqueue_softirq_dead() local
3671 if (!need_more_worker(pool)) in workqueue_softirq_dead()
3675 dead_work.pool = pool; in workqueue_softirq_dead()
3678 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in workqueue_softirq_dead()
3865 if (current_pool != pwq->pool) { in flush_workqueue_prep_pwqs()
3868 current_pool = pwq->pool; in flush_workqueue_prep_pwqs()
4120 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
4122 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
4146 struct worker_pool *pool; in start_flush_work() local
4151 pool = get_work_pool(work); in start_flush_work()
4152 if (!pool) { in start_flush_work()
4157 raw_spin_lock_irq(&pool->lock); in start_flush_work()
4161 if (unlikely(pwq->pool != pool)) in start_flush_work()
4164 worker = find_worker_executing_work(pool, work); in start_flush_work()
4174 raw_spin_unlock_irq(&pool->lock); in start_flush_work()
4193 raw_spin_unlock_irq(&pool->lock); in start_flush_work()
4763 static int init_worker_pool(struct worker_pool *pool) in init_worker_pool() argument
4765 raw_spin_lock_init(&pool->lock); in init_worker_pool()
4766 pool->id = -1; in init_worker_pool()
4767 pool->cpu = -1; in init_worker_pool()
4768 pool->node = NUMA_NO_NODE; in init_worker_pool()
4769 pool->flags |= POOL_DISASSOCIATED; in init_worker_pool()
4770 pool->watchdog_ts = jiffies; in init_worker_pool()
4771 INIT_LIST_HEAD(&pool->worklist); in init_worker_pool()
4772 INIT_LIST_HEAD(&pool->idle_list); in init_worker_pool()
4773 hash_init(pool->busy_hash); in init_worker_pool()
4775 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); in init_worker_pool()
4776 INIT_WORK(&pool->idle_cull_work, idle_cull_fn); in init_worker_pool()
4778 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); in init_worker_pool()
4780 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
4782 ida_init(&pool->worker_ida); in init_worker_pool()
4783 INIT_HLIST_NODE(&pool->hash_node); in init_worker_pool()
4784 pool->refcnt = 1; in init_worker_pool()
4787 pool->attrs = alloc_workqueue_attrs(); in init_worker_pool()
4788 if (!pool->attrs) in init_worker_pool()
4791 wqattrs_clear_for_pool(pool->attrs); in init_worker_pool()
4909 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); in rcu_free_pool() local
4911 ida_destroy(&pool->worker_ida); in rcu_free_pool()
4912 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
4913 kfree(pool); in rcu_free_pool()
4927 static void put_unbound_pool(struct worker_pool *pool) in put_unbound_pool() argument
4934 if (--pool->refcnt) in put_unbound_pool()
4938 if (WARN_ON(!(pool->cpu < 0)) || in put_unbound_pool()
4939 WARN_ON(!list_empty(&pool->worklist))) in put_unbound_pool()
4943 if (pool->id >= 0) in put_unbound_pool()
4944 idr_remove(&worker_pool_idr, pool->id); in put_unbound_pool()
4945 hash_del(&pool->hash_node); in put_unbound_pool()
4962 !(pool->flags & POOL_MANAGER_ACTIVE), in put_unbound_pool()
4966 raw_spin_lock_irq(&pool->lock); in put_unbound_pool()
4967 if (!(pool->flags & POOL_MANAGER_ACTIVE)) { in put_unbound_pool()
4968 pool->flags |= POOL_MANAGER_ACTIVE; in put_unbound_pool()
4971 raw_spin_unlock_irq(&pool->lock); in put_unbound_pool()
4975 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
4977 WARN_ON(pool->nr_workers || pool->nr_idle); in put_unbound_pool()
4978 raw_spin_unlock_irq(&pool->lock); in put_unbound_pool()
4987 del_timer_sync(&pool->idle_timer); in put_unbound_pool()
4988 cancel_work_sync(&pool->idle_cull_work); in put_unbound_pool()
4989 del_timer_sync(&pool->mayday_timer); in put_unbound_pool()
4992 call_rcu(&pool->rcu, rcu_free_pool); in put_unbound_pool()
5013 struct worker_pool *pool; in get_unbound_pool() local
5019 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { in get_unbound_pool()
5020 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
5021 pool->refcnt++; in get_unbound_pool()
5022 return pool; in get_unbound_pool()
5035 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); in get_unbound_pool()
5036 if (!pool || init_worker_pool(pool) < 0) in get_unbound_pool()
5039 pool->node = node; in get_unbound_pool()
5040 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
5041 wqattrs_clear_for_pool(pool->attrs); in get_unbound_pool()
5043 if (worker_pool_assign_id(pool) < 0) in get_unbound_pool()
5047 if (wq_online && !create_worker(pool)) in get_unbound_pool()
5051 hash_add(unbound_pool_hash, &pool->hash_node, hash); in get_unbound_pool()
5053 return pool; in get_unbound_pool()
5055 if (pool) in get_unbound_pool()
5056 put_unbound_pool(pool); in get_unbound_pool()
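
get_unbound_pool() hashes the requested attributes and reuses an existing pool with equal attrs, bumping its refcount; put_unbound_pool() tears the pool down when the last reference drops. The tiny user-space model below captures that caching scheme, with a flat array standing in for unbound_pool_hash and nice as the only attribute.

/* Model of attribute-keyed pool sharing with reference counting. */
#include <stdio.h>
#include <string.h>

struct pool {
	int nice;
	int refcnt;	/* 0 means the slot is unused */
};

static struct pool pools[8];

static struct pool *get_pool(int nice)
{
	for (int i = 0; i < 8; i++)
		if (pools[i].refcnt && pools[i].nice == nice) {
			pools[i].refcnt++;		/* share the existing pool */
			return &pools[i];
		}
	for (int i = 0; i < 8; i++)
		if (!pools[i].refcnt) {
			pools[i].nice = nice;
			pools[i].refcnt = 1;		/* create a fresh pool */
			return &pools[i];
		}
	return NULL;
}

static void put_pool(struct pool *p)
{
	if (p && --p->refcnt == 0)
		memset(p, 0, sizeof(*p));		/* last user tears it down */
}

int main(void)
{
	struct pool *a = get_pool(0), *b = get_pool(0), *c = get_pool(-20);

	printf("shared=%d refcnt=%d\n", a == b, a->refcnt);	/* shared=1 refcnt=2 */
	put_pool(a);
	put_pool(b);
	put_pool(c);
	return 0;
}
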
5069 struct worker_pool *pool = pwq->pool; in pwq_release_workfn() local
5092 put_unbound_pool(pool); in pwq_release_workfn()
5098 wq_node_nr_active(pwq->wq, pwq->pool->node); in pwq_release_workfn()
5119 struct worker_pool *pool) in init_pwq() argument
5125 pwq->pool = pool; in init_pwq()
5158 struct worker_pool *pool; in alloc_unbound_pwq() local
5163 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
5164 if (!pool) in alloc_unbound_pwq()
5167 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
5169 put_unbound_pool(pool); in alloc_unbound_pwq()
5173 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
5443 if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) in unbound_wq_update_pwq()
5462 raw_spin_lock_irq(&pwq->pool->lock); in unbound_wq_update_pwq()
5464 raw_spin_unlock_irq(&pwq->pool->lock); in unbound_wq_update_pwq()
5492 struct worker_pool *pool; in alloc_and_link_pwqs() local
5494 pool = &(per_cpu_ptr(pools, cpu)[highpri]); in alloc_and_link_pwqs()
5498 pool->node); in alloc_and_link_pwqs()
5502 init_pwq(*pwq_p, wq, pool); in alloc_and_link_pwqs()
5647 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); in wq_adjust_max_active()
5650 kick_pool(pwq->pool); in wq_adjust_max_active()
5652 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); in wq_adjust_max_active()
5881 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
5886 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
5892 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
6067 struct worker_pool *pool; in work_busy() local
6075 pool = get_work_pool(work); in work_busy()
6076 if (pool) { in work_busy()
6077 raw_spin_lock_irqsave(&pool->lock, irq_flags); in work_busy()
6078 if (find_worker_executing_work(pool, work)) in work_busy()
6080 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); in work_busy()
6160 static void pr_cont_pool_info(struct worker_pool *pool) in pr_cont_pool_info() argument
6162 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
6163 if (pool->node != NUMA_NO_NODE) in pr_cont_pool_info()
6164 pr_cont(" node=%d", pool->node); in pr_cont_pool_info()
6165 pr_cont(" flags=0x%x", pool->flags); in pr_cont_pool_info()
6166 if (pool->flags & POOL_BH) in pr_cont_pool_info()
6168 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); in pr_cont_pool_info()
6170 pr_cont(" nice=%d", pool->attrs->nice); in pr_cont_pool_info()
6175 struct worker_pool *pool = worker->pool; in pr_cont_worker_id() local
6177 if (pool->flags & WQ_BH) in pr_cont_worker_id()
6179 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); in pr_cont_worker_id()
6232 struct worker_pool *pool = pwq->pool; in show_pwq() local
6238 pr_info(" pwq %d:", pool->id); in show_pwq()
6239 pr_cont_pool_info(pool); in show_pwq()
6245 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
6255 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
6270 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
6280 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
6326 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); in show_one_workqueue()
6337 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); in show_one_workqueue()
6352 static void show_one_worker_pool(struct worker_pool *pool) in show_one_worker_pool() argument
6359 raw_spin_lock_irqsave(&pool->lock, irq_flags); in show_one_worker_pool()
6360 if (pool->nr_workers == pool->nr_idle) in show_one_worker_pool()
6364 if (!list_empty(&pool->worklist)) in show_one_worker_pool()
6365 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; in show_one_worker_pool()
6373 pr_info("pool %d:", pool->id); in show_one_worker_pool()
6374 pr_cont_pool_info(pool); in show_one_worker_pool()
6375 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); in show_one_worker_pool()
6376 if (pool->manager) in show_one_worker_pool()
6378 task_pid_nr(pool->manager->task)); in show_one_worker_pool()
6379 list_for_each_entry(worker, &pool->idle_list, entry) { in show_one_worker_pool()
6387 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); in show_one_worker_pool()
6405 struct worker_pool *pool; in show_all_workqueues() local
6415 for_each_pool(pool, pi) in show_all_workqueues()
6416 show_one_worker_pool(pool); in show_all_workqueues()
6452 struct worker_pool *pool = worker->pool; in wq_worker_comm() local
6455 off = format_worker_id(buf, size, worker, pool); in wq_worker_comm()
6457 if (pool) { in wq_worker_comm()
6458 raw_spin_lock_irq(&pool->lock); in wq_worker_comm()
6472 raw_spin_unlock_irq(&pool->lock); in wq_worker_comm()
6500 struct worker_pool *pool; in unbind_workers() local
6503 for_each_cpu_worker_pool(pool, cpu) { in unbind_workers()
6505 raw_spin_lock_irq(&pool->lock); in unbind_workers()
6515 for_each_pool_worker(worker, pool) in unbind_workers()
6518 pool->flags |= POOL_DISASSOCIATED; in unbind_workers()
6528 pool->nr_running = 0; in unbind_workers()
6535 kick_pool(pool); in unbind_workers()
6537 raw_spin_unlock_irq(&pool->lock); in unbind_workers()
6539 for_each_pool_worker(worker, pool) in unbind_workers()
6552 static void rebind_workers(struct worker_pool *pool) in rebind_workers() argument
6565 for_each_pool_worker(worker, pool) { in rebind_workers()
6566 kthread_set_per_cpu(worker->task, pool->cpu); in rebind_workers()
6568 pool_allowed_cpus(pool)) < 0); in rebind_workers()
6571 raw_spin_lock_irq(&pool->lock); in rebind_workers()
6573 pool->flags &= ~POOL_DISASSOCIATED; in rebind_workers()
6575 for_each_pool_worker(worker, pool) { in rebind_workers()
6599 raw_spin_unlock_irq(&pool->lock); in rebind_workers()
6612 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) in restore_unbound_workers_cpumask() argument
6620 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
6623 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
6626 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
6632 struct worker_pool *pool; in workqueue_prepare_cpu() local
6634 for_each_cpu_worker_pool(pool, cpu) { in workqueue_prepare_cpu()
6635 if (pool->nr_workers) in workqueue_prepare_cpu()
6637 if (!create_worker(pool)) in workqueue_prepare_cpu()
6645 struct worker_pool *pool; in workqueue_online_cpu() local
6653 for_each_pool(pool, pi) { in workqueue_online_cpu()
6655 if (pool->flags & POOL_BH) in workqueue_online_cpu()
6659 if (pool->cpu == cpu) in workqueue_online_cpu()
6660 rebind_workers(pool); in workqueue_online_cpu()
6661 else if (pool->cpu < 0) in workqueue_online_cpu()
6662 restore_unbound_workers_cpumask(pool, cpu); in workqueue_online_cpu()
7487 static void show_cpu_pool_hog(struct worker_pool *pool) in show_cpu_pool_hog() argument
7493 raw_spin_lock_irqsave(&pool->lock, irq_flags); in show_cpu_pool_hog()
7495 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_cpu_pool_hog()
7504 pr_info("pool %d:\n", pool->id); in show_cpu_pool_hog()
7511 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); in show_cpu_pool_hog()
7516 struct worker_pool *pool; in show_cpu_pools_hogs() local
7523 for_each_pool(pool, pi) { in show_cpu_pools_hogs()
7524 if (pool->cpu_stall) in show_cpu_pools_hogs()
7525 show_cpu_pool_hog(pool); in show_cpu_pools_hogs()
7557 struct worker_pool *pool; in wq_watchdog_timer_fn() local
7565 for_each_pool(pool, pi) { in wq_watchdog_timer_fn()
7568 pool->cpu_stall = false; in wq_watchdog_timer_fn()
7569 if (list_empty(&pool->worklist)) in wq_watchdog_timer_fn()
7579 if (pool->cpu >= 0) in wq_watchdog_timer_fn()
7580 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); in wq_watchdog_timer_fn()
7583 pool_ts = READ_ONCE(pool->watchdog_ts); in wq_watchdog_timer_fn()
7593 if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) { in wq_watchdog_timer_fn()
7594 pool->cpu_stall = true; in wq_watchdog_timer_fn()
7598 pr_cont_pool_info(pool); in wq_watchdog_timer_fn()
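
The watchdog in wq_watchdog_timer_fn() flags a pool as stalled when its worklist is non-empty and neither the pool's watchdog_ts nor the per-CPU "touched" timestamp has advanced within the threshold. A sketch of that comparison, with plain integers in place of jiffies:

/* Sketch of the stall test performed by the workqueue watchdog. */
#include <stdbool.h>
#include <stdio.h>

static bool pool_stalled(unsigned long now, unsigned long pool_ts,
			 unsigned long touched, unsigned long thresh,
			 bool worklist_empty)
{
	unsigned long ts = pool_ts > touched ? pool_ts : touched;

	return !worklist_empty && now - ts > thresh;
}

int main(void)
{
	/* work pending, last progress 45 "jiffies" ago, threshold 30 */
	printf("stalled=%d\n", pool_stalled(100, 55, 40, 30, false));
	return 0;
}
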
7708 static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice) in init_cpu_worker_pool() argument
7710 BUG_ON(init_worker_pool(pool)); in init_cpu_worker_pool()
7711 pool->cpu = cpu; in init_cpu_worker_pool()
7712 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in init_cpu_worker_pool()
7713 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); in init_cpu_worker_pool()
7714 pool->attrs->nice = nice; in init_cpu_worker_pool()
7715 pool->attrs->affn_strict = true; in init_cpu_worker_pool()
7716 pool->node = cpu_to_node(cpu); in init_cpu_worker_pool()
7720 BUG_ON(worker_pool_assign_id(pool)); in init_cpu_worker_pool()
7785 struct worker_pool *pool; in workqueue_init_early() local
7788 for_each_bh_worker_pool(pool, cpu) { in workqueue_init_early()
7789 init_cpu_worker_pool(pool, cpu, std_nice[i]); in workqueue_init_early()
7790 pool->flags |= POOL_BH; in workqueue_init_early()
7791 init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]); in workqueue_init_early()
7796 for_each_cpu_worker_pool(pool, cpu) in workqueue_init_early()
7797 init_cpu_worker_pool(pool, cpu, std_nice[i++]); in workqueue_init_early()
7891 struct worker_pool *pool; in workqueue_init() local
7903 for_each_bh_worker_pool(pool, cpu) in workqueue_init()
7904 pool->node = cpu_to_node(cpu); in workqueue_init()
7905 for_each_cpu_worker_pool(pool, cpu) in workqueue_init()
7906 pool->node = cpu_to_node(cpu); in workqueue_init()
7924 for_each_bh_worker_pool(pool, cpu) in workqueue_init()
7925 BUG_ON(!create_worker(pool)); in workqueue_init()
7928 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init()
7929 pool->flags &= ~POOL_DISASSOCIATED; in workqueue_init()
7930 BUG_ON(!create_worker(pool)); in workqueue_init()
7934 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) in workqueue_init()
7935 BUG_ON(!create_worker(pool)); in workqueue_init()