Lines matching "pool" (full-word search) in kernel/workqueue.c
3 * kernel/workqueue.c - generic async execution with shared worker pool
19 * executed in process context. The worker pool is shared and
65 * A bound pool is either associated with or disassociated from its CPU.
72 * be executing on any CPU. The pool behaves as an unbound one.
79 * BH pool is per-CPU and always DISASSOCIATED.
81 POOL_BH = 1 << 0, /* is a BH pool */
108 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
148 * L: pool->lock protected. Access with pool->lock held.
150 * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
153 * K: Only modified by worker while holding pool->lock. Can be safely read by
154 * self, while holding pool->lock or from IRQ context if %current is the
185 raw_spinlock_t lock; /* the pool lock */
188 int id; /* I: pool ID */
192 bool cpu_stall; /* WD: stalled cpu bound pool */
197 * but w/ pool->lock held. The readers grab pool->lock and are
229 * Destruction of pool is RCU protected to allow dereferences
253 * The per-pool workqueue. While queued, bits below WORK_PWQ_SHIFT
259 struct worker_pool *pool; /* I: the associated pool */ member
272 * pwq->inactive_works instead of pool->worklist and marked with
279 * pool->worklist or worker->scheduled. Those work items are only struct
329 raw_spinlock_t lock; /* nests inside pool locks */
492 /* PL: hash of all unbound pools keyed by pool->attrs */
503 * process context while holding a pool lock. Bounce to a dedicated kthread
534 static void show_one_worker_pool(struct worker_pool *pool);
544 #define for_each_bh_worker_pool(pool, cpu) \ argument
545 for ((pool) = &per_cpu(bh_worker_pools, cpu)[0]; \
546 (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
547 (pool)++)
549 #define for_each_cpu_worker_pool(pool, cpu) \ argument
550 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
551 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
552 (pool)++)
556 * @pool: iteration cursor
560 * locked. If the pool needs to be used beyond the locking in effect, the
561 * caller is responsible for guaranteeing that the pool stays online.
566 #define for_each_pool(pool, pi) \ argument
567 idr_for_each_entry(&worker_pool_idr, pool, pi) \
574 * @pool: worker_pool to iterate workers of
581 #define for_each_pool_worker(worker, pool) \ argument
582 list_for_each_entry((worker), &(pool)->workers, node) \
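The pool iterators above are file-local helpers. A minimal usage sketch, assuming it lives inside kernel/workqueue.c where struct worker_pool and the iterators are visible (dump_cpu_pools() is a hypothetical debug helper, not part of the file):

static void dump_cpu_pools(int cpu)
{
	struct worker_pool *pool;

	for_each_cpu_worker_pool(pool, cpu) {
		/* nr_workers/nr_idle are pool->lock protected ("L:" above) */
		raw_spin_lock_irq(&pool->lock);
		pr_info("pool %d: workers=%d idle=%d\n",
			pool->id, pool->nr_workers, pool->nr_idle);
		raw_spin_unlock_irq(&pool->lock);
	}
}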
700 * worker_pool_assign_id - allocate ID and assign it to @pool
701 * @pool: the pool pointer of interest
706 static int worker_pool_assign_id(struct worker_pool *pool) in worker_pool_assign_id() argument
712 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, in worker_pool_assign_id()
715 pool->id = ret; in worker_pool_assign_id()
744 * default pwq is always mapped to the pool with the current effective cpumask.
748 return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask; in unbound_effective_cpumask()
767 static unsigned long pool_offq_flags(struct worker_pool *pool) in pool_offq_flags() argument
769 return (pool->flags & POOL_BH) ? WORK_OFFQ_BH : 0; in pool_offq_flags()
775 * is cleared and the high bits contain OFFQ flags and pool ID.
778 * can be used to set the pwq, pool or clear work->data. These functions should
781 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
782 * corresponding to a work. Pool is available once the work has been
872 * All fields of the returned pool are accessible as long as the above
873 * mentioned locking is in effect. If the returned pool needs to be used
875 * returned pool is and stays online.
887 return work_struct_pwq(data)->pool; in get_work_pool()
921 * they're being called with pool->lock held.
932 static bool need_more_worker(struct worker_pool *pool) in need_more_worker() argument
934 return !list_empty(&pool->worklist) && !pool->nr_running; in need_more_worker()
938 static bool may_start_working(struct worker_pool *pool) in may_start_working() argument
940 return pool->nr_idle; in may_start_working()
944 static bool keep_working(struct worker_pool *pool) in keep_working() argument
946 return !list_empty(&pool->worklist) && (pool->nr_running <= 1); in keep_working()
950 static bool need_to_create_worker(struct worker_pool *pool) in need_to_create_worker() argument
952 return need_more_worker(pool) && !may_start_working(pool); in need_to_create_worker()
956 static bool too_many_workers(struct worker_pool *pool) in too_many_workers() argument
958 bool managing = pool->flags & POOL_MANAGER_ACTIVE; in too_many_workers()
959 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ in too_many_workers()
960 int nr_busy = pool->nr_workers - nr_idle; in too_many_workers()
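The matched lines stop before the return statement of too_many_workers(). For context, in current mainline the check completes roughly as below (MAX_IDLE_WORKERS_RATIO is 4): beyond the two always-kept idle workers, at most one idle worker is tolerated per MAX_IDLE_WORKERS_RATIO busy ones.

	/* completion of too_many_workers(); not part of the matched lines above */
	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;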
974 struct worker_pool *pool = worker->pool; in worker_set_flags() local
976 lockdep_assert_held(&pool->lock); in worker_set_flags()
981 pool->nr_running--; in worker_set_flags()
996 struct worker_pool *pool = worker->pool; in worker_clr_flags() local
999 lockdep_assert_held(&pool->lock); in worker_clr_flags()
1010 pool->nr_running++; in worker_clr_flags()
1013 /* Return the first idle worker. Called with pool->lock held. */
1014 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker() argument
1016 if (unlikely(list_empty(&pool->idle_list))) in first_idle_worker()
1019 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
1030 * raw_spin_lock_irq(pool->lock).
1034 struct worker_pool *pool = worker->pool; in worker_enter_idle() local
1043 pool->nr_idle++; in worker_enter_idle()
1047 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1049 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) in worker_enter_idle()
1050 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); in worker_enter_idle()
1053 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running); in worker_enter_idle()
1063 * raw_spin_lock_irq(pool->lock).
1067 struct worker_pool *pool = worker->pool; in worker_leave_idle() local
1072 pool->nr_idle--; in worker_leave_idle()
1078 * @pool: pool of interest
1081 * Find a worker which is executing @work on @pool by searching
1082 * @pool->busy_hash which is keyed by the address of @work. For a worker
1103 * raw_spin_lock_irq(pool->lock).
1109 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work() argument
1114 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1135 * raw_spin_lock_irq(pool->lock).
1168 * executed by another worker in the same pool, it'll be punted there.
1180 struct worker_pool *pool = worker->pool; in assign_work() local
1183 lockdep_assert_held(&pool->lock); in assign_work()
1187 * __queue_work() ensures that @work doesn't jump to a different pool in assign_work()
1188 * while still running in the previous pool. Here, we should ensure that in assign_work()
1190 * pool. Check whether anyone is already processing the work. If so, in assign_work()
1193 collision = find_worker_executing_work(pool, work); in assign_work()
1203 static struct irq_work *bh_pool_irq_work(struct worker_pool *pool) in bh_pool_irq_work() argument
1205 int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0; in bh_pool_irq_work()
1207 return &per_cpu(bh_pool_irq_works, pool->cpu)[high]; in bh_pool_irq_work()
1210 static void kick_bh_pool(struct worker_pool *pool) in kick_bh_pool() argument
1214 if (unlikely(pool->cpu != smp_processor_id() && in kick_bh_pool()
1215 !(pool->flags & POOL_BH_DRAINING))) { in kick_bh_pool()
1216 irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); in kick_bh_pool()
1220 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in kick_bh_pool()
1228 * @pool: pool to kick
1230 * @pool may have pending work items. Wake up worker if necessary. Returns
1233 static bool kick_pool(struct worker_pool *pool) in kick_pool() argument
1235 struct worker *worker = first_idle_worker(pool); in kick_pool()
1238 lockdep_assert_held(&pool->lock); in kick_pool()
1240 if (!need_more_worker(pool) || !worker) in kick_pool()
1243 if (pool->flags & POOL_BH) { in kick_pool()
1244 kick_bh_pool(pool); in kick_pool()
1264 * If @pool has non-strict affinity, @worker might have ended up outside in kick_pool()
1267 if (!pool->attrs->affn_strict && in kick_pool()
1268 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { in kick_pool()
1269 struct work_struct *work = list_first_entry(&pool->worklist, in kick_pool()
1271 int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask, in kick_pool()
1395 * and leave with an unexpected pool->nr_running == 1 on the newly unbound in wq_worker_running()
1396 * pool. Protect against such race. in wq_worker_running()
1400 worker->pool->nr_running++; in wq_worker_running()
1422 struct worker_pool *pool; in wq_worker_sleeping() local
1432 pool = worker->pool; in wq_worker_sleeping()
1439 raw_spin_lock_irq(&pool->lock); in wq_worker_sleeping()
1447 raw_spin_unlock_irq(&pool->lock); in wq_worker_sleeping()
1451 pool->nr_running--; in wq_worker_sleeping()
1452 if (kick_pool(pool)) in wq_worker_sleeping()
1455 raw_spin_unlock_irq(&pool->lock); in wq_worker_sleeping()
1469 struct worker_pool *pool = worker->pool; in wq_worker_tick() local
1486 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also in wq_worker_tick()
1496 raw_spin_lock(&pool->lock); in wq_worker_tick()
1502 if (kick_pool(pool)) in wq_worker_tick()
1505 raw_spin_unlock(&pool->lock); in wq_worker_tick()
1621 * @pwq has positive refcnt and be holding the matching pool->lock.
1625 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1635 * destruction. The caller should be holding the matching pool->lock.
1639 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1643 * @pwq can't be released under pool->lock, bounce to a dedicated in put_pwq()
1650 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1662 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1664 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1680 if (list_empty(&pwq->pool->worklist)) in __pwq_activate_work()
1681 pwq->pool->watchdog_ts = jiffies; in __pwq_activate_work()
1682 move_linked_works(work, &pwq->pool->worklist, NULL); in __pwq_activate_work()
1710 struct worker_pool *pool = pwq->pool; in pwq_tryinc_nr_active() local
1711 struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node); in pwq_tryinc_nr_active()
1714 lockdep_assert_held(&pool->lock); in pwq_tryinc_nr_active()
1830 raw_spin_lock_irq(&pwq->pool->lock); in unplug_oldest_pwq()
1834 kick_pool(pwq->pool); in unplug_oldest_pwq()
1836 raw_spin_unlock_irq(&pwq->pool->lock); in unplug_oldest_pwq()
1864 * If @pwq is for a different pool than @locked_pool, we need to lock in node_activate_pending_pwq()
1865 * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock in node_activate_pending_pwq()
1867 * nested inside pool locks. in node_activate_pending_pwq()
1869 if (pwq->pool != locked_pool) { in node_activate_pending_pwq()
1871 locked_pool = pwq->pool; in node_activate_pending_pwq()
1907 /* if activating a foreign pool, make sure it's running */ in node_activate_pending_pwq()
1908 if (pwq->pool != caller_pool) in node_activate_pending_pwq()
1909 kick_pool(pwq->pool); in node_activate_pending_pwq()
1925 * For unbound workqueues, this function may temporarily drop @pwq->pool->lock.
1929 struct worker_pool *pool = pwq->pool; in pwq_dec_nr_active() local
1930 struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node); in pwq_dec_nr_active()
1932 lockdep_assert_held(&pool->lock); in pwq_dec_nr_active()
1967 node_activate_pending_pwq(nna, pool); in pwq_dec_nr_active()
1979 * For unbound workqueues, this function may temporarily drop @pwq->pool->lock
1984 * raw_spin_lock_irq(pool->lock).
2047 struct worker_pool *pool; in try_to_grab_pending() local
2074 pool = get_work_pool(work); in try_to_grab_pending()
2075 if (!pool) in try_to_grab_pending()
2078 raw_spin_lock(&pool->lock); in try_to_grab_pending()
2082 * to pwq on queueing and to pool on dequeueing are done under in try_to_grab_pending()
2083 * pwq->pool->lock. This in turn guarantees that, if work->data in try_to_grab_pending()
2084 * points to pwq which is associated with a locked pool, the work in try_to_grab_pending()
2085 * item is currently queued on that pool. in try_to_grab_pending()
2088 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
2108 move_linked_works(work, &pwq->pool->worklist, NULL); in try_to_grab_pending()
2113 * work->data points to pwq iff queued. Let's point to pool. As in try_to_grab_pending()
2116 set_work_pool_and_keep_pending(work, pool->id, in try_to_grab_pending()
2117 pool_offq_flags(pool)); in try_to_grab_pending()
2122 raw_spin_unlock(&pool->lock); in try_to_grab_pending()
2126 raw_spin_unlock(&pool->lock); in try_to_grab_pending()
2162 * insert_work - insert a work into a pool
2172 * raw_spin_lock_irq(pool->lock).
2233 struct worker_pool *last_pool, *pool; in __queue_work() local
2266 pool = pwq->pool; in __queue_work()
2269 * If @work was previously on a different pool, it might still be in __queue_work()
2271 * pool to guarantee non-reentrancy. in __queue_work()
2278 if (last_pool && last_pool != pool && !(wq->flags & __WQ_ORDERED)) { in __queue_work()
2287 pool = pwq->pool; in __queue_work()
2288 WARN_ON_ONCE(pool != last_pool); in __queue_work()
2292 raw_spin_lock(&pool->lock); in __queue_work()
2295 raw_spin_lock(&pool->lock); in __queue_work()
2307 raw_spin_unlock(&pool->lock); in __queue_work()
2331 if (list_empty(&pool->worklist)) in __queue_work()
2332 pool->watchdog_ts = jiffies; in __queue_work()
2335 insert_work(pwq, work, &pool->worklist, work_flags); in __queue_work()
2336 kick_pool(pool); in __queue_work()
2343 raw_spin_unlock(&pool->lock); in __queue_work()
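__queue_work() above is the common backend behind the public queueing API. A self-contained, driver-style caller sketch (my_handler/my_work are hypothetical names, not from this file):

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_handler(struct work_struct *work)
{
	pr_info("executed in process context by a worker pool\n");
}

static DECLARE_WORK(my_work, my_handler);

static int __init my_init(void)
{
	/* ends up in __queue_work() -> insert_work() -> kick_pool() */
	schedule_work(&my_work);
	return 0;
}

static void __exit my_exit(void)
{
	cancel_work_sync(&my_work);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");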
2652 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) in pool_allowed_cpus() argument
2654 if (pool->cpu < 0 && pool->attrs->affn_strict) in pool_allowed_cpus()
2655 return pool->attrs->__pod_cpumask; in pool_allowed_cpus()
2657 return pool->attrs->cpumask; in pool_allowed_cpus()
2661 * worker_attach_to_pool() - attach a worker to a pool
2663 * @pool: the target pool
2665 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
2666 * cpu-binding of @worker are kept coordinated with the pool across
2670 struct worker_pool *pool) in worker_attach_to_pool() argument
2679 if (pool->flags & POOL_DISASSOCIATED) { in worker_attach_to_pool()
2682 WARN_ON_ONCE(pool->flags & POOL_BH); in worker_attach_to_pool()
2683 kthread_set_per_cpu(worker->task, pool->cpu); in worker_attach_to_pool()
2687 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); in worker_attach_to_pool()
2689 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
2690 worker->pool = pool; in worker_attach_to_pool()
2716 * worker_detach_from_pool() - detach a worker from its pool
2717 * @worker: worker which is attached to its pool
2720 * caller worker shouldn't access the pool after detaching unless it holds
2721 * another reference to the pool.
2725 struct worker_pool *pool = worker->pool; in worker_detach_from_pool() local
2728 WARN_ON_ONCE(pool->flags & POOL_BH); in worker_detach_from_pool()
2732 worker->pool = NULL; in worker_detach_from_pool()
2735 /* clear leftover flags without pool->lock after it is detached */ in worker_detach_from_pool()
2740 struct worker_pool *pool) in format_worker_id() argument
2746 if (pool) { in format_worker_id()
2747 if (pool->cpu >= 0) in format_worker_id()
2749 pool->cpu, worker->id, in format_worker_id()
2750 pool->attrs->nice < 0 ? "H" : ""); in format_worker_id()
2753 pool->id, worker->id); in format_worker_id()
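For reference, the format strings visible above produce worker thread names of the following shapes (illustrative values, not taken from the file):

/*
 * kworker/3:1    - worker 1 of the normal-priority per-CPU pool on CPU 3
 * kworker/3:0H   - worker 0 of the highpri (attrs->nice < 0) pool on CPU 3
 * kworker/u17:2  - worker 2 of the unbound pool with pool->id 17
 */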
2761 * @pool: pool the new worker will belong to
2763 * Create and start a new worker which is attached to @pool.
2771 static struct worker *create_worker(struct worker_pool *pool) in create_worker() argument
2777 id = ida_alloc(&pool->worker_ida, GFP_KERNEL); in create_worker()
2784 worker = alloc_worker(pool->node); in create_worker()
2792 if (!(pool->flags & POOL_BH)) { in create_worker()
2795 format_worker_id(id_buf, sizeof(id_buf), worker, pool); in create_worker()
2797 pool->node, "%s", id_buf); in create_worker()
2809 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
2810 kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); in create_worker()
2813 /* successful, attach the worker to the pool */ in create_worker()
2814 worker_attach_to_pool(worker, pool); in create_worker()
2817 raw_spin_lock_irq(&pool->lock); in create_worker()
2819 worker->pool->nr_workers++; in create_worker()
2824 * check if not woken up soon. As kick_pool() is noop if @pool is empty, in create_worker()
2830 raw_spin_unlock_irq(&pool->lock); in create_worker()
2835 ida_free(&pool->worker_ida, id); in create_worker()
2862 * @list: transfer worker away from its pool->idle_list and into list
2864 * Tag @worker for destruction and adjust @pool stats accordingly. The worker
2868 * raw_spin_lock_irq(pool->lock).
2872 struct worker_pool *pool = worker->pool; in set_worker_dying() local
2874 lockdep_assert_held(&pool->lock); in set_worker_dying()
2883 pool->nr_workers--; in set_worker_dying()
2884 pool->nr_idle--; in set_worker_dying()
2896 * @t: The pool's idle_timer that just expired
2900 * pool is at the too_many_workers() tipping point would cause too much timer
2906 struct worker_pool *pool = timer_container_of(pool, t, idle_timer); in idle_worker_timeout() local
2909 if (work_pending(&pool->idle_cull_work)) in idle_worker_timeout()
2912 raw_spin_lock_irq(&pool->lock); in idle_worker_timeout()
2914 if (too_many_workers(pool)) { in idle_worker_timeout()
2919 worker = list_last_entry(&pool->idle_list, struct worker, entry); in idle_worker_timeout()
2924 mod_timer(&pool->idle_timer, expires); in idle_worker_timeout()
2926 raw_spin_unlock_irq(&pool->lock); in idle_worker_timeout()
2929 queue_work(system_dfl_wq, &pool->idle_cull_work); in idle_worker_timeout()
2934 * @work: the pool's work for handling these idle workers
2936 * This goes through a pool's idle workers and gets rid of those that have been
2945 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); in idle_cull_fn() local
2955 raw_spin_lock_irq(&pool->lock); in idle_cull_fn()
2957 while (too_many_workers(pool)) { in idle_cull_fn()
2961 worker = list_last_entry(&pool->idle_list, struct worker, entry); in idle_cull_fn()
2965 mod_timer(&pool->idle_timer, expires); in idle_cull_fn()
2972 raw_spin_unlock_irq(&pool->lock); in idle_cull_fn()
3005 struct worker_pool *pool = timer_container_of(pool, t, mayday_timer); in pool_mayday_timeout() local
3008 raw_spin_lock_irq(&pool->lock); in pool_mayday_timeout()
3011 if (need_to_create_worker(pool)) { in pool_mayday_timeout()
3018 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
3023 raw_spin_unlock_irq(&pool->lock); in pool_mayday_timeout()
3025 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); in pool_mayday_timeout()
3030 * @pool: pool to create a new worker for
3032 * Create a new worker for @pool if necessary. @pool is guaranteed to
3035 * sent to all rescuers with works scheduled on @pool to resolve
3042 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3046 static void maybe_create_worker(struct worker_pool *pool) in maybe_create_worker() argument
3047 __releases(&pool->lock) in maybe_create_worker()
3048 __acquires(&pool->lock) in maybe_create_worker()
3051 raw_spin_unlock_irq(&pool->lock); in maybe_create_worker()
3054 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); in maybe_create_worker()
3057 if (create_worker(pool) || !need_to_create_worker(pool)) in maybe_create_worker()
3062 if (!need_to_create_worker(pool)) in maybe_create_worker()
3066 timer_delete_sync(&pool->mayday_timer); in maybe_create_worker()
3067 raw_spin_lock_irq(&pool->lock); in maybe_create_worker()
3070 * created as @pool->lock was dropped and the new worker might have in maybe_create_worker()
3073 if (need_to_create_worker(pool)) in maybe_create_worker()
3078 static void worker_lock_callback(struct worker_pool *pool) in worker_lock_callback() argument
3080 spin_lock(&pool->cb_lock); in worker_lock_callback()
3083 static void worker_unlock_callback(struct worker_pool *pool) in worker_unlock_callback() argument
3085 spin_unlock(&pool->cb_lock); in worker_unlock_callback()
3088 static void workqueue_callback_cancel_wait_running(struct worker_pool *pool) in workqueue_callback_cancel_wait_running() argument
3090 spin_lock(&pool->cb_lock); in workqueue_callback_cancel_wait_running()
3091 spin_unlock(&pool->cb_lock); in workqueue_callback_cancel_wait_running()
3096 static void worker_lock_callback(struct worker_pool *pool) { } in worker_lock_callback() argument
3097 static void worker_unlock_callback(struct worker_pool *pool) { } in worker_unlock_callback() argument
3098 static void workqueue_callback_cancel_wait_running(struct worker_pool *pool) { } in workqueue_callback_cancel_wait_running() argument
3103 * manage_workers - manage worker pool
3106 * Assume the manager role and manage the worker pool @worker belongs
3108 * pool. The exclusion is handled automatically by this function.
3115 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3119 * %false if the pool doesn't need management and the caller can safely
3126 struct worker_pool *pool = worker->pool; in manage_workers() local
3128 if (pool->flags & POOL_MANAGER_ACTIVE) in manage_workers()
3131 pool->flags |= POOL_MANAGER_ACTIVE; in manage_workers()
3132 pool->manager = worker; in manage_workers()
3134 maybe_create_worker(pool); in manage_workers()
3136 pool->manager = NULL; in manage_workers()
3137 pool->flags &= ~POOL_MANAGER_ACTIVE; in manage_workers()
3154 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
3157 __releases(&pool->lock) in process_one_work()
3158 __acquires(&pool->lock) in process_one_work()
3161 struct worker_pool *pool = worker->pool; in process_one_work() local
3164 bool bh_draining = pool->flags & POOL_BH_DRAINING; in process_one_work()
3178 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && in process_one_work()
3179 raw_smp_processor_id() != pool->cpu); in process_one_work()
3183 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
3210 * Kick @pool if necessary. It's always noop for per-cpu worker pools in process_one_work()
3215 kick_pool(pool); in process_one_work()
3218 * Record the last pool and clear PENDING which should be the last in process_one_work()
3219 * update to @work. Also, do this inside @pool->lock so that in process_one_work()
3223 set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool)); in process_one_work()
3226 raw_spin_unlock_irq(&pool->lock); in process_one_work()
3292 raw_spin_lock_irq(&pool->lock); in process_one_work()
3326 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3337 worker->pool->watchdog_ts = jiffies; in process_scheduled_works()
3369 struct worker_pool *pool = worker->pool; in worker_thread() local
3374 raw_spin_lock_irq(&pool->lock); in worker_thread()
3378 raw_spin_unlock_irq(&pool->lock); in worker_thread()
3381 * The worker is dead and PF_WQ_WORKER is cleared, worker->pool in worker_thread()
3384 worker->pool = NULL; in worker_thread()
3385 ida_free(&pool->worker_ida, worker->id); in worker_thread()
3392 if (!need_more_worker(pool)) in worker_thread()
3396 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
3417 list_first_entry(&pool->worklist, in worker_thread()
3422 } while (keep_working(pool)); in worker_thread()
3427 * pool->lock is held and there's no work to process and no need to in worker_thread()
3429 * pool->lock or from local cpu, so setting the current state in worker_thread()
3430 * before releasing pool->lock is enough to prevent losing any in worker_thread()
3435 raw_spin_unlock_irq(&pool->lock); in worker_thread()
3442 struct worker_pool *pool = pwq->pool; in assign_rescuer_work() local
3446 if (!pwq->nr_active || !need_to_create_worker(pool)) in assign_rescuer_work()
3453 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in assign_rescuer_work()
3468 * Regular work processing on a pool may block trying to create a new
3474 * When such condition is possible, the pool summons rescuers of all
3475 * workqueues which have works queued on the pool and let them process
3514 struct worker_pool *pool = pwq->pool; in rescuer_thread() local
3521 worker_attach_to_pool(rescuer, pool); in rescuer_thread()
3523 raw_spin_lock_irq(&pool->lock); in rescuer_thread()
3539 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
3553 * Leave this pool. Notify regular workers; otherwise, we end up in rescuer_thread()
3556 kick_pool(pool); in rescuer_thread()
3558 raw_spin_unlock_irq(&pool->lock); in rescuer_thread()
3563 * Put the reference grabbed by send_mayday(). @pool might in rescuer_thread()
3587 struct worker_pool *pool = worker->pool; in bh_worker() local
3591 worker_lock_callback(pool); in bh_worker()
3592 raw_spin_lock_irq(&pool->lock); in bh_worker()
3599 if (!need_more_worker(pool)) in bh_worker()
3607 list_first_entry(&pool->worklist, in bh_worker()
3612 } while (keep_working(pool) && in bh_worker()
3618 kick_pool(pool); in bh_worker()
3619 raw_spin_unlock_irq(&pool->lock); in bh_worker()
3620 worker_unlock_callback(pool); in bh_worker()
3636 struct worker_pool *pool = in workqueue_softirq_action() local
3638 if (need_more_worker(pool)) in workqueue_softirq_action()
3639 bh_worker(list_first_entry(&pool->workers, struct worker, node)); in workqueue_softirq_action()
3644 struct worker_pool *pool; member
3652 struct worker_pool *pool = dead_work->pool; in drain_dead_softirq_workfn() local
3656 * @pool's CPU is dead and we want to execute its still pending work in drain_dead_softirq_workfn()
3658 * its CPU is dead, @pool can't be kicked and, as work execution path in drain_dead_softirq_workfn()
3660 * @pool with %POOL_BH_DRAINING for the special treatments. in drain_dead_softirq_workfn()
3662 raw_spin_lock_irq(&pool->lock); in drain_dead_softirq_workfn()
3663 pool->flags |= POOL_BH_DRAINING; in drain_dead_softirq_workfn()
3664 raw_spin_unlock_irq(&pool->lock); in drain_dead_softirq_workfn()
3666 bh_worker(list_first_entry(&pool->workers, struct worker, node)); in drain_dead_softirq_workfn()
3668 raw_spin_lock_irq(&pool->lock); in drain_dead_softirq_workfn()
3669 pool->flags &= ~POOL_BH_DRAINING; in drain_dead_softirq_workfn()
3670 repeat = need_more_worker(pool); in drain_dead_softirq_workfn()
3671 raw_spin_unlock_irq(&pool->lock); in drain_dead_softirq_workfn()
3679 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in drain_dead_softirq_workfn()
3694 * items which shouldn't be requeued on the same pool. Shouldn't take long.
3701 struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i]; in workqueue_softirq_dead() local
3704 if (!need_more_worker(pool)) in workqueue_softirq_dead()
3708 dead_work.pool = pool; in workqueue_softirq_dead()
3711 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) in workqueue_softirq_dead()
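The BH machinery above (bh_worker(), the softirq hooks and the dead-CPU draining) backs WQ_BH workqueues, which run work items from softirq context rather than from a kthread. A minimal caller-side sketch (bh_handler/bh_work are hypothetical):

#include <linux/workqueue.h>

static void bh_handler(struct work_struct *work)
{
	/* runs in BH (softirq) context on the queueing CPU; must not sleep */
}

static DECLARE_WORK(bh_work, bh_handler);

static void kick_from_hardirq(void)
{
	/* system_bh_wq is served by the per-CPU BH pools shown above */
	queue_work(system_bh_wq, &bh_work);
}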
3791 * raw_spin_lock_irq(pool->lock).
3803 * debugobject calls are safe here even with pool->lock locked in insert_wq_barrier()
3891 * Most of the time, pwqs within the same pool will be linked in flush_workqueue_prep_pwqs()
3893 * of pwq iters, the pool is the same, only doing lock/unlock in flush_workqueue_prep_pwqs()
3894 * if the pool has changed. This can largely reduce expensive in flush_workqueue_prep_pwqs()
3898 if (current_pool != pwq->pool) { in flush_workqueue_prep_pwqs()
3901 current_pool = pwq->pool; in flush_workqueue_prep_pwqs()
4153 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
4155 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
4179 struct worker_pool *pool; in start_flush_work() local
4184 pool = get_work_pool(work); in start_flush_work()
4185 if (!pool) { in start_flush_work()
4190 raw_spin_lock_irq(&pool->lock); in start_flush_work()
4194 if (unlikely(pwq->pool != pool)) in start_flush_work()
4197 worker = find_worker_executing_work(pool, work); in start_flush_work()
4207 raw_spin_unlock_irq(&pool->lock); in start_flush_work()
4226 raw_spin_unlock_irq(&pool->lock); in start_flush_work()
4263 struct worker_pool *pool; in __flush_work() local
4266 pool = get_work_pool(work); in __flush_work()
4267 if (pool) in __flush_work()
4268 workqueue_callback_cancel_wait_running(pool); in __flush_work()
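__flush_work() above is also the wait path used by the sync cancellation helpers. A short sketch of a self-requeueing delayed work and its teardown (poll_fn/poll_work are hypothetical):

#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... periodic processing ... */
	queue_delayed_work(system_wq, &poll_work, msecs_to_jiffies(100));
}

static void stop_polling(void)
{
	/*
	 * Cancels a pending timer/queueing and waits for a running instance;
	 * the wait ends up in __flush_work() shown above.
	 */
	cancel_delayed_work_sync(&poll_work);
}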
4693 * fields as copying is used for both pool and wq attrs. Instead, in copy_workqueue_attrs()
4788 * @pool: worker_pool to initialize
4790 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
4793 * inside @pool proper are initialized and put_unbound_pool() can be called
4794 * on @pool safely to release it.
4796 static int init_worker_pool(struct worker_pool *pool) in init_worker_pool() argument
4798 raw_spin_lock_init(&pool->lock); in init_worker_pool()
4799 pool->id = -1; in init_worker_pool()
4800 pool->cpu = -1; in init_worker_pool()
4801 pool->node = NUMA_NO_NODE; in init_worker_pool()
4802 pool->flags |= POOL_DISASSOCIATED; in init_worker_pool()
4803 pool->watchdog_ts = jiffies; in init_worker_pool()
4804 INIT_LIST_HEAD(&pool->worklist); in init_worker_pool()
4805 INIT_LIST_HEAD(&pool->idle_list); in init_worker_pool()
4806 hash_init(pool->busy_hash); in init_worker_pool()
4808 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); in init_worker_pool()
4809 INIT_WORK(&pool->idle_cull_work, idle_cull_fn); in init_worker_pool()
4811 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); in init_worker_pool()
4813 INIT_LIST_HEAD(&pool->workers); in init_worker_pool()
4815 ida_init(&pool->worker_ida); in init_worker_pool()
4816 INIT_HLIST_NODE(&pool->hash_node); in init_worker_pool()
4817 pool->refcnt = 1; in init_worker_pool()
4819 spin_lock_init(&pool->cb_lock); in init_worker_pool()
4823 pool->attrs = alloc_workqueue_attrs(); in init_worker_pool()
4824 if (!pool->attrs) in init_worker_pool()
4827 wqattrs_clear_for_pool(pool->attrs); in init_worker_pool()
4945 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); in rcu_free_pool() local
4947 ida_destroy(&pool->worker_ida); in rcu_free_pool()
4948 free_workqueue_attrs(pool->attrs); in rcu_free_pool()
4949 kfree(pool); in rcu_free_pool()
4954 * @pool: worker_pool to put
4956 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
4963 static void put_unbound_pool(struct worker_pool *pool) in put_unbound_pool() argument
4970 if (--pool->refcnt) in put_unbound_pool()
4974 if (WARN_ON(!(pool->cpu < 0)) || in put_unbound_pool()
4975 WARN_ON(!list_empty(&pool->worklist))) in put_unbound_pool()
4979 if (pool->id >= 0) in put_unbound_pool()
4980 idr_remove(&worker_pool_idr, pool->id); in put_unbound_pool()
4981 hash_del(&pool->hash_node); in put_unbound_pool()
4985 * @pool's workers from blocking on attach_mutex. We're the last in put_unbound_pool()
4986 * manager and @pool gets freed with the flag set. in put_unbound_pool()
4990 * pwq->refcnt == pool->refcnt == 0 in put_unbound_pool()
4991 * which implies no work queued to the pool, which implies no worker can in put_unbound_pool()
4994 * drops pool->lock in put_unbound_pool()
4998 !(pool->flags & POOL_MANAGER_ACTIVE), in put_unbound_pool()
5002 raw_spin_lock_irq(&pool->lock); in put_unbound_pool()
5003 if (!(pool->flags & POOL_MANAGER_ACTIVE)) { in put_unbound_pool()
5004 pool->flags |= POOL_MANAGER_ACTIVE; in put_unbound_pool()
5007 raw_spin_unlock_irq(&pool->lock); in put_unbound_pool()
5011 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
5013 WARN_ON(pool->nr_workers || pool->nr_idle); in put_unbound_pool()
5014 raw_spin_unlock_irq(&pool->lock); in put_unbound_pool()
5023 timer_delete_sync(&pool->idle_timer); in put_unbound_pool()
5024 cancel_work_sync(&pool->idle_cull_work); in put_unbound_pool()
5025 timer_delete_sync(&pool->mayday_timer); in put_unbound_pool()
5028 call_rcu(&pool->rcu, rcu_free_pool); in put_unbound_pool()
5049 struct worker_pool *pool; in get_unbound_pool() local
5054 /* do we already have a matching pool? */ in get_unbound_pool()
5055 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { in get_unbound_pool()
5056 if (wqattrs_equal(pool->attrs, attrs)) { in get_unbound_pool()
5057 pool->refcnt++; in get_unbound_pool()
5058 return pool; in get_unbound_pool()
5071 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); in get_unbound_pool()
5072 if (!pool || init_worker_pool(pool) < 0) in get_unbound_pool()
5075 pool->node = node; in get_unbound_pool()
5076 copy_workqueue_attrs(pool->attrs, attrs); in get_unbound_pool()
5077 wqattrs_clear_for_pool(pool->attrs); in get_unbound_pool()
5079 if (worker_pool_assign_id(pool) < 0) in get_unbound_pool()
5083 if (wq_online && !create_worker(pool)) in get_unbound_pool()
5087 hash_add(unbound_pool_hash, &pool->hash_node, hash); in get_unbound_pool()
5089 return pool; in get_unbound_pool()
5091 if (pool) in get_unbound_pool()
5092 put_unbound_pool(pool); in get_unbound_pool()
5105 struct worker_pool *pool = pwq->pool; in pwq_release_workfn() local
5128 put_unbound_pool(pool); in pwq_release_workfn()
5134 wq_node_nr_active(pwq->wq, pwq->pool->node); in pwq_release_workfn()
5153 /* initialize newly allocated @pwq which is associated with @wq and @pool */
5155 struct worker_pool *pool) in init_pwq() argument
5161 pwq->pool = pool; in init_pwq()
5190 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
5194 struct worker_pool *pool; in alloc_unbound_pwq() local
5199 pool = get_unbound_pool(attrs); in alloc_unbound_pwq()
5200 if (!pool) in alloc_unbound_pwq()
5203 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
5205 put_unbound_pool(pool); in alloc_unbound_pwq()
5209 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
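get_unbound_pool() and alloc_unbound_pwq() above are reached when an unbound workqueue is created; matching attrs are hashed so pools are shared between workqueues. A caller-side sketch (my_unbound_wq is a hypothetical name):

#include <linux/workqueue.h>

static struct workqueue_struct *my_unbound_wq;

static int my_setup(void)
{
	/* WQ_UNBOUND workqueues are backed by the attrs-hashed pools above */
	my_unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);
	return my_unbound_wq ? 0 : -ENOMEM;
}

static void my_teardown(void)
{
	destroy_workqueue(my_unbound_wq);
}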
5474 if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) in unbound_wq_update_pwq()
5493 raw_spin_lock_irq(&pwq->pool->lock); in unbound_wq_update_pwq()
5495 raw_spin_unlock_irq(&pwq->pool->lock); in unbound_wq_update_pwq()
5523 struct worker_pool *pool; in alloc_and_link_pwqs() local
5525 pool = &(per_cpu_ptr(pools, cpu)[highpri]); in alloc_and_link_pwqs()
5529 pool->node); in alloc_and_link_pwqs()
5533 init_pwq(*pwq_p, wq, pool); in alloc_and_link_pwqs()
5681 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); in wq_adjust_max_active()
5684 kick_pool(pwq->pool); in wq_adjust_max_active()
5686 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); in wq_adjust_max_active()
5920 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
5925 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
5931 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
6104 struct worker_pool *pool; in work_busy() local
6112 pool = get_work_pool(work); in work_busy()
6113 if (pool) { in work_busy()
6114 raw_spin_lock_irqsave(&pool->lock, irq_flags); in work_busy()
6115 if (find_worker_executing_work(pool, work)) in work_busy()
6117 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); in work_busy()
6197 static void pr_cont_pool_info(struct worker_pool *pool) in pr_cont_pool_info() argument
6199 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); in pr_cont_pool_info()
6200 if (pool->node != NUMA_NO_NODE) in pr_cont_pool_info()
6201 pr_cont(" node=%d", pool->node); in pr_cont_pool_info()
6202 pr_cont(" flags=0x%x", pool->flags); in pr_cont_pool_info()
6203 if (pool->flags & POOL_BH) in pr_cont_pool_info()
6205 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); in pr_cont_pool_info()
6207 pr_cont(" nice=%d", pool->attrs->nice); in pr_cont_pool_info()
6212 struct worker_pool *pool = worker->pool; in pr_cont_worker_id() local
6214 if (pool->flags & WQ_BH) in pr_cont_worker_id()
6216 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); in pr_cont_worker_id()
6269 struct worker_pool *pool = pwq->pool; in show_pwq() local
6275 pr_info(" pwq %d:", pool->id); in show_pwq()
6276 pr_cont_pool_info(pool); in show_pwq()
6282 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
6292 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
6307 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
6317 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
6363 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); in show_one_workqueue()
6374 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); in show_one_workqueue()
6386 * show_one_worker_pool - dump state of specified worker pool
6387 * @pool: worker pool whose state will be printed
6389 static void show_one_worker_pool(struct worker_pool *pool) in show_one_worker_pool() argument
6396 raw_spin_lock_irqsave(&pool->lock, irq_flags); in show_one_worker_pool()
6397 if (pool->nr_workers == pool->nr_idle) in show_one_worker_pool()
6401 if (!list_empty(&pool->worklist)) in show_one_worker_pool()
6402 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; in show_one_worker_pool()
6410 pr_info("pool %d:", pool->id); in show_one_worker_pool()
6411 pr_cont_pool_info(pool); in show_one_worker_pool()
6412 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); in show_one_worker_pool()
6413 if (pool->manager) in show_one_worker_pool()
6415 task_pid_nr(pool->manager->task)); in show_one_worker_pool()
6416 list_for_each_entry(worker, &pool->idle_list, entry) { in show_one_worker_pool()
6424 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); in show_one_worker_pool()
6442 struct worker_pool *pool; in show_all_workqueues() local
6452 for_each_pool(pool, pi) in show_all_workqueues()
6453 show_one_worker_pool(pool); in show_all_workqueues()
6484 /* stabilize PF_WQ_WORKER and worker pool association */ in wq_worker_comm()
6489 struct worker_pool *pool = worker->pool; in wq_worker_comm() local
6492 off = format_worker_id(buf, size, worker, pool); in wq_worker_comm()
6494 if (pool) { in wq_worker_comm()
6495 raw_spin_lock_irq(&pool->lock); in wq_worker_comm()
6509 raw_spin_unlock_irq(&pool->lock); in wq_worker_comm()
6525 * pool which make migrating pending and scheduled works very
6537 struct worker_pool *pool; in unbind_workers() local
6540 for_each_cpu_worker_pool(pool, cpu) { in unbind_workers()
6542 raw_spin_lock_irq(&pool->lock); in unbind_workers()
6552 for_each_pool_worker(worker, pool) in unbind_workers()
6555 pool->flags |= POOL_DISASSOCIATED; in unbind_workers()
6561 * long as the worklist is not empty. This pool now behaves as in unbind_workers()
6562 * an unbound (in terms of concurrency management) pool which in unbind_workers()
6563 * is served by workers tied to the pool. in unbind_workers()
6565 pool->nr_running = 0; in unbind_workers()
6572 kick_pool(pool); in unbind_workers()
6574 raw_spin_unlock_irq(&pool->lock); in unbind_workers()
6576 for_each_pool_worker(worker, pool) in unbind_workers()
6584 * rebind_workers - rebind all workers of a pool to the associated CPU
6585 * @pool: pool of interest
6587 * @pool->cpu is coming online. Rebind all workers to the CPU.
6589 static void rebind_workers(struct worker_pool *pool) in rebind_workers() argument
6602 for_each_pool_worker(worker, pool) { in rebind_workers()
6603 kthread_set_per_cpu(worker->task, pool->cpu); in rebind_workers()
6605 pool_allowed_cpus(pool)) < 0); in rebind_workers()
6608 raw_spin_lock_irq(&pool->lock); in rebind_workers()
6610 pool->flags &= ~POOL_DISASSOCIATED; in rebind_workers()
6612 for_each_pool_worker(worker, pool) { in rebind_workers()
6636 raw_spin_unlock_irq(&pool->lock); in rebind_workers()
6641 * @pool: unbound pool of interest
6644 * An unbound pool may end up with a cpumask which doesn't have any online
6645 * CPUs. When a worker of such pool get scheduled, the scheduler resets
6646 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
6649 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) in restore_unbound_workers_cpumask() argument
6656 /* is @cpu allowed for @pool? */ in restore_unbound_workers_cpumask()
6657 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) in restore_unbound_workers_cpumask()
6660 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); in restore_unbound_workers_cpumask()
6663 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
6669 struct worker_pool *pool; in workqueue_prepare_cpu() local
6671 for_each_cpu_worker_pool(pool, cpu) { in workqueue_prepare_cpu()
6672 if (pool->nr_workers) in workqueue_prepare_cpu()
6674 if (!create_worker(pool)) in workqueue_prepare_cpu()
6682 struct worker_pool *pool; in workqueue_online_cpu() local
6690 for_each_pool(pool, pi) { in workqueue_online_cpu()
6692 if (pool->flags & POOL_BH) in workqueue_online_cpu()
6696 if (pool->cpu == cpu) in workqueue_online_cpu()
6697 rebind_workers(pool); in workqueue_online_cpu()
6698 else if (pool->cpu < 0) in workqueue_online_cpu()
6699 restore_unbound_workers_cpumask(pool, cpu); in workqueue_online_cpu()
6806 * pool->worklist.
6809 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6879 * frozen works are transferred to their respective pool worklists.
6882 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6937 struct worker_pool *pool; in workqueue_apply_unbound_cpumask() local
6944 if (wq->rescuer && !wq->rescuer->pool) in workqueue_apply_unbound_cpumask()
6949 for_each_cpu_worker_pool(pool, cpu) { in workqueue_apply_unbound_cpumask()
6950 if (!(pool->flags & POOL_DISASSOCIATED)) in workqueue_apply_unbound_cpumask()
6952 for_each_pool_worker(worker, pool) in workqueue_apply_unbound_cpumask()
7517 static void show_cpu_pool_hog(struct worker_pool *pool) in show_cpu_pool_hog() argument
7523 raw_spin_lock_irqsave(&pool->lock, irq_flags); in show_cpu_pool_hog()
7525 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_cpu_pool_hog()
7534 pr_info("pool %d:\n", pool->id); in show_cpu_pool_hog()
7541 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); in show_cpu_pool_hog()
7546 struct worker_pool *pool; in show_cpu_pools_hogs() local
7553 for_each_pool(pool, pi) { in show_cpu_pools_hogs()
7554 if (pool->cpu_stall) in show_cpu_pools_hogs()
7555 show_cpu_pool_hog(pool); in show_cpu_pools_hogs()
7587 struct worker_pool *pool; in wq_watchdog_timer_fn() local
7593 for_each_pool(pool, pi) { in wq_watchdog_timer_fn()
7596 pool->cpu_stall = false; in wq_watchdog_timer_fn()
7597 if (list_empty(&pool->worklist)) in wq_watchdog_timer_fn()
7606 /* get the latest of pool and touched timestamps */ in wq_watchdog_timer_fn()
7607 if (pool->cpu >= 0) in wq_watchdog_timer_fn()
7608 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); in wq_watchdog_timer_fn()
7611 pool_ts = READ_ONCE(pool->watchdog_ts); in wq_watchdog_timer_fn()
7621 if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) { in wq_watchdog_timer_fn()
7622 pool->cpu_stall = true; in wq_watchdog_timer_fn()
7625 pr_emerg("BUG: workqueue lockup - pool"); in wq_watchdog_timer_fn()
7626 pr_cont_pool_info(pool); in wq_watchdog_timer_fn()
7734 static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice) in init_cpu_worker_pool() argument
7736 BUG_ON(init_worker_pool(pool)); in init_cpu_worker_pool()
7737 pool->cpu = cpu; in init_cpu_worker_pool()
7738 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); in init_cpu_worker_pool()
7739 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); in init_cpu_worker_pool()
7740 pool->attrs->nice = nice; in init_cpu_worker_pool()
7741 pool->attrs->affn_strict = true; in init_cpu_worker_pool()
7742 pool->node = cpu_to_node(cpu); in init_cpu_worker_pool()
7744 /* alloc pool ID */ in init_cpu_worker_pool()
7746 BUG_ON(worker_pool_assign_id(pool)); in init_cpu_worker_pool()
7812 struct worker_pool *pool; in workqueue_init_early() local
7815 for_each_bh_worker_pool(pool, cpu) { in workqueue_init_early()
7816 init_cpu_worker_pool(pool, cpu, std_nice[i]); in workqueue_init_early()
7817 pool->flags |= POOL_BH; in workqueue_init_early()
7818 init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]); in workqueue_init_early()
7823 for_each_cpu_worker_pool(pool, cpu) in workqueue_init_early()
7824 init_cpu_worker_pool(pool, cpu, std_nice[i++]); in workqueue_init_early()
7919 struct worker_pool *pool; in workqueue_init() local
7931 for_each_bh_worker_pool(pool, cpu) in workqueue_init()
7932 pool->node = cpu_to_node(cpu); in workqueue_init()
7933 for_each_cpu_worker_pool(pool, cpu) in workqueue_init()
7934 pool->node = cpu_to_node(cpu); in workqueue_init()
7946 * Create the initial workers. A BH pool has one pseudo worker that in workqueue_init()
7952 for_each_bh_worker_pool(pool, cpu) in workqueue_init()
7953 BUG_ON(!create_worker(pool)); in workqueue_init()
7956 for_each_cpu_worker_pool(pool, cpu) { in workqueue_init()
7957 pool->flags &= ~POOL_DISASSOCIATED; in workqueue_init()
7958 BUG_ON(!create_worker(pool)); in workqueue_init()
7962 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) in workqueue_init()
7963 BUG_ON(!create_worker(pool)); in workqueue_init()
8054 * worker pool. Explicitly call unbound_wq_update_pwq() on all workqueue in workqueue_init_topology()