Lines matching refs: wq (cross-reference listing, kernel/workqueue.c)
260 struct workqueue_struct *wq; /* I: the owning workqueue */ member
532 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
598 #define for_each_pwq(pwq, wq) \ argument
599 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
600 lockdep_is_held(&(wq->mutex)))
722 unbound_pwq_slot(struct workqueue_struct *wq, int cpu) in unbound_pwq_slot() argument
725 return per_cpu_ptr(wq->cpu_pwq, cpu); in unbound_pwq_slot()
727 return &wq->dfl_pwq; in unbound_pwq_slot()
731 static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu) in unbound_pwq() argument
733 return rcu_dereference_check(*unbound_pwq_slot(wq, cpu), in unbound_pwq()
735 lockdep_is_held(&wq->mutex)); in unbound_pwq()
746 static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq) in unbound_effective_cpumask() argument
748 return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask; in unbound_effective_cpumask()
1552 static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq, in wq_node_nr_active() argument
1555 if (!(wq->flags & WQ_UNBOUND)) in wq_node_nr_active()
1561 return wq->node_nr_active[node]; in wq_node_nr_active()
1573 static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) in wq_update_node_max_active() argument
1575 struct cpumask *effective = unbound_effective_cpumask(wq); in wq_update_node_max_active()
1576 int min_active = READ_ONCE(wq->min_active); in wq_update_node_max_active()
1577 int max_active = READ_ONCE(wq->max_active); in wq_update_node_max_active()
1580 lockdep_assert_held(&wq->mutex); in wq_update_node_max_active()
1595 wq_node_nr_active(wq, node)->max = min_active; in wq_update_node_max_active()
1597 wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active; in wq_update_node_max_active()
1608 wq_node_nr_active(wq, node)->max = in wq_update_node_max_active()
1613 wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active; in wq_update_node_max_active()
1709 struct workqueue_struct *wq = pwq->wq; in pwq_tryinc_nr_active() local
1711 struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node); in pwq_tryinc_nr_active()
1718 obtained = pwq->nr_active < READ_ONCE(wq->max_active); in pwq_tryinc_nr_active()
1821 static void unplug_oldest_pwq(struct workqueue_struct *wq) in unplug_oldest_pwq() argument
1825 lockdep_assert_held(&wq->mutex); in unplug_oldest_pwq()
1828 pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue, in unplug_oldest_pwq()
1930 struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node); in pwq_dec_nr_active()
2010 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
2011 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
2192 static bool is_chained_work(struct workqueue_struct *wq) in is_chained_work() argument
2201 return worker && worker->current_pwq->wq == wq; in is_chained_work()
2229 static void __queue_work(int cpu, struct workqueue_struct *wq, in __queue_work() argument
2250 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && in __queue_work()
2251 WARN_ONCE(!is_chained_work(wq), "workqueue: cannot queue %ps on wq %s\n", in __queue_work()
2252 work->func, wq->name))) { in __queue_work()
2259 if (wq->flags & WQ_UNBOUND) in __queue_work()
2265 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); in __queue_work()
2278 if (last_pool && last_pool != pool && !(wq->flags & __WQ_ORDERED)) { in __queue_work()
2285 if (worker && worker->current_pwq->wq == wq) { in __queue_work()
2306 if (wq->flags & WQ_UNBOUND) { in __queue_work()
2313 wq->name, cpu); in __queue_work()
2376 bool queue_work_on(int cpu, struct workqueue_struct *wq, in queue_work_on() argument
2386 __queue_work(cpu, wq, work); in queue_work_on()
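The references around __queue_work() and queue_work_on() above form the normal submission path. A minimal caller-side sketch, assuming a hypothetical my_drv object and using system_wq purely for illustration (none of these names come from this file):

    #include <linux/workqueue.h>
    #include <linux/printk.h>

    struct my_drv {
            struct work_struct work;
            int payload;
    };

    static void my_drv_work_fn(struct work_struct *work)
    {
            /* Recover the containing object from the embedded work item. */
            struct my_drv *drv = container_of(work, struct my_drv, work);

            pr_info("my_drv: processing payload %d\n", drv->payload);
    }

    static void my_drv_submit(struct my_drv *drv)
    {
            INIT_WORK(&drv->work, my_drv_work_fn);

            /*
             * queue_work() is queue_work_on(WORK_CPU_UNBOUND, ...); both
             * routes end up in __queue_work() shown in the listing above.
             */
            queue_work_on(WORK_CPU_UNBOUND, system_wq, &drv->work);
    }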
2444 bool queue_work_node(int node, struct workqueue_struct *wq, in queue_work_node() argument
2459 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); in queue_work_node()
2467 __queue_work(cpu, wq, work); in queue_work_node()
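queue_work_node() is only meaningful on WQ_UNBOUND workqueues, as the WARN_ON_ONCE above enforces; it prefers a CPU on the requested NUMA node. A hedged sketch with a hypothetical my_numa_wq:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *my_numa_wq;     /* hypothetical */

    static int my_numa_submit(struct work_struct *work, int node)
    {
            if (!my_numa_wq) {
                    my_numa_wq = alloc_workqueue("my_numa_wq", WQ_UNBOUND, 0);
                    if (!my_numa_wq)
                            return -ENOMEM;
            }

            /* Prefer a CPU on @node; falls back if none there is usable. */
            queue_work_node(node, my_numa_wq, work);
            return 0;
    }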
2481 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
2485 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, in __queue_delayed_work() argument
2491 WARN_ON_ONCE(!wq); in __queue_delayed_work()
2503 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
2508 dwork->wq = wq; in __queue_delayed_work()
2543 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, in queue_delayed_work_on() argument
2555 __queue_delayed_work(cpu, wq, dwork, delay); in queue_delayed_work_on()
2582 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, in mod_delayed_work_on() argument
2591 __queue_delayed_work(cpu, wq, dwork, delay); in mod_delayed_work_on()
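queue_delayed_work_on() and mod_delayed_work_on() are the timer-deferred variants of the same path; delayed_work_timer_fn() above simply calls __queue_work() when the timer fires. A sketch of a self-rearming poller with a hypothetical my_poll_work item:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void my_poll_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);  /* hypothetical */

    static void my_poll_fn(struct work_struct *work)
    {
            /* Re-arm roughly 100ms out, on whichever CPU the wq picks. */
            queue_delayed_work(system_wq, &my_poll_work, msecs_to_jiffies(100));
    }

    static void my_poll_kick_now(void)
    {
            /*
             * Unlike queue_delayed_work(), mod_delayed_work() re-programs
             * the timer even when the work item is already pending.
             */
            mod_delayed_work(system_wq, &my_poll_work, 0);
    }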
2604 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); in rcu_work_rcufn()
2618 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) in queue_rcu_work() argument
2628 rwork->wq = wq; in queue_rcu_work()
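queue_rcu_work() chains an RCU grace period in front of workqueue execution: rcu_work_rcufn() above re-queues the work onto rwork->wq once the grace period has elapsed. A sketch that defers a kfree() until readers are done, with a hypothetical my_obj:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct my_obj {
            struct rcu_work rwork;
            /* ... RCU-protected payload ... */
    };

    static void my_obj_free_fn(struct work_struct *work)
    {
            struct my_obj *obj =
                    container_of(to_rcu_work(work), struct my_obj, rwork);

            kfree(obj);
    }

    static void my_obj_defer_free(struct my_obj *obj)
    {
            INIT_RCU_WORK(&obj->rwork, my_obj_free_fn);
            /* Frees @obj from process context after an RCU grace period. */
            queue_rcu_work(system_wq, &obj->rwork);
    }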
2982 struct workqueue_struct *wq = pwq->wq; in send_mayday() local
2986 if (!wq->rescuer) in send_mayday()
2997 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
2998 wake_up_process(wq->rescuer->task); in send_mayday()
3196 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
3206 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE)) in process_one_work()
3232 lock_map_acquire(pwq->wq->lockdep_map); in process_one_work()
3266 lock_map_release(pwq->wq->lockdep_map); in process_one_work()
3485 struct workqueue_struct *wq = rescuer->rescue_wq; in rescuer_thread() local
3511 while (!list_empty(&wq->maydays)) { in rescuer_thread()
3512 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread()
3546 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
3750 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & in check_flush_dependency()
3753 worker->current_pwq->wq->name, worker->current_func, in check_flush_dependency()
3813 (pwq->wq->flags & WQ_BH) ? &bh_key : &thr_key); in insert_wq_barrier()
3877 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, in flush_workqueue_prep_pwqs() argument
3885 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); in flush_workqueue_prep_pwqs()
3886 atomic_set(&wq->nr_pwqs_to_flush, 1); in flush_workqueue_prep_pwqs()
3897 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
3910 atomic_inc(&wq->nr_pwqs_to_flush); in flush_workqueue_prep_pwqs()
3925 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) in flush_workqueue_prep_pwqs()
3926 complete(&wq->first_flusher->done); in flush_workqueue_prep_pwqs()
3931 static void touch_wq_lockdep_map(struct workqueue_struct *wq) in touch_wq_lockdep_map() argument
3934 if (unlikely(!wq->lockdep_map)) in touch_wq_lockdep_map()
3937 if (wq->flags & WQ_BH) in touch_wq_lockdep_map()
3940 lock_map_acquire(wq->lockdep_map); in touch_wq_lockdep_map()
3941 lock_map_release(wq->lockdep_map); in touch_wq_lockdep_map()
3943 if (wq->flags & WQ_BH) in touch_wq_lockdep_map()
3949 struct workqueue_struct *wq) in touch_work_lockdep_map() argument
3952 if (wq->flags & WQ_BH) in touch_work_lockdep_map()
3958 if (wq->flags & WQ_BH) in touch_work_lockdep_map()
3970 void __flush_workqueue(struct workqueue_struct *wq) in __flush_workqueue() argument
3975 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, (*wq->lockdep_map)), in __flush_workqueue()
3982 touch_wq_lockdep_map(wq); in __flush_workqueue()
3984 mutex_lock(&wq->mutex); in __flush_workqueue()
3989 next_color = work_next_color(wq->work_color); in __flush_workqueue()
3991 if (next_color != wq->flush_color) { in __flush_workqueue()
3997 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); in __flush_workqueue()
3998 this_flusher.flush_color = wq->work_color; in __flush_workqueue()
3999 wq->work_color = next_color; in __flush_workqueue()
4001 if (!wq->first_flusher) { in __flush_workqueue()
4003 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); in __flush_workqueue()
4005 wq->first_flusher = &this_flusher; in __flush_workqueue()
4007 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, in __flush_workqueue()
4008 wq->work_color)) { in __flush_workqueue()
4010 wq->flush_color = next_color; in __flush_workqueue()
4011 wq->first_flusher = NULL; in __flush_workqueue()
4016 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); in __flush_workqueue()
4017 list_add_tail(&this_flusher.list, &wq->flusher_queue); in __flush_workqueue()
4018 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); in __flush_workqueue()
4026 list_add_tail(&this_flusher.list, &wq->flusher_overflow); in __flush_workqueue()
4029 check_flush_dependency(wq, NULL, false); in __flush_workqueue()
4031 mutex_unlock(&wq->mutex); in __flush_workqueue()
4041 if (READ_ONCE(wq->first_flusher) != &this_flusher) in __flush_workqueue()
4044 mutex_lock(&wq->mutex); in __flush_workqueue()
4047 if (wq->first_flusher != &this_flusher) in __flush_workqueue()
4050 WRITE_ONCE(wq->first_flusher, NULL); in __flush_workqueue()
4053 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); in __flush_workqueue()
4059 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { in __flush_workqueue()
4060 if (next->flush_color != wq->flush_color) in __flush_workqueue()
4066 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && in __flush_workqueue()
4067 wq->flush_color != work_next_color(wq->work_color)); in __flush_workqueue()
4070 wq->flush_color = work_next_color(wq->flush_color); in __flush_workqueue()
4073 if (!list_empty(&wq->flusher_overflow)) { in __flush_workqueue()
4080 list_for_each_entry(tmp, &wq->flusher_overflow, list) in __flush_workqueue()
4081 tmp->flush_color = wq->work_color; in __flush_workqueue()
4083 wq->work_color = work_next_color(wq->work_color); in __flush_workqueue()
4085 list_splice_tail_init(&wq->flusher_overflow, in __flush_workqueue()
4086 &wq->flusher_queue); in __flush_workqueue()
4087 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); in __flush_workqueue()
4090 if (list_empty(&wq->flusher_queue)) { in __flush_workqueue()
4091 WARN_ON_ONCE(wq->flush_color != wq->work_color); in __flush_workqueue()
4099 WARN_ON_ONCE(wq->flush_color == wq->work_color); in __flush_workqueue()
4100 WARN_ON_ONCE(wq->flush_color != next->flush_color); in __flush_workqueue()
4103 wq->first_flusher = next; in __flush_workqueue()
4105 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) in __flush_workqueue()
4112 wq->first_flusher = NULL; in __flush_workqueue()
4116 mutex_unlock(&wq->mutex); in __flush_workqueue()
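__flush_workqueue() is the body behind the flush_workqueue() wrapper; the flush-color bookkeeping above makes the caller wait for every work item queued before the call. Typical usage is just the wrapper (hedged sketch, helper name is illustrative):

    #include <linux/workqueue.h>

    /* Wait for everything already queued on @wq to finish executing. */
    static void my_drv_quiesce(struct workqueue_struct *wq)
    {
            /*
             * Flushing a workqueue from a work item running on that same
             * workqueue can deadlock; the lockdep map touched in
             * touch_wq_lockdep_map() above exists to flag exactly that.
             */
            flush_workqueue(wq);
    }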
4131 void drain_workqueue(struct workqueue_struct *wq) in drain_workqueue() argument
4141 mutex_lock(&wq->mutex); in drain_workqueue()
4142 if (!wq->nr_drainers++) in drain_workqueue()
4143 wq->flags |= __WQ_DRAINING; in drain_workqueue()
4144 mutex_unlock(&wq->mutex); in drain_workqueue()
4146 __flush_workqueue(wq); in drain_workqueue()
4148 mutex_lock(&wq->mutex); in drain_workqueue()
4150 for_each_pwq(pwq, wq) { in drain_workqueue()
4163 wq->name, __func__, flush_cnt); in drain_workqueue()
4165 mutex_unlock(&wq->mutex); in drain_workqueue()
4169 if (!--wq->nr_drainers) in drain_workqueue()
4170 wq->flags &= ~__WQ_DRAINING; in drain_workqueue()
4171 mutex_unlock(&wq->mutex); in drain_workqueue()
4181 struct workqueue_struct *wq; in start_flush_work() local
4203 wq = pwq->wq; in start_flush_work()
4204 check_flush_dependency(wq, work, from_cancel); in start_flush_work()
4209 touch_work_lockdep_map(work, wq); in start_flush_work()
4220 if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer)) in start_flush_work()
4221 touch_wq_lockdep_map(wq); in start_flush_work()
4318 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
4833 static void wq_init_lockdep(struct workqueue_struct *wq) in wq_init_lockdep() argument
4837 lockdep_register_key(&wq->key); in wq_init_lockdep()
4838 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); in wq_init_lockdep()
4840 lock_name = wq->name; in wq_init_lockdep()
4842 wq->lock_name = lock_name; in wq_init_lockdep()
4843 wq->lockdep_map = &wq->__lockdep_map; in wq_init_lockdep()
4844 lockdep_init_map(wq->lockdep_map, lock_name, &wq->key, 0); in wq_init_lockdep()
4847 static void wq_unregister_lockdep(struct workqueue_struct *wq) in wq_unregister_lockdep() argument
4849 if (wq->lockdep_map != &wq->__lockdep_map) in wq_unregister_lockdep()
4852 lockdep_unregister_key(&wq->key); in wq_unregister_lockdep()
4855 static void wq_free_lockdep(struct workqueue_struct *wq) in wq_free_lockdep() argument
4857 if (wq->lockdep_map != &wq->__lockdep_map) in wq_free_lockdep()
4860 if (wq->lock_name != wq->name) in wq_free_lockdep()
4861 kfree(wq->lock_name); in wq_free_lockdep()
4864 static void wq_init_lockdep(struct workqueue_struct *wq) in wq_init_lockdep() argument
4868 static void wq_unregister_lockdep(struct workqueue_struct *wq) in wq_unregister_lockdep() argument
4872 static void wq_free_lockdep(struct workqueue_struct *wq) in wq_free_lockdep() argument
4931 struct workqueue_struct *wq = in rcu_free_wq() local
4934 if (wq->flags & WQ_UNBOUND) in rcu_free_wq()
4935 free_node_nr_active(wq->node_nr_active); in rcu_free_wq()
4937 wq_free_lockdep(wq); in rcu_free_wq()
4938 free_percpu(wq->cpu_pwq); in rcu_free_wq()
4939 free_workqueue_attrs(wq->unbound_attrs); in rcu_free_wq()
4940 kfree(wq); in rcu_free_wq()
5104 struct workqueue_struct *wq = pwq->wq; in pwq_release_workfn() local
5113 mutex_lock(&wq->mutex); in pwq_release_workfn()
5115 is_last = list_empty(&wq->pwqs); in pwq_release_workfn()
5120 if (!is_last && (wq->flags & __WQ_ORDERED)) in pwq_release_workfn()
5121 unplug_oldest_pwq(wq); in pwq_release_workfn()
5123 mutex_unlock(&wq->mutex); in pwq_release_workfn()
5126 if (wq->flags & WQ_UNBOUND) { in pwq_release_workfn()
5134 wq_node_nr_active(pwq->wq, pwq->pool->node); in pwq_release_workfn()
5148 wq_unregister_lockdep(wq); in pwq_release_workfn()
5149 call_rcu(&wq->rcu, rcu_free_wq); in pwq_release_workfn()
5154 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
5162 pwq->wq = wq; in init_pwq()
5175 struct workqueue_struct *wq = pwq->wq; in link_pwq() local
5177 lockdep_assert_held(&wq->mutex); in link_pwq()
5184 pwq->work_color = wq->work_color; in link_pwq()
5187 list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
5191 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, in alloc_unbound_pwq() argument
5209 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
5252 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, in install_unbound_pwq() argument
5255 struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu); in install_unbound_pwq()
5259 lockdep_assert_held(&wq->mutex); in install_unbound_pwq()
5271 struct workqueue_struct *wq; /* target workqueue */ member
5296 apply_wqattrs_prepare(struct workqueue_struct *wq, in apply_wqattrs_prepare() argument
5324 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); in apply_wqattrs_prepare()
5334 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); in apply_wqattrs_prepare()
5352 if ((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)) in apply_wqattrs_prepare()
5355 ctx->wq = wq; in apply_wqattrs_prepare()
5370 mutex_lock(&ctx->wq->mutex); in apply_wqattrs_commit()
5372 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); in apply_wqattrs_commit()
5376 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, in apply_wqattrs_commit()
5378 ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq); in apply_wqattrs_commit()
5381 wq_update_node_max_active(ctx->wq, -1); in apply_wqattrs_commit()
5383 mutex_unlock(&ctx->wq->mutex); in apply_wqattrs_commit()
5386 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, in apply_workqueue_attrs_locked() argument
5392 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) in apply_workqueue_attrs_locked()
5395 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); in apply_workqueue_attrs_locked()
5421 int apply_workqueue_attrs(struct workqueue_struct *wq, in apply_workqueue_attrs() argument
5427 ret = apply_workqueue_attrs_locked(wq, attrs); in apply_workqueue_attrs()
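apply_workqueue_attrs() is the exported entry into the prepare/commit sequence above and, per the WARN_ON in apply_workqueue_attrs_locked(), applies only to WQ_UNBOUND workqueues. A hedged sketch of retuning an unbound workqueue, assuming alloc_workqueue_attrs()/free_workqueue_attrs() are available to the caller:

    #include <linux/workqueue.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static int my_tune_unbound_wq(struct workqueue_struct *wq)
    {
            struct workqueue_attrs *attrs;
            int ret;

            attrs = alloc_workqueue_attrs();
            if (!attrs)
                    return -ENOMEM;

            attrs->nice = -5;               /* run a bit above default prio */
            cpumask_copy(attrs->cpumask, cpu_possible_mask);

            /* Rebuilds the unbound pool_workqueues with the new attributes. */
            ret = apply_workqueue_attrs(wq, attrs);

            free_workqueue_attrs(attrs);
            return ret;
    }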
5452 static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu) in unbound_wq_update_pwq() argument
5459 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) in unbound_wq_update_pwq()
5469 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); in unbound_wq_update_pwq()
5474 if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) in unbound_wq_update_pwq()
5478 pwq = alloc_unbound_pwq(wq, target_attrs); in unbound_wq_update_pwq()
5481 wq->name); in unbound_wq_update_pwq()
5486 mutex_lock(&wq->mutex); in unbound_wq_update_pwq()
5487 old_pwq = install_unbound_pwq(wq, cpu, pwq); in unbound_wq_update_pwq()
5491 mutex_lock(&wq->mutex); in unbound_wq_update_pwq()
5492 pwq = unbound_pwq(wq, -1); in unbound_wq_update_pwq()
5496 old_pwq = install_unbound_pwq(wq, cpu, pwq); in unbound_wq_update_pwq()
5498 mutex_unlock(&wq->mutex); in unbound_wq_update_pwq()
5502 static int alloc_and_link_pwqs(struct workqueue_struct *wq) in alloc_and_link_pwqs() argument
5504 bool highpri = wq->flags & WQ_HIGHPRI; in alloc_and_link_pwqs()
5509 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); in alloc_and_link_pwqs()
5510 if (!wq->cpu_pwq) in alloc_and_link_pwqs()
5513 if (!(wq->flags & WQ_UNBOUND)) { in alloc_and_link_pwqs()
5516 if (wq->flags & WQ_BH) in alloc_and_link_pwqs()
5526 pwq_p = per_cpu_ptr(wq->cpu_pwq, cpu); in alloc_and_link_pwqs()
5533 init_pwq(*pwq_p, wq, pool); in alloc_and_link_pwqs()
5535 mutex_lock(&wq->mutex); in alloc_and_link_pwqs()
5537 mutex_unlock(&wq->mutex); in alloc_and_link_pwqs()
5542 if (wq->flags & __WQ_ORDERED) { in alloc_and_link_pwqs()
5545 ret = apply_workqueue_attrs_locked(wq, ordered_wq_attrs[highpri]); in alloc_and_link_pwqs()
5547 dfl_pwq = rcu_access_pointer(wq->dfl_pwq); in alloc_and_link_pwqs()
5548 WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node || in alloc_and_link_pwqs()
5549 wq->pwqs.prev != &dfl_pwq->pwqs_node), in alloc_and_link_pwqs()
5550 "ordering guarantee broken for workqueue %s\n", wq->name); in alloc_and_link_pwqs()
5552 ret = apply_workqueue_attrs_locked(wq, unbound_std_wq_attrs[highpri]); in alloc_and_link_pwqs()
5558 if (wq->cpu_pwq) { in alloc_and_link_pwqs()
5560 struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); in alloc_and_link_pwqs()
5565 free_percpu(wq->cpu_pwq); in alloc_and_link_pwqs()
5566 wq->cpu_pwq = NULL; in alloc_and_link_pwqs()
5585 static int init_rescuer(struct workqueue_struct *wq) in init_rescuer() argument
5593 if (!(wq->flags & WQ_MEM_RECLAIM)) in init_rescuer()
5599 wq->name); in init_rescuer()
5603 rescuer->rescue_wq = wq; in init_rescuer()
5610 wq->name, ERR_PTR(ret)); in init_rescuer()
5615 wq->rescuer = rescuer; in init_rescuer()
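init_rescuer() only does something for WQ_MEM_RECLAIM workqueues: they get a dedicated rescuer kthread that the mayday path (send_mayday()/rescuer_thread() earlier in this listing) falls back on when worker creation stalls under memory pressure. Sketch of allocating such a workqueue (names are illustrative):

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *my_io_wq;       /* hypothetical */

    static int my_io_init(void)
    {
            /*
             * WQ_MEM_RECLAIM guarantees forward progress on the reclaim
             * path by attaching a rescuer thread in init_rescuer().
             */
            my_io_wq = alloc_workqueue("my_io_wq", WQ_MEM_RECLAIM, 0);
            return my_io_wq ? 0 : -ENOMEM;
    }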
5636 static void wq_adjust_max_active(struct workqueue_struct *wq) in wq_adjust_max_active() argument
5641 lockdep_assert_held(&wq->mutex); in wq_adjust_max_active()
5643 if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { in wq_adjust_max_active()
5647 new_max = wq->saved_max_active; in wq_adjust_max_active()
5648 new_min = wq->saved_min_active; in wq_adjust_max_active()
5651 if (wq->max_active == new_max && wq->min_active == new_min) in wq_adjust_max_active()
5660 WRITE_ONCE(wq->max_active, new_max); in wq_adjust_max_active()
5661 WRITE_ONCE(wq->min_active, new_min); in wq_adjust_max_active()
5663 if (wq->flags & WQ_UNBOUND) in wq_adjust_max_active()
5664 wq_update_node_max_active(wq, -1); in wq_adjust_max_active()
5677 for_each_pwq(pwq, wq) { in wq_adjust_max_active()
5696 struct workqueue_struct *wq; in __alloc_workqueue() local
5713 wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1); in __alloc_workqueue()
5715 wq_size = sizeof(*wq); in __alloc_workqueue()
5717 wq = kzalloc_noprof(wq_size, GFP_KERNEL); in __alloc_workqueue()
5718 if (!wq) in __alloc_workqueue()
5722 wq->unbound_attrs = alloc_workqueue_attrs_noprof(); in __alloc_workqueue()
5723 if (!wq->unbound_attrs) in __alloc_workqueue()
5727 name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); in __alloc_workqueue()
5731 wq->name); in __alloc_workqueue()
5741 max_active = wq_clamp_max_active(max_active, flags, wq->name); in __alloc_workqueue()
5745 wq->flags = flags; in __alloc_workqueue()
5746 wq->max_active = max_active; in __alloc_workqueue()
5747 wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE); in __alloc_workqueue()
5748 wq->saved_max_active = wq->max_active; in __alloc_workqueue()
5749 wq->saved_min_active = wq->min_active; in __alloc_workqueue()
5750 mutex_init(&wq->mutex); in __alloc_workqueue()
5751 atomic_set(&wq->nr_pwqs_to_flush, 0); in __alloc_workqueue()
5752 INIT_LIST_HEAD(&wq->pwqs); in __alloc_workqueue()
5753 INIT_LIST_HEAD(&wq->flusher_queue); in __alloc_workqueue()
5754 INIT_LIST_HEAD(&wq->flusher_overflow); in __alloc_workqueue()
5755 INIT_LIST_HEAD(&wq->maydays); in __alloc_workqueue()
5757 INIT_LIST_HEAD(&wq->list); in __alloc_workqueue()
5760 if (alloc_node_nr_active(wq->node_nr_active) < 0) in __alloc_workqueue()
5770 if (alloc_and_link_pwqs(wq) < 0) in __alloc_workqueue()
5773 mutex_lock(&wq->mutex); in __alloc_workqueue()
5774 wq_adjust_max_active(wq); in __alloc_workqueue()
5775 mutex_unlock(&wq->mutex); in __alloc_workqueue()
5777 list_add_tail_rcu(&wq->list, &workqueues); in __alloc_workqueue()
5779 if (wq_online && init_rescuer(wq) < 0) in __alloc_workqueue()
5784 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) in __alloc_workqueue()
5787 return wq; in __alloc_workqueue()
5796 if (wq->flags & WQ_UNBOUND) { in __alloc_workqueue()
5798 free_node_nr_active(wq->node_nr_active); in __alloc_workqueue()
5801 free_workqueue_attrs(wq->unbound_attrs); in __alloc_workqueue()
5802 kfree(wq); in __alloc_workqueue()
5807 destroy_workqueue(wq); in __alloc_workqueue()
5816 struct workqueue_struct *wq; in alloc_workqueue_noprof() local
5820 wq = __alloc_workqueue(fmt, flags, max_active, args); in alloc_workqueue_noprof()
5822 if (!wq) in alloc_workqueue_noprof()
5825 wq_init_lockdep(wq); in alloc_workqueue_noprof()
5827 return wq; in alloc_workqueue_noprof()
5837 struct workqueue_struct *wq; in alloc_workqueue_lockdep_map() local
5841 wq = __alloc_workqueue(fmt, flags, max_active, args); in alloc_workqueue_lockdep_map()
5843 if (!wq) in alloc_workqueue_lockdep_map()
5846 wq->lockdep_map = lockdep_map; in alloc_workqueue_lockdep_map()
5848 return wq; in alloc_workqueue_lockdep_map()
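__alloc_workqueue() is shared by alloc_workqueue(), which also installs the lockdep key via wq_init_lockdep(), and by alloc_workqueue_lockdep_map(), which takes a caller-provided map. The public entry takes a printf-style name, flags and max_active; a hedged allocation sketch with illustrative names:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *my_unbound_wq;  /* hypothetical */

    static int my_subsys_init(int instance)
    {
            /*
             * max_active == 0 selects the default limit; the name is a
             * printf format, which keeps per-instance workqueues apart in
             * show_one_workqueue() dumps and in sysfs.
             */
            my_unbound_wq = alloc_workqueue("my_subsys/%d",
                                            WQ_UNBOUND | WQ_FREEZABLE, 0,
                                            instance);
            return my_unbound_wq ? 0 : -ENOMEM;
    }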
5861 if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1)) in pwq_busy()
5886 void destroy_workqueue(struct workqueue_struct *wq) in destroy_workqueue() argument
5895 workqueue_sysfs_unregister(wq); in destroy_workqueue()
5898 mutex_lock(&wq->mutex); in destroy_workqueue()
5899 wq->flags |= __WQ_DESTROYING; in destroy_workqueue()
5900 mutex_unlock(&wq->mutex); in destroy_workqueue()
5903 drain_workqueue(wq); in destroy_workqueue()
5906 if (wq->rescuer) { in destroy_workqueue()
5908 kthread_stop(wq->rescuer->task); in destroy_workqueue()
5909 kfree(wq->rescuer); in destroy_workqueue()
5910 wq->rescuer = NULL; in destroy_workqueue()
5918 mutex_lock(&wq->mutex); in destroy_workqueue()
5919 for_each_pwq(pwq, wq) { in destroy_workqueue()
5923 __func__, wq->name); in destroy_workqueue()
5926 mutex_unlock(&wq->mutex); in destroy_workqueue()
5928 show_one_workqueue(wq); in destroy_workqueue()
5933 mutex_unlock(&wq->mutex); in destroy_workqueue()
5939 list_del_rcu(&wq->list); in destroy_workqueue()
5950 put_pwq_unlocked(unbound_pwq(wq, cpu)); in destroy_workqueue()
5951 RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL); in destroy_workqueue()
5954 put_pwq_unlocked(unbound_pwq(wq, -1)); in destroy_workqueue()
5955 RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL); in destroy_workqueue()
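destroy_workqueue() above drains the workqueue, complains via show_one_workqueue() if work keeps arriving, and then releases the pwqs, so callers have to stop their producers first. A hedged teardown-ordering sketch (helper and parameters are illustrative):

    #include <linux/workqueue.h>

    /* @wq must not receive new work once this runs. */
    static void my_subsys_exit(struct workqueue_struct *wq,
                               struct delayed_work *poller)
    {
            /* Stop re-arming sources before the final drain. */
            cancel_delayed_work_sync(poller);

            /*
             * destroy_workqueue() calls drain_workqueue() itself and frees
             * the per-CPU and default pwqs as shown above.
             */
            destroy_workqueue(wq);
    }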
5972 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) in workqueue_set_max_active() argument
5975 if (WARN_ON(wq->flags & WQ_BH)) in workqueue_set_max_active()
5978 if (WARN_ON(wq->flags & __WQ_ORDERED)) in workqueue_set_max_active()
5981 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); in workqueue_set_max_active()
5983 mutex_lock(&wq->mutex); in workqueue_set_max_active()
5985 wq->saved_max_active = max_active; in workqueue_set_max_active()
5986 if (wq->flags & WQ_UNBOUND) in workqueue_set_max_active()
5987 wq->saved_min_active = min(wq->saved_min_active, max_active); in workqueue_set_max_active()
5989 wq_adjust_max_active(wq); in workqueue_set_max_active()
5991 mutex_unlock(&wq->mutex); in workqueue_set_max_active()
6009 void workqueue_set_min_active(struct workqueue_struct *wq, int min_active) in workqueue_set_min_active() argument
6012 if (WARN_ON((wq->flags & (WQ_BH | WQ_UNBOUND | __WQ_ORDERED)) != in workqueue_set_min_active()
6016 mutex_lock(&wq->mutex); in workqueue_set_min_active()
6017 wq->saved_min_active = clamp(min_active, 0, wq->saved_max_active); in workqueue_set_min_active()
6018 wq_adjust_max_active(wq); in workqueue_set_min_active()
6019 mutex_unlock(&wq->mutex); in workqueue_set_min_active()
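workqueue_set_max_active() and workqueue_set_min_active() update the saved limits and let wq_adjust_max_active() propagate them; the WARN_ONs above show they reject WQ_BH and ordered workqueues, and min_active applies only to unbound ones. A brief sketch, assuming both setters are reachable from the caller:

    #include <linux/workqueue.h>

    /* Hypothetical tuning for an unbound, non-ordered workqueue. */
    static void my_tune_concurrency(struct workqueue_struct *wq)
    {
            workqueue_set_max_active(wq, 16);  /* overall in-flight limit   */
            workqueue_set_min_active(wq, 4);   /* per-node guaranteed floor */
    }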
6072 bool workqueue_congested(int cpu, struct workqueue_struct *wq) in workqueue_congested() argument
6082 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); in workqueue_congested()
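workqueue_congested() just inspects the relevant pool_workqueue, so it is a hint rather than a guarantee. A hedged back-pressure sketch:

    #include <linux/workqueue.h>

    /* Hypothetical producer-side throttle. */
    static bool my_try_queue(struct workqueue_struct *wq,
                             struct work_struct *work)
    {
            /* WORK_CPU_UNBOUND means "test the local CPU" here. */
            if (workqueue_congested(WORK_CPU_UNBOUND, wq))
                    return false;   /* caller retries later */

            return queue_work(wq, work);
    }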
6167 struct workqueue_struct *wq = NULL; in print_worker_info() local
6185 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
6186 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); in print_worker_info()
6345 void show_one_workqueue(struct workqueue_struct *wq) in show_one_workqueue() argument
6351 for_each_pwq(pwq, wq) { in show_one_workqueue()
6360 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); in show_one_workqueue()
6362 for_each_pwq(pwq, wq) { in show_one_workqueue()
6441 struct workqueue_struct *wq; in show_all_workqueues() local
6449 list_for_each_entry_rcu(wq, &workqueues, list) in show_all_workqueues()
6450 show_one_workqueue(wq); in show_all_workqueues()
6466 struct workqueue_struct *wq; in show_freezable_workqueues() local
6472 list_for_each_entry_rcu(wq, &workqueues, list) { in show_freezable_workqueues()
6473 if (!(wq->flags & WQ_FREEZABLE)) in show_freezable_workqueues()
6475 show_one_workqueue(wq); in show_freezable_workqueues()
6683 struct workqueue_struct *wq; in workqueue_online_cpu() local
6704 list_for_each_entry(wq, &workqueues, list) { in workqueue_online_cpu()
6705 struct workqueue_attrs *attrs = wq->unbound_attrs; in workqueue_online_cpu()
6712 unbound_wq_update_pwq(wq, tcpu); in workqueue_online_cpu()
6714 mutex_lock(&wq->mutex); in workqueue_online_cpu()
6715 wq_update_node_max_active(wq, -1); in workqueue_online_cpu()
6716 mutex_unlock(&wq->mutex); in workqueue_online_cpu()
6726 struct workqueue_struct *wq; in workqueue_offline_cpu() local
6739 list_for_each_entry(wq, &workqueues, list) { in workqueue_offline_cpu()
6740 struct workqueue_attrs *attrs = wq->unbound_attrs; in workqueue_offline_cpu()
6747 unbound_wq_update_pwq(wq, tcpu); in workqueue_offline_cpu()
6749 mutex_lock(&wq->mutex); in workqueue_offline_cpu()
6750 wq_update_node_max_active(wq, cpu); in workqueue_offline_cpu()
6751 mutex_unlock(&wq->mutex); in workqueue_offline_cpu()
6813 struct workqueue_struct *wq; in freeze_workqueues_begin() local
6820 list_for_each_entry(wq, &workqueues, list) { in freeze_workqueues_begin()
6821 mutex_lock(&wq->mutex); in freeze_workqueues_begin()
6822 wq_adjust_max_active(wq); in freeze_workqueues_begin()
6823 mutex_unlock(&wq->mutex); in freeze_workqueues_begin()
6845 struct workqueue_struct *wq; in freeze_workqueues_busy() local
6852 list_for_each_entry(wq, &workqueues, list) { in freeze_workqueues_busy()
6853 if (!(wq->flags & WQ_FREEZABLE)) in freeze_workqueues_busy()
6860 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
6886 struct workqueue_struct *wq; in thaw_workqueues() local
6896 list_for_each_entry(wq, &workqueues, list) { in thaw_workqueues()
6897 mutex_lock(&wq->mutex); in thaw_workqueues()
6898 wq_adjust_max_active(wq); in thaw_workqueues()
6899 mutex_unlock(&wq->mutex); in thaw_workqueues()
6911 struct workqueue_struct *wq; in workqueue_apply_unbound_cpumask() local
6916 list_for_each_entry(wq, &workqueues, list) { in workqueue_apply_unbound_cpumask()
6917 if (!(wq->flags & WQ_UNBOUND) || (wq->flags & __WQ_DESTROYING)) in workqueue_apply_unbound_cpumask()
6920 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); in workqueue_apply_unbound_cpumask()
6943 list_for_each_entry(wq, &workqueues, list) { in workqueue_apply_unbound_cpumask()
6944 if (wq->rescuer && !wq->rescuer->pool) in workqueue_apply_unbound_cpumask()
6945 unbind_worker(wq->rescuer); in workqueue_apply_unbound_cpumask()
7011 struct workqueue_struct *wq; in wq_affn_dfl_set() local
7025 list_for_each_entry(wq, &workqueues, list) { in wq_affn_dfl_set()
7027 unbound_wq_update_pwq(wq, cpu); in wq_affn_dfl_set()
7065 struct workqueue_struct *wq; member
7073 return wq_dev->wq; in dev_to_wq()
7079 struct workqueue_struct *wq = dev_to_wq(dev); in per_cpu_show() local
7081 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); in per_cpu_show()
7088 struct workqueue_struct *wq = dev_to_wq(dev); in max_active_show() local
7090 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); in max_active_show()
7097 struct workqueue_struct *wq = dev_to_wq(dev); in max_active_store() local
7103 workqueue_set_max_active(wq, val); in max_active_store()
7118 struct workqueue_struct *wq = dev_to_wq(dev); in wq_nice_show() local
7121 mutex_lock(&wq->mutex); in wq_nice_show()
7122 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); in wq_nice_show()
7123 mutex_unlock(&wq->mutex); in wq_nice_show()
7129 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) in wq_sysfs_prep_attrs() argument
7139 copy_workqueue_attrs(attrs, wq->unbound_attrs); in wq_sysfs_prep_attrs()
7146 struct workqueue_struct *wq = dev_to_wq(dev); in wq_nice_store() local
7152 attrs = wq_sysfs_prep_attrs(wq); in wq_nice_store()
7158 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_nice_store()
7171 struct workqueue_struct *wq = dev_to_wq(dev); in wq_cpumask_show() local
7174 mutex_lock(&wq->mutex); in wq_cpumask_show()
7176 cpumask_pr_args(wq->unbound_attrs->cpumask)); in wq_cpumask_show()
7177 mutex_unlock(&wq->mutex); in wq_cpumask_show()
7185 struct workqueue_struct *wq = dev_to_wq(dev); in wq_cpumask_store() local
7191 attrs = wq_sysfs_prep_attrs(wq); in wq_cpumask_store()
7197 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_cpumask_store()
7208 struct workqueue_struct *wq = dev_to_wq(dev); in wq_affn_scope_show() local
7211 mutex_lock(&wq->mutex); in wq_affn_scope_show()
7212 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL) in wq_affn_scope_show()
7218 wq_affn_names[wq->unbound_attrs->affn_scope]); in wq_affn_scope_show()
7219 mutex_unlock(&wq->mutex); in wq_affn_scope_show()
7228 struct workqueue_struct *wq = dev_to_wq(dev); in wq_affn_scope_store() local
7237 attrs = wq_sysfs_prep_attrs(wq); in wq_affn_scope_store()
7240 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_affn_scope_store()
7250 struct workqueue_struct *wq = dev_to_wq(dev); in wq_affinity_strict_show() local
7253 wq->unbound_attrs->affn_strict); in wq_affinity_strict_show()
7260 struct workqueue_struct *wq = dev_to_wq(dev); in wq_affinity_strict_store() local
7268 attrs = wq_sysfs_prep_attrs(wq); in wq_affinity_strict_store()
7271 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_affinity_strict_store()
7411 int workqueue_sysfs_register(struct workqueue_struct *wq) in workqueue_sysfs_register() argument
7420 if (WARN_ON(wq->flags & __WQ_ORDERED)) in workqueue_sysfs_register()
7423 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); in workqueue_sysfs_register()
7427 wq_dev->wq = wq; in workqueue_sysfs_register()
7430 dev_set_name(&wq_dev->dev, "%s", wq->name); in workqueue_sysfs_register()
7441 wq->wq_dev = NULL; in workqueue_sysfs_register()
7445 if (wq->flags & WQ_UNBOUND) { in workqueue_sysfs_register()
7452 wq->wq_dev = NULL; in workqueue_sysfs_register()
7469 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) in workqueue_sysfs_unregister() argument
7471 struct wq_device *wq_dev = wq->wq_dev; in workqueue_sysfs_unregister()
7473 if (!wq->wq_dev) in workqueue_sysfs_unregister()
7476 wq->wq_dev = NULL; in workqueue_sysfs_unregister()
7480 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } in workqueue_sysfs_unregister() argument
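workqueue_sysfs_register() is invoked from __alloc_workqueue() for WQ_SYSFS workqueues; the *_show()/*_store() handlers above then surface per_cpu, max_active, nice, cpumask and the affinity knobs under /sys/bus/workqueue/devices/<name>/. Sketch (name is illustrative):

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *my_tunable_wq;  /* hypothetical */

    static int my_tunable_wq_init(void)
    {
            /*
             * With WQ_SYSFS the nice level, cpumask and affinity scope
             * become runtime tunables via the sysfs files shown above.
             */
            my_tunable_wq = alloc_workqueue("my_tunable_wq",
                                            WQ_UNBOUND | WQ_SYSFS, 0);
            return my_tunable_wq ? 0 : -ENOMEM;
    }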
7918 struct workqueue_struct *wq; in workqueue_init() local
7937 list_for_each_entry(wq, &workqueues, list) { in workqueue_init()
7938 WARN(init_rescuer(wq), in workqueue_init()
7940 wq->name); in workqueue_init()
8040 struct workqueue_struct *wq; in workqueue_init_topology() local
8057 list_for_each_entry(wq, &workqueues, list) { in workqueue_init_topology()
8059 unbound_wq_update_pwq(wq, cpu); in workqueue_init_topology()
8060 if (wq->flags & WQ_UNBOUND) { in workqueue_init_topology()
8061 mutex_lock(&wq->mutex); in workqueue_init_topology()
8062 wq_update_node_max_active(wq, -1); in workqueue_init_topology()
8063 mutex_unlock(&wq->mutex); in workqueue_init_topology()