Lines matching refs: pwq

533 static void show_pwq(struct pool_workqueue *pwq);
598 #define for_each_pwq(pwq, wq) \ argument
599 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
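The two fragments above are the body of the iteration macro used throughout this listing. A reconstruction as a sketch (the condition on the continuation line is truncated in the listing, so the lockdep_is_held() argument below is an assumption):

    #define for_each_pwq(pwq, wq)                                   \
        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,      \
                                lockdep_is_held(&(wq)->mutex))

This walks every pool_workqueue of a workqueue under RCU, which is why callers such as flush_workqueue_prep_pwqs() and drain_workqueue() below can iterate wq->pwqs while pwqs are being released concurrently.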
792 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
795 set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING | in set_work_pwq()
1468 struct pool_workqueue *pwq = worker->current_pwq; in wq_worker_tick() local
1471 if (!pwq) in wq_worker_tick()
1474 pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC; in wq_worker_tick()
1500 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; in wq_worker_tick()
1503 pwq->stats[PWQ_STAT_CM_WAKEUP]++; in wq_worker_tick()
1623 static void get_pwq(struct pool_workqueue *pwq) in get_pwq() argument
1625 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1626 WARN_ON_ONCE(pwq->refcnt <= 0); in get_pwq()
1627 pwq->refcnt++; in get_pwq()
1637 static void put_pwq(struct pool_workqueue *pwq) in put_pwq() argument
1639 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1640 if (likely(--pwq->refcnt)) in put_pwq()
1646 kthread_queue_work(pwq_release_worker, &pwq->release_work); in put_pwq()
1655 static void put_pwq_unlocked(struct pool_workqueue *pwq) in put_pwq_unlocked() argument
1657 if (pwq) { in put_pwq_unlocked()
1662 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1663 put_pwq(pwq); in put_pwq_unlocked()
1664 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
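Fragments 1623-1664 are the pwq reference-counting trio: get_pwq() and put_pwq() require the owning pool's lock, while put_pwq_unlocked() takes it itself. A sketch assembled from these fragments (the early return in put_pwq() and the comments are inferred, not shown in the listing):

    static void get_pwq(struct pool_workqueue *pwq)
    {
        lockdep_assert_held(&pwq->pool->lock);
        WARN_ON_ONCE(pwq->refcnt <= 0);
        pwq->refcnt++;
    }

    static void put_pwq(struct pool_workqueue *pwq)
    {
        lockdep_assert_held(&pwq->pool->lock);
        if (likely(--pwq->refcnt))
            return;
        /* Last reference: destruction needs a sleepable context, so it
         * is punted to a kthread worker (see pwq_release_workfn() below). */
        kthread_queue_work(pwq_release_worker, &pwq->release_work);
    }

    static void put_pwq_unlocked(struct pool_workqueue *pwq)
    {
        if (pwq) {
            /* As in the fragments: take the pool lock only to drop
             * the reference. */
            raw_spin_lock_irq(&pwq->pool->lock);
            put_pwq(pwq);
            raw_spin_unlock_irq(&pwq->pool->lock);
        }
    }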
1668 static bool pwq_is_empty(struct pool_workqueue *pwq) in pwq_is_empty() argument
1670 return !pwq->nr_active && list_empty(&pwq->inactive_works); in pwq_is_empty()
1673 static void __pwq_activate_work(struct pool_workqueue *pwq, in __pwq_activate_work() argument
1680 if (list_empty(&pwq->pool->worklist)) in __pwq_activate_work()
1681 pwq->pool->watchdog_ts = jiffies; in __pwq_activate_work()
1682 move_linked_works(work, &pwq->pool->worklist, NULL); in __pwq_activate_work()
1707 static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill) in pwq_tryinc_nr_active() argument
1709 struct workqueue_struct *wq = pwq->wq; in pwq_tryinc_nr_active()
1710 struct worker_pool *pool = pwq->pool; in pwq_tryinc_nr_active()
1718 obtained = pwq->nr_active < READ_ONCE(wq->max_active); in pwq_tryinc_nr_active()
1722 if (unlikely(pwq->plugged)) in pwq_tryinc_nr_active()
1734 if (!list_empty(&pwq->pending_node) && likely(!fill)) in pwq_tryinc_nr_active()
1750 if (list_empty(&pwq->pending_node)) in pwq_tryinc_nr_active()
1751 list_add_tail(&pwq->pending_node, &nna->pending_pwqs); in pwq_tryinc_nr_active()
1764 list_del_init(&pwq->pending_node); in pwq_tryinc_nr_active()
1770 pwq->nr_active++; in pwq_tryinc_nr_active()
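Fragments 1707-1770 gate activation on max_active. The node-wide accounting for unbound workqueues (the nna/pending_node lines) is only partially excerpted, so the sketch below covers just the per-pwq fast path; treat it as a simplified reading, not the full function:

    /* Simplified sketch of pwq_tryinc_nr_active(); the node-wide
     * wq_node_nr_active bookkeeping for unbound workqueues (the
     * pending_node lines above) is elided. */
    static bool pwq_tryinc_nr_active_sketch(struct pool_workqueue *pwq)
    {
        struct workqueue_struct *wq = pwq->wq;
        bool obtained;

        lockdep_assert_held(&pwq->pool->lock);

        /* can this pwq take another active work item? */
        obtained = pwq->nr_active < READ_ONCE(wq->max_active);

        /* a plugged pwq may not activate anything; see
         * unplug_oldest_pwq() below */
        if (obtained && unlikely(pwq->plugged))
            obtained = false;

        if (obtained)
            pwq->nr_active++;
        return obtained;
    }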
1785 static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) in pwq_activate_first_inactive() argument
1788 list_first_entry_or_null(&pwq->inactive_works, in pwq_activate_first_inactive()
1791 if (work && pwq_tryinc_nr_active(pwq, fill)) { in pwq_activate_first_inactive()
1792 __pwq_activate_work(pwq, work); in pwq_activate_first_inactive()
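Fragments 1785-1792 nearly spell out the whole helper; a sketch (the struct work_struct type of the list entry is implied by the entry field):

    static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
    {
        struct work_struct *work =
            list_first_entry_or_null(&pwq->inactive_works,
                                     struct work_struct, entry);

        if (work && pwq_tryinc_nr_active(pwq, fill)) {
            __pwq_activate_work(pwq, work);
            return true;
        }
        return false;
    }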
1823 struct pool_workqueue *pwq; in unplug_oldest_pwq() local
1828 pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue, in unplug_oldest_pwq()
1830 raw_spin_lock_irq(&pwq->pool->lock); in unplug_oldest_pwq()
1831 if (pwq->plugged) { in unplug_oldest_pwq()
1832 pwq->plugged = false; in unplug_oldest_pwq()
1833 if (pwq_activate_first_inactive(pwq, true)) in unplug_oldest_pwq()
1834 kick_pool(pwq->pool); in unplug_oldest_pwq()
1836 raw_spin_unlock_irq(&pwq->pool->lock); in unplug_oldest_pwq()
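Fragments 1823-1836 show the unplugging path: the oldest pwq sits at the head of wq->pwqs, and clearing plugged may immediately activate deferred work. A sketch (the lockdep assertion and the NULL guard are assumptions, not visible in the listing):

    static void unplug_oldest_pwq(struct workqueue_struct *wq)
    {
        struct pool_workqueue *pwq;

        lockdep_assert_held(&wq->mutex);    /* assumed */

        pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue,
                                       pwqs_node);
        if (WARN_ON_ONCE(!pwq))             /* defensive; assumed */
            return;

        raw_spin_lock_irq(&pwq->pool->lock);
        if (pwq->plugged) {
            pwq->plugged = false;
            if (pwq_activate_first_inactive(pwq, true))
                kick_pool(pwq->pool);
        }
        raw_spin_unlock_irq(&pwq->pool->lock);
    }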
1851 struct pool_workqueue *pwq; in node_activate_pending_pwq() local
1858 pwq = list_first_entry_or_null(&nna->pending_pwqs, in node_activate_pending_pwq()
1860 if (!pwq) in node_activate_pending_pwq()
1869 if (pwq->pool != locked_pool) { in node_activate_pending_pwq()
1871 locked_pool = pwq->pool; in node_activate_pending_pwq()
1884 work = list_first_entry_or_null(&pwq->inactive_works, in node_activate_pending_pwq()
1887 list_del_init(&pwq->pending_node); in node_activate_pending_pwq()
1899 pwq->nr_active++; in node_activate_pending_pwq()
1900 __pwq_activate_work(pwq, work); in node_activate_pending_pwq()
1902 if (list_empty(&pwq->inactive_works)) in node_activate_pending_pwq()
1903 list_del_init(&pwq->pending_node); in node_activate_pending_pwq()
1905 list_move_tail(&pwq->pending_node, &nna->pending_pwqs); in node_activate_pending_pwq()
1908 if (pwq->pool != caller_pool) in node_activate_pending_pwq()
1909 kick_pool(pwq->pool); in node_activate_pending_pwq()
1927 static void pwq_dec_nr_active(struct pool_workqueue *pwq) in pwq_dec_nr_active() argument
1929 struct worker_pool *pool = pwq->pool; in pwq_dec_nr_active()
1930 struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node); in pwq_dec_nr_active()
1938 pwq->nr_active--; in pwq_dec_nr_active()
1945 pwq_activate_first_inactive(pwq, false); in pwq_dec_nr_active()
1986 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) in pwq_dec_nr_in_flight() argument
1991 pwq_dec_nr_active(pwq); in pwq_dec_nr_in_flight()
1993 pwq->nr_in_flight[color]--; in pwq_dec_nr_in_flight()
1996 if (likely(pwq->flush_color != color)) in pwq_dec_nr_in_flight()
2000 if (pwq->nr_in_flight[color]) in pwq_dec_nr_in_flight()
2004 pwq->flush_color = -1; in pwq_dec_nr_in_flight()
2010 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
2011 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
2013 put_pwq(pwq); in pwq_dec_nr_in_flight()
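Fragments 1927-2013 carry the flush-color bookkeeping that flush_workqueue_prep_pwqs() below relies on. A sketch of the in-flight decrement (get_work_color() and the WORK_STRUCT_INACTIVE test are assumptions; the color and flush logic follows the fragments):

    static void pwq_dec_nr_in_flight_sketch(struct pool_workqueue *pwq,
                                            unsigned long work_data)
    {
        int color = get_work_color(work_data);  /* assumed helper */

        /* inactive works never took an nr_active slot */
        if (!(work_data & WORK_STRUCT_INACTIVE))
            pwq_dec_nr_active(pwq);

        pwq->nr_in_flight[color]--;

        /* no flush in progress for this color? */
        if (likely(pwq->flush_color != color))
            goto out_put;

        /* works of the flushed color still in flight? */
        if (pwq->nr_in_flight[color])
            goto out_put;

        /* this pwq is done flushing; maybe wake the first flusher */
        pwq->flush_color = -1;
        if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
            complete(&pwq->wq->first_flusher->done);
    out_put:
        put_pwq(pwq);
    }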
2048 struct pool_workqueue *pwq; in try_to_grab_pending() local
2087 pwq = get_work_pwq(work); in try_to_grab_pending()
2088 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
2108 move_linked_works(work, &pwq->pool->worklist, NULL); in try_to_grab_pending()
2120 pwq_dec_nr_in_flight(pwq, work_data); in try_to_grab_pending()
2174 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
2183 set_work_pwq(work, pwq, extra_flags); in insert_work()
2185 get_pwq(pwq); in insert_work()
2232 struct pool_workqueue *pwq; in __queue_work() local
2265 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); in __queue_work()
2266 pool = pwq->pool; in __queue_work()
2286 pwq = worker->current_pwq; in __queue_work()
2287 pool = pwq->pool; in __queue_work()
2305 if (unlikely(!pwq->refcnt)) { in __queue_work()
2317 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
2322 pwq->nr_in_flight[pwq->work_color]++; in __queue_work()
2323 work_flags = work_color_to_flags(pwq->work_color); in __queue_work()
2330 if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) { in __queue_work()
2335 insert_work(pwq, work, &pool->worklist, work_flags); in __queue_work()
2339 insert_work(pwq, work, &pwq->inactive_works, work_flags); in __queue_work()
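Fragments 2322-2339 are the central queueing decision in __queue_work(): a work item either takes an nr_active slot and goes straight onto the pool's worklist, or it is parked on the pwq's inactive_works list. A condensed sketch of that branch (the WORK_STRUCT_INACTIVE flag, the watchdog update, and the kick are assumptions):

        pwq->nr_in_flight[pwq->work_color]++;
        work_flags = work_color_to_flags(pwq->work_color);

        if (list_empty(&pwq->inactive_works) &&
            pwq_tryinc_nr_active(pwq, false)) {
            if (list_empty(&pool->worklist))
                pool->watchdog_ts = jiffies;        /* assumed */
            insert_work(pwq, work, &pool->worklist, work_flags);
            kick_pool(pool);                        /* assumed */
        } else {
            work_flags |= WORK_STRUCT_INACTIVE;     /* assumed flag */
            insert_work(pwq, work, &pwq->inactive_works, work_flags);
        }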
2981 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday() local
2982 struct workqueue_struct *wq = pwq->wq; in send_mayday()
2990 if (list_empty(&pwq->mayday_node)) { in send_mayday()
2996 get_pwq(pwq); in send_mayday()
2997 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
2999 pwq->stats[PWQ_STAT_MAYDAY]++; in send_mayday()
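Fragments 2981-2999 queue a starved pwq for the rescuer. The list_empty() test makes the operation idempotent, and get_pwq() pins the pwq until the rescuer drops it with put_pwq_unlocked() (fragment 3566). A sketch (the rescuer guard and the wakeup are assumptions):

    static void send_mayday(struct work_struct *work)
    {
        struct pool_workqueue *pwq = get_work_pwq(work);
        struct workqueue_struct *wq = pwq->wq;

        if (!wq->rescuer)                   /* assumed guard */
            return;

        /* queue the pwq for the rescuer exactly once */
        if (list_empty(&pwq->mayday_node)) {
            get_pwq(pwq);   /* released via put_pwq_unlocked() */
            list_add_tail(&pwq->mayday_node, &wq->maydays);
            pwq->stats[PWQ_STAT_MAYDAY]++;
            wake_up_process(wq->rescuer->task);     /* assumed */
        }
    }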
3160 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work() local
3186 worker->current_pwq = pwq; in process_one_work()
3196 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
3206 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE)) in process_one_work()
3225 pwq->stats[PWQ_STAT_STARTED]++; in process_one_work()
3232 lock_map_acquire(pwq->wq->lockdep_map); in process_one_work()
3266 lock_map_release(pwq->wq->lockdep_map); in process_one_work()
3294 pwq->stats[PWQ_STAT_COMPLETED]++; in process_one_work()
3314 pwq_dec_nr_in_flight(pwq, work_data); in process_one_work()
3440 static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescuer) in assign_rescuer_work() argument
3442 struct worker_pool *pool = pwq->pool; in assign_rescuer_work()
3446 if (!pwq->nr_active || !need_to_create_worker(pool)) in assign_rescuer_work()
3454 if (get_work_pwq(work) == pwq && assign_work(work, rescuer, &n)) in assign_rescuer_work()
3455 pwq->stats[PWQ_STAT_RESCUED]++; in assign_rescuer_work()
3512 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread() local
3514 struct worker_pool *pool = pwq->pool; in rescuer_thread()
3517 list_del_init(&pwq->mayday_node); in rescuer_thread()
3527 if (assign_rescuer_work(pwq, rescuer)) { in rescuer_thread()
3539 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
3544 if (list_empty(&pwq->mayday_node)) { in rescuer_thread()
3545 get_pwq(pwq); in rescuer_thread()
3546 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
3566 put_pwq_unlocked(pwq); in rescuer_thread()
3793 static void insert_wq_barrier(struct pool_workqueue *pwq, in insert_wq_barrier() argument
3813 (pwq->wq->flags & WQ_BH) ? &bh_key : &thr_key); in insert_wq_barrier()
3840 pwq->nr_in_flight[work_color]++; in insert_wq_barrier()
3843 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
3881 struct pool_workqueue *pwq; in flush_workqueue_prep_pwqs() local
3897 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
3898 if (current_pool != pwq->pool) { in flush_workqueue_prep_pwqs()
3901 current_pool = pwq->pool; in flush_workqueue_prep_pwqs()
3906 WARN_ON_ONCE(pwq->flush_color != -1); in flush_workqueue_prep_pwqs()
3908 if (pwq->nr_in_flight[flush_color]) { in flush_workqueue_prep_pwqs()
3909 pwq->flush_color = flush_color; in flush_workqueue_prep_pwqs()
3916 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); in flush_workqueue_prep_pwqs()
3917 pwq->work_color = work_color; in flush_workqueue_prep_pwqs()
4134 struct pool_workqueue *pwq; in drain_workqueue() local
4150 for_each_pwq(pwq, wq) { in drain_workqueue()
4153 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
4154 drained = pwq_is_empty(pwq); in drain_workqueue()
4155 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
4180 struct pool_workqueue *pwq; in start_flush_work() local
4192 pwq = get_work_pwq(work); in start_flush_work()
4193 if (pwq) { in start_flush_work()
4194 if (unlikely(pwq->pool != pool)) in start_flush_work()
4200 pwq = worker->current_pwq; in start_flush_work()
4203 wq = pwq->wq; in start_flush_work()
4206 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
5102 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_release_workfn() local
5104 struct workqueue_struct *wq = pwq->wq; in pwq_release_workfn()
5105 struct worker_pool *pool = pwq->pool; in pwq_release_workfn()
5112 if (!list_empty(&pwq->pwqs_node)) { in pwq_release_workfn()
5114 list_del_rcu(&pwq->pwqs_node); in pwq_release_workfn()
5132 if (!list_empty(&pwq->pending_node)) { in pwq_release_workfn()
5134 wq_node_nr_active(pwq->wq, pwq->pool->node); in pwq_release_workfn()
5137 list_del_init(&pwq->pending_node); in pwq_release_workfn()
5141 kfree_rcu(pwq, rcu); in pwq_release_workfn()
5154 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
5157 BUG_ON((unsigned long)pwq & ~WORK_STRUCT_PWQ_MASK); in init_pwq()
5159 memset(pwq, 0, sizeof(*pwq)); in init_pwq()
5161 pwq->pool = pool; in init_pwq()
5162 pwq->wq = wq; in init_pwq()
5163 pwq->flush_color = -1; in init_pwq()
5164 pwq->refcnt = 1; in init_pwq()
5165 INIT_LIST_HEAD(&pwq->inactive_works); in init_pwq()
5166 INIT_LIST_HEAD(&pwq->pending_node); in init_pwq()
5167 INIT_LIST_HEAD(&pwq->pwqs_node); in init_pwq()
5168 INIT_LIST_HEAD(&pwq->mayday_node); in init_pwq()
5169 kthread_init_work(&pwq->release_work, pwq_release_workfn); in init_pwq()
5173 static void link_pwq(struct pool_workqueue *pwq) in link_pwq() argument
5175 struct workqueue_struct *wq = pwq->wq; in link_pwq()
5180 if (!list_empty(&pwq->pwqs_node)) in link_pwq()
5184 pwq->work_color = wq->work_color; in link_pwq()
5187 list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
5195 struct pool_workqueue *pwq; in alloc_unbound_pwq() local
5203 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
5204 if (!pwq) { in alloc_unbound_pwq()
5209 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
5210 return pwq; in alloc_unbound_pwq()
5253 int cpu, struct pool_workqueue *pwq) in install_unbound_pwq() argument
5262 link_pwq(pwq); in install_unbound_pwq()
5265 rcu_assign_pointer(*slot, pwq); in install_unbound_pwq()
5454 struct pool_workqueue *old_pwq = NULL, *pwq; in unbound_wq_update_pwq() local
5478 pwq = alloc_unbound_pwq(wq, target_attrs); in unbound_wq_update_pwq()
5479 if (!pwq) { in unbound_wq_update_pwq()
5487 old_pwq = install_unbound_pwq(wq, cpu, pwq); in unbound_wq_update_pwq()
5492 pwq = unbound_pwq(wq, -1); in unbound_wq_update_pwq()
5493 raw_spin_lock_irq(&pwq->pool->lock); in unbound_wq_update_pwq()
5494 get_pwq(pwq); in unbound_wq_update_pwq()
5495 raw_spin_unlock_irq(&pwq->pool->lock); in unbound_wq_update_pwq()
5496 old_pwq = install_unbound_pwq(wq, cpu, pwq); in unbound_wq_update_pwq()
5560 struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); in alloc_and_link_pwqs() local
5562 if (pwq) in alloc_and_link_pwqs()
5563 kmem_cache_free(pwq_cache, pwq); in alloc_and_link_pwqs()
5674 struct pool_workqueue *pwq; in wq_adjust_max_active() local
5677 for_each_pwq(pwq, wq) { in wq_adjust_max_active()
5681 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); in wq_adjust_max_active()
5682 if (pwq_activate_first_inactive(pwq, true)) { in wq_adjust_max_active()
5684 kick_pool(pwq->pool); in wq_adjust_max_active()
5686 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); in wq_adjust_max_active()
5853 static bool pwq_busy(struct pool_workqueue *pwq) in pwq_busy() argument
5858 if (pwq->nr_in_flight[i]) in pwq_busy()
5861 if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1)) in pwq_busy()
5863 if (!pwq_is_empty(pwq)) in pwq_busy()
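Fragments 5853-5863 define the sanity predicate destroy_workqueue() runs per pwq. A sketch with the loop over work colors restored (the WORK_NR_COLORS bound and the loop itself are assumptions; the three checks follow the fragments):

    static bool pwq_busy(struct pool_workqueue *pwq)
    {
        int i;

        for (i = 0; i < WORK_NR_COLORS; i++)    /* bound assumed */
            if (pwq->nr_in_flight[i])
                return true;

        /* only the default pwq may legitimately hold an extra ref */
        if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1))
            return true;
        if (!pwq_is_empty(pwq))
            return true;

        return false;
    }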
5888 struct pool_workqueue *pwq; in destroy_workqueue() local
5919 for_each_pwq(pwq, wq) { in destroy_workqueue()
5920 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
5921 if (WARN_ON(pwq_busy(pwq))) { in destroy_workqueue()
5924 show_pwq(pwq); in destroy_workqueue()
5925 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
5931 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
6074 struct pool_workqueue *pwq; in workqueue_congested() local
6082 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); in workqueue_congested()
6083 ret = !list_empty(&pwq->inactive_works); in workqueue_congested()
6166 struct pool_workqueue *pwq = NULL; in print_worker_info() local
6184 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
6185 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
6266 static void show_pwq(struct pool_workqueue *pwq) in show_pwq() argument
6269 struct worker_pool *pool = pwq->pool; in show_pwq()
6279 pwq->nr_active, pwq->refcnt, in show_pwq()
6280 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); in show_pwq()
6283 if (worker->current_pwq == pwq) { in show_pwq()
6293 if (worker->current_pwq != pwq) in show_pwq()
6308 if (get_work_pwq(work) == pwq) { in show_pwq()
6318 if (get_work_pwq(work) != pwq) in show_pwq()
6328 if (!list_empty(&pwq->inactive_works)) { in show_pwq()
6332 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
6347 struct pool_workqueue *pwq; in show_one_workqueue() local
6351 for_each_pwq(pwq, wq) { in show_one_workqueue()
6352 if (!pwq_is_empty(pwq)) { in show_one_workqueue()
6362 for_each_pwq(pwq, wq) { in show_one_workqueue()
6363 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); in show_one_workqueue()
6364 if (!pwq_is_empty(pwq)) { in show_one_workqueue()
6371 show_pwq(pwq); in show_one_workqueue()
6374 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); in show_one_workqueue()
6846 struct pool_workqueue *pwq; in freeze_workqueues_busy() local
6860 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
6861 WARN_ON_ONCE(pwq->nr_active < 0); in freeze_workqueues_busy()
6862 if (pwq->nr_active) { in freeze_workqueues_busy()