workqueue.c — cross-reference results for "pwq"

Documentation comments mentioning pwq (numbers are source lines in
kernel/workqueue.c):

  266   While queued, the non-flag bits of a work item's data point to the
        pwq; thus, pwqs need to be aligned at two's power of the number of
        flag bits.

  282   When pwq->nr_active >= max_active, a new work item is queued to
        pwq->inactive_works instead of pool->worklist and marked with
        WORK_STRUCT_INACTIVE.

  287   Work items marked WORK_STRUCT_INACTIVE do not participate in
        nr_active, and all work items in pwq->inactive_works carry that
        mark; the converse does not hold. Some WORK_STRUCT_INACTIVE items
        are ready to run in pool->worklist or worker->scheduled: those are
        wq_barrier items used by flush_work() and must not count toward
        nr_active. A non-barrier work item is marked WORK_STRUCT_INACTIVE
        iff it is in pwq->inactive_works.

  305   Release of an unbound pwq is punted to a kthread_worker; see
        put_pwq() and pwq_release_workfn() for details. pool_workqueue
        itself is also RCU protected so that the first pwq can be
        determined without grabbing wq->mutex.

  331   The following struct is used to enforce per-node max_active: when a
        pwq wants to start executing a work item it increments ->nr, and if
        ->nr is already over ->max the pwq is instead queued on
        ->pending_pwqs and activated later as in-flight work items finish
        (see node_activate_pending_pwq()).
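The nr_active rule in the comments above is easiest to see as code. Below is a
minimal userspace model of that bookkeeping, not the kernel implementation:
every model_* name and the MODEL_WORK_INACTIVE flag are invented stand-ins for
pwq->nr_active, pwq->inactive_works, pool->worklist and WORK_STRUCT_INACTIVE.

    /*
     * Minimal userspace model of the nr_active rule; a sketch, not the
     * kernel implementation.
     */
    #include <stdio.h>

    #define MODEL_WORK_INACTIVE 0x1    /* stand-in for WORK_STRUCT_INACTIVE */

    struct model_work {
        unsigned long flags;
        struct model_work *next;
    };

    struct model_pwq {
        int nr_active;                     /* items occupying active slots */
        int max_active;
        struct model_work *worklist;       /* stand-in for pool->worklist */
        struct model_work *inactive_works; /* parked, flagged INACTIVE */
    };

    static void push(struct model_work **list, struct model_work *w)
    {
        w->next = *list;
        *list = w;
    }

    /* Queue one work item, honoring the max_active limit. */
    static void model_queue_work(struct model_pwq *pwq, struct model_work *w)
    {
        if (pwq->nr_active < pwq->max_active) {
            pwq->nr_active++;                /* takes an active slot */
            push(&pwq->worklist, w);
        } else {
            w->flags |= MODEL_WORK_INACTIVE; /* parked; not in nr_active */
            push(&pwq->inactive_works, w);
        }
    }

    /*
     * An active item finished: promote a parked item, clearing its flag.
     * (The kernel activates the first inactive item in queue order; this
     * toy list is LIFO, which does not affect the invariant shown.)
     */
    static void model_dec_nr_active(struct model_pwq *pwq)
    {
        pwq->nr_active--;
        if (pwq->inactive_works) {
            struct model_work *w = pwq->inactive_works;

            pwq->inactive_works = w->next;
            w->flags &= ~MODEL_WORK_INACTIVE;
            pwq->nr_active++;
            push(&pwq->worklist, w);
        }
    }

    int main(void)
    {
        struct model_pwq pwq = { .max_active = 1 };
        struct model_work a = {0}, b = {0};

        model_queue_work(&pwq, &a);   /* runs: nr_active 0 -> 1 */
        model_queue_work(&pwq, &b);   /* parked on inactive_works */
        printf("b inactive? %d\n", !!(b.flags & MODEL_WORK_INACTIVE)); /* 1 */
        model_dec_nr_active(&pwq);    /* a done; b promoted */
        printf("b inactive? %d\n", !!(b.flags & MODEL_WORK_INACTIVE)); /* 0 */
        return 0;
    }

The model keeps the documented invariant mechanically visible: a non-barrier
item carries the INACTIVE flag exactly while it sits on inactive_works, and
only unflagged items count toward nr_active.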
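The line-331 comment describes the same try-increment pattern at node scope.
The following is a hedged sketch of that idea only: the kernel's struct is
wq_node_nr_active with tryinc_node_nr_active() and a ->pending_pwqs list,
while the node_* names below are simplified stand-ins that reduce the pending
list and its locking to plain counters.

    /* Sketch of the per-node max_active limit; not the kernel definitions. */
    #include <stdbool.h>
    #include <stdio.h>

    struct node_limit {
        int nr;      /* work items currently active on this node */
        int max;     /* per-node max_active */
        int pending; /* pwqs parked for a slot (a list in the kernel) */
    };

    /* Try to take one execution slot; on failure the caller must park. */
    static bool node_tryinc(struct node_limit *n)
    {
        if (n->nr >= n->max)
            return false;
        n->nr++;
        return true;
    }

    /* An item finished; hand the freed slot to a parked pwq if any. */
    static void node_dec(struct node_limit *n)
    {
        n->nr--;
        if (n->pending) {       /* kernel: node_activate_pending_pwq() */
            n->pending--;
            n->nr++;
        }
    }

    int main(void)
    {
        struct node_limit n = { .max = 2 };

        printf("%d\n", node_tryinc(&n));  /* 1: slot taken */
        printf("%d\n", node_tryinc(&n));  /* 1: second slot taken */
        if (!node_tryinc(&n))
            n.pending++;                  /* third pwq parks */
        node_dec(&n);                     /* a finish wakes a parked pwq */
        printf("nr=%d pending=%d\n", n.nr, n.pending); /* nr=2 pending=0 */
        return 0;
    }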
Identifier index for "pwq" in workqueue.c (source line, context, usage):

Iteration macro:

   616  for_each_pwq(pwq, wq)

Functions taking a struct pool_workqueue *pwq parameter:

   810  set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, unsigned long flags)
  1657  get_pwq(struct pool_workqueue *pwq)
  1671  put_pwq(struct pool_workqueue *pwq)
  1689  put_pwq_unlocked(struct pool_workqueue *pwq)
  1702  pwq_is_empty(struct pool_workqueue *pwq)
  1707  __pwq_activate_work(struct pool_workqueue *pwq, struct work_struct *work)
  1741  pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill)
  1819  pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
  1973  pwq_dec_nr_active(struct pool_workqueue *pwq)
  2032  pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
  2220  insert_work(struct pool_workqueue *pwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags)
  3024  send_mayday(struct pool_workqueue *pwq)
  3485  assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescuer)
  3870  insert_wq_barrier(struct pool_workqueue *pwq, struct wq_barrier *barr, struct work_struct *target, struct worker *worker)
  5231  init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool)
  5263  link_pwq(struct pool_workqueue *pwq)
  5343  install_unbound_pwq(struct workqueue_struct *wq, int cpu, struct pool_workqueue *pwq)
  5978  pwq_busy(struct pool_workqueue *pwq)
  6391  show_pwq(struct pool_workqueue *pwq)

Functions with a local struct pool_workqueue *pwq:

  1502  wq_worker_tick()             pwq = worker->current_pwq
  1857  unplug_oldest_pwq()
  1897  node_activate_pending_pwq()
  2094  try_to_grab_pending()
  2278  __queue_work()
  3204  process_one_work()           pwq = get_work_pwq(work)
  3592  rescuer_thread()             pwq = list_first_entry(&wq->maydays, ...
  3958  flush_workqueue_prep_pwqs()
  4211  drain_workqueue()
  4257  start_flush_work()
  5179  pwq_release_workfn()         pwq = container_of(work, struct pool_workqueue, ...
  5285  alloc_unbound_pwq()
  5544  unbound_wq_update_pwq()      old_pwq = NULL, pwq
  5650  alloc_and_link_pwqs()        pwq = *per_cpu_ptr(wq->cpu_pwq, cpu)
  5772  wq_adjust_max_active()
  6013  destroy_workqueue()
  6199  workqueue_congested()
  6291  print_worker_info()          pwq = NULL
  6474  show_one_workqueue()
  6973  freeze_workqueues_busy()

[all...]
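The index lists get_pwq(), put_pwq(), put_pwq_unlocked() and
pwq_release_workfn(), which together implement the reference counting that the
line-305 comment summarizes. The toy below models only the counting protocol;
the deferral to a kthread_worker and the RCU protection are noted in comments
but not reproduced, and all model_* names are illustrative, not kernel API.

    /*
     * Toy model of the get_pwq()/put_pwq() protocol. In the kernel the
     * final put does not free directly: release is punted to a
     * kthread_worker via pwq_release_workfn(), because the last reference
     * can be dropped in contexts that cannot take the locks the release
     * path needs, and the struct stays RCU-protected for lockless readers.
     * This model collapses all of that into a direct call.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct model_pwq {
        int refcnt;
    };

    /* Kernel analogue: pwq_release_workfn(), run later on a kthread_worker. */
    static void model_release(struct model_pwq *pwq)
    {
        printf("releasing pwq\n");
        free(pwq);
    }

    static void model_get_pwq(struct model_pwq *pwq)
    {
        pwq->refcnt++;
    }

    static void model_put_pwq(struct model_pwq *pwq)
    {
        if (--pwq->refcnt == 0)
            model_release(pwq);  /* kernel defers this step, see put_pwq() */
    }

    int main(void)
    {
        struct model_pwq *pwq = calloc(1, sizeof(*pwq));

        if (!pwq)
            return 1;
        model_get_pwq(pwq);  /* e.g. a queued work item holds a reference */
        model_get_pwq(pwq);
        model_put_pwq(pwq);  /* one user remains */
        model_put_pwq(pwq);  /* last reference: release runs */
        return 0;
    }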