Lines Matching full:work (io_uring/io-wq.c)
66 struct delayed_work work; member
158 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
160 return __io_get_work_hash(atomic_read(&work->flags)); in io_get_work_hash()
272 * If there's work to do, returns true with acct->lock acquired. If not,
306 * starting work or finishing work. In either case, if it does in io_acct_activate_free_worker()
307 * go to sleep, we'll kick off a new task for this work anyway. in io_acct_activate_free_worker()
324 * Most likely an attempt to queue unbounded work on an io_wq that in io_wq_create_worker()
405 * work item after we canceled in io_wq_exit_workers(). in io_queue_worker_create()
422 /* Defer if current and next work are both hashed to the same chain */
423 static bool io_wq_hash_defer(struct io_wq_work *work, struct io_wq_acct *acct) in io_wq_hash_defer() argument
430 work_flags = atomic_read(&work->flags); in io_wq_hash_defer()
434 /* should not happen, io_acct_run_queue() said we had work */ in io_wq_hash_defer()
472 * Worker will start processing some work. Move it to the busy list, if
486 * No work, worker going to sleep. Move to freelist.
519 struct io_wq_work *work, *tail; in io_get_next_work() local
526 work = container_of(node, struct io_wq_work, list); in io_get_next_work()
529 work_flags = atomic_read(&work->flags); in io_get_next_work()
532 return work; in io_get_next_work()
536 /* all items with this hash lie in [work, tail] */ in io_get_next_work()
543 return work; in io_get_next_work()
556 * work being added and clearing the stalled bit. in io_get_next_work()
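
The io_get_next_work() hits above (file lines 519-556) rely on the invariant spelled out at line 536: items with the same hash sit next to each other on the pending list and hash_tail[] remembers the last of them, so a hash that is already running on another worker can be skipped in a single hop, and if nothing else is runnable the account is marked stalled. Below is a minimal userspace sketch of that lookup only; the types (struct work, struct queue) and the bucket_busy[] flags are invented stand-ins for the kernel's list and hash-bitmap machinery, and the sketch takes just the first item of a run rather than everything the real function hands to the worker.

    #include <stdbool.h>
    #include <stddef.h>

    #define NR_BUCKETS 64

    struct work {
        struct work *next;
        int bucket;                     /* -1: not hashed, can run anytime */
    };

    struct queue {
        struct work *head;
        struct work *hash_tail[NR_BUCKETS]; /* last queued item per bucket */
        bool bucket_busy[NR_BUCKETS];       /* bucket owned by some worker; cleared
                                               again when that run finishes (not shown) */
    };

    /* Pop the first runnable item, skipping buckets that are already running. */
    static struct work *get_next_work(struct queue *q)
    {
        struct work **prevp = &q->head;
        struct work *w;

        while ((w = *prevp) != NULL) {
            if (w->bucket < 0 || !q->bucket_busy[w->bucket]) {
                *prevp = w->next;               /* unlink and hand it out */
                if (w->bucket >= 0) {
                    q->bucket_busy[w->bucket] = true;
                    if (q->hash_tail[w->bucket] == w)
                        q->hash_tail[w->bucket] = NULL;
                }
                return w;
            }
            /* "all items with this hash lie in [work, tail]": jump past the run */
            prevp = &q->hash_tail[w->bucket]->next;
        }
        return NULL;    /* caller would mark the queue stalled here */
    }

The kernel only walks this list with acct->lock held (see the comment at line 272), which is what makes the unlink safe; the sketch leaves locking out.
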
573 struct io_wq_work *work) in io_assign_current_work() argument
575 if (work) { in io_assign_current_work()
581 worker->cur_work = work; in io_assign_current_work()
596 struct io_wq_work *work; in io_worker_handle_work() local
599 * If we got some work, mark us as busy. If we didn't, but in io_worker_handle_work()
600 * the list isn't empty, it means we stalled on hashed work. in io_worker_handle_work()
601 * Mark us stalled so we don't keep looking for work when we in io_worker_handle_work()
602 * can't make progress, any work completion or insertion will in io_worker_handle_work()
605 work = io_get_next_work(acct, wq); in io_worker_handle_work()
606 if (work) { in io_worker_handle_work()
609 * it becomes the active work. That avoids a window in io_worker_handle_work()
610 * where the work has been removed from our general in io_worker_handle_work()
611 * work list, but isn't yet discoverable as the in io_worker_handle_work()
612 * current work item for this worker. in io_worker_handle_work()
615 worker->cur_work = work; in io_worker_handle_work()
621 if (!work) in io_worker_handle_work()
626 io_assign_current_work(worker, work); in io_worker_handle_work()
632 unsigned int work_flags = atomic_read(&work->flags); in io_worker_handle_work()
637 next_hashed = wq_next_work(work); in io_worker_handle_work()
641 atomic_or(IO_WQ_WORK_CANCEL, &work->flags); in io_worker_handle_work()
642 io_wq_submit_work(work); in io_worker_handle_work()
645 linked = io_wq_free_work(work); in io_worker_handle_work()
646 work = next_hashed; in io_worker_handle_work()
647 if (!work && linked && !io_wq_is_hashed(linked)) { in io_worker_handle_work()
648 work = linked; in io_worker_handle_work()
651 io_assign_current_work(worker, work); in io_worker_handle_work()
664 } while (work); in io_worker_handle_work()
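
The io_worker_handle_work() hits show the shape of the dispatch loop: the item is published as worker->cur_work before it disappears from the general list (the comment at file lines 609-612 names the window this closes for cancellation), it is then submitted, freed, and the loop continues with either the next item in the same hash chain or the work that was linked behind it. A compressed sketch of that loop, using stand-in types and helpers rather than the kernel's:

    #include <stddef.h>

    struct work {
        struct work *next_hashed;   /* next item in the same hash chain */
        struct work *linked;        /* follow-on work discovered on completion */
        void (*run)(struct work *);
    };

    struct worker {
        struct work *cur_work;      /* what a canceller would look for */
    };

    /* stand-ins for io_wq_submit_work() / io_wq_free_work() */
    static void submit_work(struct work *w)        { w->run(w); }
    static struct work *free_work(struct work *w)  { return w->linked; }

    static void handle_work(struct worker *worker, struct work *work)
    {
        do {
            struct work *next = work->next_hashed;
            struct work *linked;

            /* publish before running, so cancellation can always find it */
            worker->cur_work = work;
            submit_work(work);

            linked = free_work(work);
            work = next;
            /*
             * The listed kernel code only runs the linked item inline when it
             * is not hashed; a hashed linked item goes back through the queue.
             * The sketch skips that distinction.
             */
            if (!work && linked)
                work = linked;
            worker->cur_work = work;
        } while (work);
    }
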
692 * If we have work to do, io_acct_run_queue() returns with in io_wq_worker()
755 * running and we have work pending, wake up a free one or create a new one.
787 static bool io_wq_work_match_all(struct io_wq_work *work, void *data) in io_wq_work_match_all() argument
822 schedule_delayed_work(&worker->work, in queue_create_worker_retry()
868 static void io_workqueue_create(struct work_struct *work) in io_workqueue_create() argument
870 struct io_worker *worker = container_of(work, struct io_worker, in io_workqueue_create()
871 work.work); in io_workqueue_create()
909 INIT_DELAYED_WORK(&worker->work, io_workqueue_create); in create_io_worker()
960 static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq) in io_run_cancel() argument
963 atomic_or(IO_WQ_WORK_CANCEL, &work->flags); in io_run_cancel()
964 io_wq_submit_work(work); in io_run_cancel()
965 work = io_wq_free_work(work); in io_run_cancel()
966 } while (work); in io_run_cancel()
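
io_run_cancel() (file lines 960-966) is the cancellation idiom for work that will never run normally: set the cancel flag, push the item through the ordinary submit path, which is expected to notice the flag and complete the item with an error, then repeat for whatever was linked behind it. A self-contained sketch of that loop with invented types:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stddef.h>

    #define WORK_CANCEL (1u << 1)   /* stands in for IO_WQ_WORK_CANCEL */

    struct work {
        atomic_uint flags;
        struct work *linked;                    /* next item in the link chain */
        void (*complete)(struct work *, int);
    };

    /* stand-in submit path: honours the cancel flag instead of doing I/O */
    static void submit_work(struct work *w)
    {
        if (atomic_load(&w->flags) & WORK_CANCEL)
            w->complete(w, -ECANCELED);
    }

    static struct work *free_work(struct work *w)
    {
        return w->linked;
    }

    static void run_cancel(struct work *work)
    {
        do {
            atomic_fetch_or(&work->flags, WORK_CANCEL);
            submit_work(work);          /* completes the item with an error */
            work = free_work(work);     /* walk any linked follow-on work */
        } while (work);
    }
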
970 struct io_wq_work *work, unsigned int work_flags) in io_wq_insert_work() argument
977 wq_list_add_tail(&work->list, &acct->work_list); in io_wq_insert_work()
983 wq->hash_tail[hash] = work; in io_wq_insert_work()
987 wq_list_add_after(&work->list, &tail->list, &acct->work_list); in io_wq_insert_work()
990 static bool io_wq_work_match_item(struct io_wq_work *work, void *data) in io_wq_work_match_item() argument
992 return work == data; in io_wq_work_match_item()
995 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) in io_wq_enqueue() argument
997 unsigned int work_flags = atomic_read(&work->flags); in io_wq_enqueue()
1001 .data = work, in io_wq_enqueue()
1012 io_run_cancel(work, wq); in io_wq_enqueue()
1017 io_wq_insert_work(wq, acct, work, work_flags); in io_wq_enqueue()
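
io_wq_insert_work() (file lines 970-987) is the producer side of the invariant used above: an unhashed item is simply appended to the pending list, while a hashed item is linked in directly behind the current hash_tail[] entry for its bucket and then becomes the new tail, keeping every same-hash run contiguous. A sketch with the same kind of invented queue as the get_next_work() example (busy flags omitted):

    #include <stddef.h>

    #define NR_BUCKETS 64

    struct work {
        struct work *next;
        int bucket;                     /* -1: not hashed */
    };

    struct queue {
        struct work *head;
        struct work *hash_tail[NR_BUCKETS];
    };

    static void append_tail(struct queue *q, struct work *w)
    {
        struct work **pp = &q->head;

        while (*pp)
            pp = &(*pp)->next;
        w->next = NULL;
        *pp = w;
    }

    static void insert_work(struct queue *q, struct work *w)
    {
        struct work *tail;

        if (w->bucket < 0) {            /* unhashed: plain append */
            append_tail(q, w);
            return;
        }

        tail = q->hash_tail[w->bucket];
        q->hash_tail[w->bucket] = w;    /* w becomes the last of its bucket */
        if (!tail) {                    /* nothing of this bucket queued yet */
            append_tail(q, w);
            return;
        }
        /* splice in after the old tail so the same-hash run stays contiguous */
        w->next = tail->next;
        tail->next = w;
    }
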
1046 * Work items that hash to the same value will not be done in parallel.
1049 void io_wq_hash_work(struct io_wq_work *work, void *val) in io_wq_hash_work() argument
1054 atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags); in io_wq_hash_work()
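
io_wq_hash_work() and io_get_work_hash() (file lines 1049-1054 and 158-160) show how the per-bucket serialisation is encoded: a bucket derived from the key pointer is OR-ed into the upper bits of the atomic flags word, above IO_WQ_HASH_SHIFT, next to a "hashed" marker bit, and read back with a shift. A small userspace sketch of that encoding; the constants are made up and the crude pointer hash stands in for the kernel's:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WORK_HASHED  (1u << 2)   /* stands in for IO_WQ_WORK_HASHED */
    #define HASH_SHIFT   8           /* stands in for IO_WQ_HASH_SHIFT */
    #define NR_BUCKETS   64          /* same-bucket items never run in parallel */

    struct work {
        atomic_uint flags;
    };

    /* mirror of the io_wq_hash_work() line: OR in the marker bit plus the bucket */
    static void hash_work(struct work *w, const void *key)
    {
        unsigned int bucket = (unsigned int)(((uintptr_t)key >> 4) % NR_BUCKETS);

        atomic_fetch_or(&w->flags, WORK_HASHED | (bucket << HASH_SHIFT));
    }

    /* mirror of io_get_work_hash(): the bucket lives above HASH_SHIFT */
    static unsigned int get_work_hash(struct work *w)
    {
        return atomic_load(&w->flags) >> HASH_SHIFT;
    }

    int main(void)
    {
        static int key;                 /* stand-in for whatever pointer the caller keys on */
        struct work a = { 0 }, b = { 0 };

        hash_work(&a, &key);
        hash_work(&b, &key);
        /* same key, same bucket: the queue will serialise one behind the other */
        printf("bucket(a)=%u bucket(b)=%u\n", get_work_hash(&a), get_work_hash(&b));
        return 0;
    }
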
1059 struct io_wq_work *work) in __io_wq_worker_cancel() argument
1061 if (work && match->fn(work, match->data)) { in __io_wq_worker_cancel()
1062 atomic_or(IO_WQ_WORK_CANCEL, &work->flags); in __io_wq_worker_cancel()
1076 * may dereference the passed in work. in io_wq_worker_cancel()
1088 struct io_wq_work *work, in io_wq_remove_pending() argument
1091 unsigned int hash = io_get_work_hash(work); in io_wq_remove_pending()
1094 if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) { in io_wq_remove_pending()
1102 wq_list_del(&acct->work_list, &work->list, prev); in io_wq_remove_pending()
1110 struct io_wq_work *work; in io_acct_cancel_pending_work() local
1114 work = container_of(node, struct io_wq_work, list); in io_acct_cancel_pending_work()
1115 if (!match->fn(work, match->data)) in io_acct_cancel_pending_work()
1117 io_wq_remove_pending(wq, acct, work, prev); in io_acct_cancel_pending_work()
1119 io_run_cancel(work, wq); in io_acct_cancel_pending_work()
1175 * from there. CANCEL_OK means that the work is returned as-new, in io_wq_cancel_cb()
1178 * Then check if a free (going busy) or busy worker has the work in io_wq_cancel_cb()
1184 * we'll find a work item regardless of state. in io_wq_cancel_cb()
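
The io_wq_cancel_cb() comments at file lines 1175-1184 describe a two-phase cancellation: a matching item still on the pending list is removed and cancelled directly (CANCEL_OK, it never ran), and only then are the workers' current items checked and flagged (CANCEL_RUNNING); doing both under the right locks is what guarantees a queued item is found in one state or the other. A simplified single-threaded sketch of the two phases, with invented types and no locking:

    #include <stdbool.h>
    #include <stddef.h>

    #define NR_WORKERS 4

    enum cancel_result { CANCEL_NOTFOUND, CANCEL_OK, CANCEL_RUNNING };

    struct work {
        struct work *next;
        bool cancel_flag;
        void *owner;                    /* whatever the match callback keys on */
    };

    struct wq {
        struct work *pending;               /* queued, not picked up yet */
        struct work *running[NR_WORKERS];   /* each worker's cur_work, may be NULL */
    };

    typedef bool (*match_fn)(struct work *, void *data);

    static enum cancel_result cancel_cb(struct wq *wq, match_fn match, void *data)
    {
        struct work **pp;
        int i;

        /* phase 1: pending work never ran, so it can be unlinked and cancelled */
        for (pp = &wq->pending; *pp; pp = &(*pp)->next) {
            if (match(*pp, data)) {
                struct work *w = *pp;

                *pp = w->next;
                w->cancel_flag = true;  /* would then go through run_cancel() above */
                return CANCEL_OK;
            }
        }

        /* phase 2: a running item can only be flagged; its worker completes it */
        for (i = 0; i < NR_WORKERS; i++) {
            struct work *w = wq->running[i];

            if (w && match(w, data)) {
                w->cancel_flag = true;
                return CANCEL_RUNNING;
            }
        }
        return CANCEL_NOTFOUND;
    }

In the kernel the pending-list scan also has to fix up hash_tail[] when the removed item was the tail of its bucket (io_wq_remove_pending(), file lines 1088-1102); the sketch leaves that out.
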