Lines Matching refs:task (sys/kern/subr_taskqueue.c)

57 struct task *tb_running;
64 STAILQ_HEAD(, task) tq_queue;
66 struct task *tq_hint;
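These three matches are structure fields rather than code: tb_running lives in the per-run busy record, while tq_queue and tq_hint are members of the queue itself. A minimal sketch of how the fields relate; only the matched fields are verbatim, and the other members (including the tq_active list used below) are assumptions about the surrounding FreeBSD source:

struct taskqueue_busy {
	struct task	*tb_running;		/* task currently executing */
	LIST_ENTRY(taskqueue_busy) tb_link;	/* assumed busy-list linkage */
};

struct taskqueue {
	STAILQ_HEAD(, task) tq_queue;		/* pending tasks, sorted by priority */
	struct task	*tq_hint;		/* cached insertion point for enqueue */
	LIST_HEAD(, taskqueue_busy) tq_active;	/* assumed list of busy records */
	/* locking, thread, and callback members omitted */
};
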
127 task_get_busy(struct taskqueue *queue, struct task *task) in task_get_busy() argument
133 if (tb->tb_running == task) in task_get_busy()
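Line 133 is the comparison inside task_get_busy(), which walks the queue's busy records looking for one whose tb_running is the given task. A sketch of the full function, assuming the tq_active list from the structure sketch above and a TQ_ASSERT_LOCKED-style lock assertion:

static struct taskqueue_busy *
task_get_busy(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);	/* caller holds the queue lock */
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (tb);
	}
	return (NULL);
}
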
231 taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task, int flags) in taskqueue_enqueue_locked() argument
233 struct task *ins; in taskqueue_enqueue_locked()
234 struct task *prev; in taskqueue_enqueue_locked()
237 KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func")); in taskqueue_enqueue_locked()
242 tb = task_get_busy(queue, task); in taskqueue_enqueue_locked()
252 if (task->ta_pending) { in taskqueue_enqueue_locked()
257 if (task->ta_pending < USHRT_MAX) in taskqueue_enqueue_locked()
258 task->ta_pending++; in taskqueue_enqueue_locked()
269 prev = STAILQ_LAST(&queue->tq_queue, task, ta_link); in taskqueue_enqueue_locked()
270 if (!prev || prev->ta_priority >= task->ta_priority) { in taskqueue_enqueue_locked()
271 STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link); in taskqueue_enqueue_locked()
274 if (prev && prev->ta_priority >= task->ta_priority) { in taskqueue_enqueue_locked()
281 if (ins->ta_priority < task->ta_priority) in taskqueue_enqueue_locked()
285 STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link); in taskqueue_enqueue_locked()
286 queue->tq_hint = task; in taskqueue_enqueue_locked()
288 STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link); in taskqueue_enqueue_locked()
291 task->ta_pending = 1; in taskqueue_enqueue_locked()
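Lines 252-291 give taskqueue_enqueue_locked() its two behaviors: re-enqueueing an already-pending task only bumps ta_pending (saturating at USHRT_MAX), while a fresh task is inserted so the queue stays sorted by descending ta_priority, with tq_hint caching the last insertion point to short-circuit the scan. Reconstructing the lines the search skipped, the insertion logic reads roughly as follows; the else branches and the scan loop are inferred and should be checked against the source:

	/* Fast path: append at the tail when the last task outranks us. */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		/* Resume from the hint when it still outranks the new task. */
		prev = queue->tq_hint;
		if (prev && prev->ta_priority >= task->ta_priority) {
			ins = STAILQ_NEXT(prev, ta_link);
		} else {
			prev = NULL;
			ins = STAILQ_FIRST(&queue->tq_queue);
		}
		/* Scan forward to the first strictly lower-priority task. */
		for (; ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;
		if (prev) {
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
			queue->tq_hint = task;
		} else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}
	task->ta_pending = 1;
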
304 taskqueue_enqueue_flags(struct taskqueue *queue, struct task *task, int flags) in taskqueue_enqueue_flags() argument
309 res = taskqueue_enqueue_locked(queue, task, flags); in taskqueue_enqueue_flags()
316 taskqueue_enqueue(struct taskqueue *queue, struct task *task) in taskqueue_enqueue() argument
318 return (taskqueue_enqueue_flags(queue, task, 0)); in taskqueue_enqueue()
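taskqueue_enqueue() is the zero-flags wrapper around taskqueue_enqueue_flags(), which simply takes the queue lock around taskqueue_enqueue_locked(). A minimal caller, for orientation; the handler, its name, and the use of the stock taskqueue_thread queue are illustrative, not from this file:

static void
example_fn(void *context, int pending)
{
	/* pending = number of enqueues collapsed since the last run */
	printf("example task ran, pending=%d\n", pending);
}

static struct task example_task;

static void
example_setup_and_fire(void)
{
	TASK_INIT(&example_task, 0, example_fn, NULL);	/* priority 0 */
	/* Enqueue as often as needed; duplicates merge into ta_pending. */
	taskqueue_enqueue(taskqueue_thread, &example_task);
}
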
402 struct task t_barrier; in taskqueue_drain_tq_queue()
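t_barrier at line 402 is an on-stack task used as a queue barrier: taskqueue_drain_tq_queue() enqueues it behind the work already pending and sleeps until it has executed, at which point everything queued ahead of it is done. A hedged sketch of the idea only; the real function's priority choice, flags, and sleep primitive differ, and barrier_fn is a made-up name:

static void
barrier_fn(void *context, int pending)
{
	/* Running at all is the signal; there is no work to do. */
}

static void
drain_queue_sketch(struct taskqueue *queue)
{
	struct task t_barrier;

	/*
	 * Priority 0 lands the barrier at the tail of the sorted queue;
	 * the real code must also keep later high-priority enqueues from
	 * overtaking it, which this sketch does not handle.
	 */
	TASK_INIT(&t_barrier, 0, barrier_fn, &t_barrier);
	taskqueue_enqueue(queue, &t_barrier);
	/* The wakeup(task) in taskqueue_run_locked() (line 520) ends this. */
	while (t_barrier.ta_pending != 0)
		tsleep(&t_barrier, PWAIT, "tqdrain", hz);
}
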
488 struct task *task; in taskqueue_run_locked() local
498 while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) { in taskqueue_run_locked()
500 if (queue->tq_hint == task) in taskqueue_run_locked()
502 pending = task->ta_pending; in taskqueue_run_locked()
503 task->ta_pending = 0; in taskqueue_run_locked()
504 tb.tb_running = task; in taskqueue_run_locked()
509 KASSERT(task->ta_func != NULL, ("task->ta_func is NULL")); in taskqueue_run_locked()
510 if (!in_net_epoch && TASK_IS_NET(task)) { in taskqueue_run_locked()
513 } else if (in_net_epoch && !TASK_IS_NET(task)) { in taskqueue_run_locked()
517 task->ta_func(task->ta_context, pending); in taskqueue_run_locked()
520 wakeup(task); in taskqueue_run_locked()
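taskqueue_run_locked() is the dispatch loop: it pops the head task, clears ta_pending (passing the old count to the handler), publishes the task in tb_running so task_get_busy() can see it, drops the queue lock across the handler, and enters or leaves the network epoch as TASK_IS_NET() flips between consecutive tasks. A sketch of the loop built from the matched lines; the lock macros, the epoch tracker, and the busy-record bookkeeping are filled in by assumption:

	struct epoch_tracker et;
	struct taskqueue_busy tb;
	struct task *task;
	bool in_net_epoch = false;
	int pending;

	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);

	while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);		/* run the handler unlocked */

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		if (!in_net_epoch && TASK_IS_NET(task)) {
			in_net_epoch = true;
			NET_EPOCH_ENTER(et);
		} else if (in_net_epoch && !TASK_IS_NET(task)) {
			NET_EPOCH_EXIT(et);
			in_net_epoch = false;
		}
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		wakeup(task);			/* unblocks taskqueue_drain() */
	}
	if (in_net_epoch)
		NET_EPOCH_EXIT(et);
	LIST_REMOVE(&tb, tb_link);
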
542 taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task) in taskqueue_poll_is_busy() argument
547 retval = task->ta_pending > 0 || task_get_busy(queue, task) != NULL; in taskqueue_poll_is_busy()
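taskqueue_poll_is_busy() is the non-blocking probe: a task is busy if it is still queued (ta_pending > 0) or currently executing (task_get_busy() finds a busy record). The full function is short; only the lock macros around line 547 are assumed:

int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_get_busy(queue, task) != NULL;
	TQ_UNLOCK(queue);

	return (retval);
}
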
554 taskqueue_cancel_locked(struct taskqueue *queue, struct task *task, in taskqueue_cancel_locked() argument
560 if (task->ta_pending > 0) { in taskqueue_cancel_locked()
561 STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link); in taskqueue_cancel_locked()
562 if (queue->tq_hint == task) in taskqueue_cancel_locked()
566 *pendp = task->ta_pending; in taskqueue_cancel_locked()
567 task->ta_pending = 0; in taskqueue_cancel_locked()
568 tb = task_get_busy(queue, task); in taskqueue_cancel_locked()
578 taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp) in taskqueue_cancel() argument
583 error = taskqueue_cancel_locked(queue, task, pendp); in taskqueue_cancel()
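The cancel pair works in two layers: taskqueue_cancel_locked() unlinks a still-pending task (invalidating tq_hint if it pointed at it), reports the old ta_pending through pendp, and uses task_get_busy() to detect a task that is already running; taskqueue_cancel() wraps it in the queue lock. A sketch assembled from the matched lines, with the return-value handling filled in by assumption (EBUSY when the handler is mid-run):

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{
	struct taskqueue_busy *tb;
	int retval = 0;

	if (task->ta_pending > 0) {
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
	}
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	tb = task_get_busy(queue, task);
	if (tb != NULL)
		retval = EBUSY;	/* handler is running; cannot cancel */

	return (retval);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}
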
611 taskqueue_drain(struct taskqueue *queue, struct task *task) in taskqueue_drain() argument
618 while (task->ta_pending != 0 || task_get_busy(queue, task) != NULL) in taskqueue_drain()
619 TQ_SLEEP(queue, task, "tq_drain"); in taskqueue_drain()
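taskqueue_drain() combines the two busy conditions into a sleep loop: it blocks while the task is either still queued or currently running, and the wakeup(task) in taskqueue_run_locked() (line 520) wakes it after each run. The full function, with the lock macros and the sleepable-context check filled in by assumption:

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)	/* assumed: warn if we might sleep */
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_get_busy(queue, task) != NULL)
		TQ_SLEEP(queue, task, "tq_drain");
	TQ_UNLOCK(queue);
}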