/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task		*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
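/*
 * Consumers normally reach _timeout_task_init() through the
 * TIMEOUT_TASK_INIT() wrapper in <sys/taskqueue.h> and later arm the task
 * with taskqueue_enqueue_timeout().  A minimal sketch (the "foo" names are
 * hypothetical, not part of this file):
 *
 *	struct foo_softc {
 *		struct timeout_task	foo_tt;
 *	};
 *
 *	static void
 *	foo_timeout_fn(void *arg, int pending)
 *	{
 *		struct foo_softc *sc = arg;
 *		...
 *	}
 *
 *	TIMEOUT_TASK_INIT(taskqueue_thread, &sc->foo_tt, 0,
 *	    foo_timeout_fn, sc);
 *	taskqueue_enqueue_timeout(taskqueue_thread, &sc->foo_tt, hz);
 */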
static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}
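/*
 * A sketch of the usual consumer pattern (hypothetical "foo" names): a
 * private queue backed by taskqueue_thread_enqueue with one worker thread.
 * The queue pointer is passed by reference as the enqueue context so the
 * callback can dereference it even though it is captured before
 * taskqueue_create() returns.
 *
 *	static struct taskqueue *foo_tq;
 *	static struct task foo_task;
 *
 *	foo_tq = taskqueue_create("foo_taskq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &foo_tq);
 *	taskqueue_start_threads(&foo_tq, 1, PWAIT, "foo taskq");
 *
 *	TASK_INIT(&foo_task, 0, foo_task_fn, sc);
 *	taskqueue_enqueue(foo_tq, &foo_task);
 */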
void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
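/*
 * Insert a task into the queue in priority order.  Called with the queue
 * lock held; every path below releases the lock before returning (for
 * TQ_FLAGS_UNLOCKED_ENQUEUE queues this happens before the enqueue
 * callback is invoked, otherwise after it).
 */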
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}
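/*
 * Note that enqueues of a task that is already pending are coalesced:
 * ta_pending is incremented (saturating at USHRT_MAX) and the function
 * still runs only once, receiving the count.  A handler might therefore
 * batch its work, as in this hypothetical sketch:
 *
 *	static void
 *	foo_task_fn(void *arg, int pending)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		foo_process_events(sc, pending);
 *	}
 */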
static void
taskqueue_drain_running(struct taskqueue *queue)
{

	while (!TAILQ_EMPTY(&queue->tq_active))
		TQ_SLEEP(queue, &queue->tq_active, &queue->tq_mutex,
		    PWAIT, "-", 0);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}
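/*
 * Blocking only gates the enqueue callback: new tasks are still linked
 * onto the queue, and tasks already queued or running are unaffected;
 * taskqueue_unblock() re-kicks tq_enqueue if work accumulated meanwhile.
 * A sketch of the intended pairing (hypothetical names):
 *
 *	taskqueue_block(sc->foo_tq);
 *	foo_reset_hardware(sc);
 *	taskqueue_unblock(sc->foo_tq);
 */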
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
	if (TAILQ_EMPTY(&queue->tq_active))
		wakeup(&queue->tq_active);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{
	struct task *task;

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	task = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (task != NULL)
		while (task->ta_pending != 0)
			TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	taskqueue_drain_running(queue);
	KASSERT(STAILQ_EMPTY(&queue->tq_queue),
	    ("taskqueue queue is not empty after draining"));
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}
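/*
 * The cancel/drain split above supports the usual teardown idiom: try the
 * cheap cancel first, and only block if the task was caught running
 * (EBUSY).  A hypothetical sketch:
 *
 *	if (taskqueue_cancel(tq, &sc->foo_task, NULL) != 0)
 *		taskqueue_drain(tq, &sc->foo_task);
 */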
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
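/*
 * Create "count" kernel threads to service the queue at priority "pri",
 * named after "ktname" (with a _%d suffix when count > 1).  When "mask"
 * is non-NULL, each thread is pinned to that CPU set before it is first
 * scheduled.
 */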
static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *ktname)
{
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: tid %llu: can't pin; error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	char ktname[MAXCOMLEN + 1];
	va_list ap;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	return (_taskqueue_start_threads(tqp, count, pri, NULL, ktname));
}

int
taskqueue_start_threads_pinned(struct taskqueue **tqp, int count, int pri,
    int cpu_id, const char *name, ...)
{
	char ktname[MAXCOMLEN + 1];
	va_list ap;
	cpuset_t mask;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	/*
	 * In case someone passes in NOCPU, just fall back to the
	 * default behaviour of "don't pin".
	 */
	if (cpu_id != NOCPU) {
		CPU_ZERO(&mask);
		CPU_SET(cpu_id, &mask);
	}

	return (_taskqueue_start_threads(tqp, count, pri,
	    cpu_id == NOCPU ? NULL : &mask, ktname));
}
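/*
 * A sketch of pinned use (hypothetical names): bind a per-CPU worker to
 * CPU "cpu" so its tasks never migrate.
 *
 *	foo_tq = taskqueue_create("foo_taskq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &foo_tq);
 *	taskqueue_start_threads_pinned(&foo_tq, 1, PWAIT, cpu,
 *	    "foo taskq cpu%d", cpu);
 */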
static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));
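/*
 * TASKQUEUE_DEFINE_THREAD(name) (from <sys/taskqueue.h>) likewise
 * instantiates a global queue, here "taskqueue_thread": a queue backed by
 * taskqueue_thread_enqueue whose worker thread is started from a SYSINIT
 * at boot.
 */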
TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
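/*
 * taskqueue_member() reports whether "td" is one of the queue's service
 * threads.  One plausible use (a sketch, hypothetical names) is guarding
 * against self-deadlock before a blocking drain:
 *
 *	KASSERT(!taskqueue_member(tq, curthread),
 *	    ("draining taskqueue from its own thread"));
 *	taskqueue_drain(tq, &sc->foo_task);
 */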