/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
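
/*
 * Illustrative sketch (hypothetical consumer code, not part of this file):
 * _timeout_task_init() backs the TIMEOUT_TASK_INIT() macro from
 * <sys/taskqueue.h>.  A driver with a softc embedding a struct timeout_task
 * might arm a delayed task roughly like this:
 *
 *	TIMEOUT_TASK_INIT(taskqueue_thread, &sc->tt, 0, my_timeout_fn, sc);
 *	taskqueue_enqueue_timeout(taskqueue_thread, &sc->tt, hz);
 *
 * Here "sc" and "my_timeout_fn" are assumed names; taskqueue_thread is the
 * system queue defined by TASKQUEUE_DEFINE_THREAD(thread) below.
 */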

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name));
}
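
/*
 * Illustrative sketch (assumed consumer code): the usual pattern for a
 * private queue serviced by its own kernel thread.  Note that
 * taskqueue_thread_enqueue() expects its context to be a pointer to the
 * taskqueue pointer itself:
 *
 *	sc->tq = taskqueue_create("mydev_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->tq);
 *	taskqueue_start_threads(&sc->tq, 1, PWAIT, "mydev taskq");
 *
 * With M_WAITOK the allocation cannot fail, so the return value need not
 * be checked; "sc" is a hypothetical softc.
 */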

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}
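
/*
 * Illustrative sketch (hypothetical callback): the SHUTDOWN callback runs
 * from the exiting queue thread (see taskqueue_thread_loop() below), which
 * makes it a convenient hook for releasing per-queue state:
 *
 *	taskqueue_set_callback(sc->tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN,
 *	    mydev_tq_shutdown_cb, sc);
 */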

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}
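
/*
 * Illustrative sketch: the basic enqueue path from a consumer's point of
 * view.  TASK_INIT() is typically done once at attach time; enqueueing a
 * task that is already pending only bumps ta_pending rather than queueing
 * it twice ("sc" and "mydev_rx_task_fn" are assumed names):
 *
 *	TASK_INIT(&sc->rx_task, 0, mydev_rx_task_fn, sc);
 *	...
 *	taskqueue_enqueue(sc->tq, &sc->rx_task);
 */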

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing */
		TQ_UNLOCK(queue);
		res = -1;
	} else if (sbt == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (sbt < 0)
				sbt = -sbt;	/* Ignore overflow. */
		}
		if (sbt > 0) {
			callout_reset_sbt(&timeout_task->c, sbt, pr,
			    taskqueue_timeout_func, timeout_task, flags);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *ttask, int ticks)
{

	return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
	    0, 0));
}
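
/*
 * Illustrative sketch: the sbintime_t interface permits sub-tick delays
 * and an explicit precision, e.g. roughly 100ms with 10ms of slop
 * (values chosen only for illustration):
 *
 *	taskqueue_enqueue_timeout_sbt(sc->tq, &sc->tt,
 *	    100 * SBT_1MS, 10 * SBT_1MS, 0);
 *
 * taskqueue_enqueue_timeout() above is simply the tick-based wrapper.
 */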

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static int
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return (0);

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
	return (1);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static int
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return (0);

	/* Block taskq_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wakeup any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
	return (1);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}
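
/*
 * Illustrative sketch: taskqueue_block()/taskqueue_unblock() only gate new
 * dispatch.  Tasks already running continue, tasks enqueued while blocked
 * stay on the queue, and taskqueue_unblock() kicks the dispatcher if
 * anything accumulated.  A hypothetical reset path might do:
 *
 *	taskqueue_block(sc->tq);
 *	... reprogram hardware ...
 *	taskqueue_unblock(sc->tq);
 */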

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(task != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

/*
 * Only use this function in single threaded contexts. It returns
 * non-zero if the given task is either pending or running. Else the
 * task is idle and can be queued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_is_running(queue, task);
	TQ_UNLOCK(queue);

	return (retval);
}
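
/*
 * Illustrative sketch: a single-threaded detach path might poll a task to
 * idle instead of using taskqueue_drain() (interval is arbitrary):
 *
 *	while (taskqueue_poll_is_busy(sc->tq, &sc->task))
 *		pause("tqbusy", hz / 100);
 */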

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}
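
/*
 * Illustrative sketch: the usual "cancel or wait" pattern.  An EBUSY
 * return means the task is currently executing, so a caller that must
 * guarantee the task is gone still has to drain:
 *
 *	if (taskqueue_cancel(sc->tq, &sc->task, NULL) != 0)
 *		taskqueue_drain(sc->tq, &sc->task);
 */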

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	(void)taskqueue_drain_tq_queue(queue);
	(void)taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	/*
	 * Set flag to prevent timer from re-starting during drain:
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
	    ("Drain already in progress"));
	timeout_task->f |= DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);

	/*
	 * Clear flag to allow timer to re-start:
	 */
	TQ_LOCK(queue);
	timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);
}

void
taskqueue_quiesce(struct taskqueue *queue)
{
	int ret;

	TQ_LOCK(queue);
	do {
		ret = taskqueue_drain_tq_queue(queue);
		if (ret == 0)
			ret = taskqueue_drain_tq_active(queue);
	} while (ret != 0);
	TQ_UNLOCK(queue);
}
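
/*
 * Note the contrast with taskqueue_drain_all() above: taskqueue_quiesce()
 * loops until a single pass observes the queue both empty and idle, so it
 * also waits out tasks that requeue themselves or one another, whereas
 * taskqueue_drain_all() performs only one barrier pass.
 */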

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	if (tq->tq_tcount == 0) {
		free(tq->tq_threads, M_TASKQUEUE);
		tq->tq_threads = NULL;
		return (ENOMEM);
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long)td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}
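
/*
 * Illustrative sketch: pinning the queue's threads to a CPU set
 * (hypothetical consumer code; CPU 0 is chosen arbitrarily):
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	taskqueue_start_threads_cpuset(&sc->tq, 1, PWAIT, &mask,
 *	    "%s taskq", device_get_nameunit(sc->dev));
 *
 * As noted above, a pinning failure is reported but not fatal.
 */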

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue"));
}

static void *taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}