/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct mtx taskqueue_queues_mutex;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
	struct proc		**tq_pproc;
	int			tq_spin;
};

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static void	init_taskqueue_list(void *data);

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 struct proc **pp,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return 0;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_pproc = pp;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}
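/*
 * Create a taskqueue protected by a default (sleepable) mutex.  The
 * enqueue callback is invoked, with the queue lock held, whenever a
 * task is added; *pp optionally points at the proc of the thread that
 * services the queue so that taskqueue_free() can signal it to
 * terminate.
 */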
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 struct proc **pp)
{
	return _taskqueue_create(name, mflags, enqueue, context, pp,
			MTX_DEF, "taskqueue");
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct proc **pp, struct taskqueue *tq)
{
	struct proc *p;

	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(tq);
		PROC_LOCK(p);		/* NB: ensure we don't miss the wakeup */
		TQ_UNLOCK(tq);		/* let taskqueue thread run */
		TQ_SLEEP(tq, p, &p->p_mtx, PWAIT, "taskqueue_destroy", 0);
		PROC_UNLOCK(p);
		TQ_LOCK(tq);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	TQ_LOCK(queue);
	taskqueue_run(queue);
	taskqueue_terminate(queue->tq_pproc, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}
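/*
 * Typical client usage, shown as a sketch only (the softc and handler
 * names below are hypothetical and are not part of this file): a task
 * is initialised once with TASK_INIT() and then handed to
 * taskqueue_enqueue() each time the work needs to run, for example on
 * one of the system queues defined at the bottom of this file:
 *
 *	TASK_INIT(&sc->sc_task, 0, foo_task_handler, sc);
 *	...
 *	taskqueue_enqueue(taskqueue_swi, &sc->sc_task);
 *
 * Enqueueing an already-pending task only bumps its pending count, so
 * the handler runs once with the accumulated count.
 */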
/*
 * Returns with the taskqueue locked.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (strcmp(queue->tq_name, name) == 0) {
			TQ_LOCK(queue);
			mtx_unlock(&taskqueue_queues_mutex);
			return queue;
		}
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return NULL;
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	queue->tq_enqueue(queue->tq_context);

	TQ_UNLOCK(queue);

	return 0;
}
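/*
 * Run all pending tasks on the queue.  Each task is dequeued and its
 * handler is called with the queue unlocked, so a handler may enqueue
 * further work; tq_running and the wakeup() on completion let
 * taskqueue_drain() wait for a handler that is currently executing.
 */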
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		queue->tq_running = NULL;
		wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		TQ_UNLOCK(queue);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	if (queue->tq_spin) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep_spin(task, &queue->tq_mutex, "-", 0);
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
		mtx_unlock(&queue->tq_mutex);
	}
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
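/*
 * Main loop for a taskqueue service thread (see TASKQUEUE_DEFINE_THREAD
 * below): run any pending tasks, sleep on the queue until
 * taskqueue_thread_enqueue() wakes it, and rendezvous with
 * taskqueue_free() when the queue is being destroyed.
 */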
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	do {
		taskqueue_run(tq);
		TQ_SLEEP(tq, tq, &tq->tq_mutex, curthread->td_priority, "-", 0);
	} while (*tq->tq_pproc != NULL);

	/* rendezvous with thread that asked us to terminate */
	wakeup_one(tq);
	TQ_UNLOCK(tq);
	kthread_exit(0);
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 struct proc **pp)
{
	return _taskqueue_create(name, mflags, enqueue, context, pp,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void *taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, 0,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
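/*
 * Teardown sketch (illustrative only; "sc" and its task field are
 * hypothetical driver names, not part of this file): before freeing the
 * memory a task points into, a driver would typically drain it so the
 * handler is neither pending nor running, e.g.
 *
 *	taskqueue_drain(taskqueue_swi, &sc->sc_task);
 *
 * A private queue created with taskqueue_create() is released with
 * taskqueue_free(), which runs any remaining tasks and signals the
 * service thread to exit.
 */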