/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>
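/*
 * Generic kernel task queues: deferred work is described by a struct task,
 * handed in with taskqueue_enqueue(), and later executed either from a
 * software interrupt handler (taskqueue_swi, taskqueue_swi_giant,
 * taskqueue_fast) or from one or more dedicated kernel threads
 * (taskqueue_thread and queues set up with taskqueue_start_threads()).
 */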
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct mtx taskqueue_queues_mutex;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static void	init_taskqueue_list(void *data);

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);
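/*
 * Common constructor shared by taskqueue_create() and, at the end of this
 * file, taskqueue_create_fast(); the two differ only in whether the queue
 * is protected by a default (sleep) mutex or a spin mutex.
 */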
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue);
	taskqueue_terminate(queue->tq_threads, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
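/*
 * Illustrative sketch (not part of this file) of the typical consumer
 * lifecycle of a thread-backed queue; "sc", "sc_tq", "sc_task" and
 * "my_task_fn" are hypothetical driver names:
 *
 *	sc->sc_tq = taskqueue_create("mydev_taskq", M_NOWAIT,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "mydev taskq");
 *	TASK_INIT(&sc->sc_task, 0, my_task_fn, sc);
 *	...
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);	(defer work)
 *	...
 *	taskqueue_drain(sc->sc_tq, &sc->sc_task);	(wait for completion)
 *	taskqueue_free(sc->sc_tq);
 */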
/*
 * Returns with the taskqueue locked.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (strcmp(queue->tq_name, name) == 0) {
			TQ_LOCK(queue);
			mtx_unlock(&taskqueue_queues_mutex);
			return queue;
		}
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return NULL;
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}
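/*
 * Blocking a queue does not cancel or stop anything: while TQ_FLAGS_BLOCKED
 * is set, taskqueue_enqueue() still links tasks onto the queue but defers
 * the tq_enqueue callback (recording TQ_FLAGS_PENDING instead), and
 * taskqueue_unblock() issues the deferred callback if anything arrived in
 * the meantime.  A task that is already running is unaffected.
 */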
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		queue->tq_running = NULL;
		wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		TQ_UNLOCK(queue);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	if (queue->tq_spin) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep_spin(task, &queue->tq_mutex, "-", 0);
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
		mtx_unlock(&queue->tq_mutex);
	}
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
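/*
 * Create "count" kernel threads at priority "pri" to service the queue
 * referenced by "*tqp".  The printf-style "name" argument names the
 * threads; when more than one is created, an "_<n>" suffix is appended.
 */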
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, MAXCOMLEN, name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	do {
		taskqueue_run(tq);
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	} while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}
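/*
 * Enqueue callback for thread-backed queues created with
 * taskqueue_create(..., taskqueue_thread_enqueue, &tq): it simply wakes one
 * of the queue's worker threads.  Called with the queue mutex held.
 */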
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));