/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

static void	taskqueue_run(struct taskqueue *, struct task **);

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, "taskqueue");
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue, &queue->tq_running);
	taskqueue_terminate(queue->tq_threads, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
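	 * New tasks usually share the tail's priority, so the common
	 * case is a cheap append; only a higher-priority task forces
	 * the linear scan below to find its insertion point.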
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

static void
taskqueue_run(struct taskqueue *queue, struct task **tpp)
{
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
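		 * The task is then published through *tpp and via its
		 * ta_running pointer before the lock is dropped, so
		 * taskqueue_drain() can still see it executing after
		 * ta_pending has gone to zero.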
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		task->ta_running = tpp;
		*tpp = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		*tpp = NULL;
		wakeup(task);
	}
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 ||
	    (task->ta_running != NULL && task == *task->ta_running)) {
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	}
	TQ_UNLOCK(queue);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	TQ_LOCK(taskqueue_swi);
	taskqueue_run(taskqueue_swi, &taskqueue_swi->tq_running);
	TQ_UNLOCK(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	TQ_LOCK(taskqueue_swi_giant);
	taskqueue_run(taskqueue_swi_giant, &taskqueue_swi_giant->tq_running);
	TQ_UNLOCK(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
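/*
 * Threads are created stopped (RFSTOPPED) and only made runnable once
 * their scheduling priority has been set, so no task can run on a
 * thread before its priority takes effect.
 */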
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;
	struct task *running;

	/*
	 * The kernel stack space is globally addressable, and it would
	 * be an error to ask whether a task is running after the
	 * taskqueue has been released.  So it is safe to have the
	 * task point back to an address in the taskqueue thread's
	 * stack to determine if the task is running.
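	 * The "running" variable below is that stack slot:
	 * taskqueue_run() stores the task currently being executed in
	 * it and clears it again once the task function returns.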
	 */
	running = NULL;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq, &running);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));
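/*
 * The "thread" taskqueue runs its tasks from a dedicated kernel thread
 * rather than from a software-interrupt context, so its task functions
 * are allowed to sleep.
 */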
TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	TQ_LOCK(taskqueue_fast);
	taskqueue_run(taskqueue_fast, &taskqueue_fast->tq_running);
	TQ_UNLOCK(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}
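/*
 * Example usage (a hypothetical driver sketch, not part of this file;
 * "my_tq", "my_softc", and "my_task_fn" are made-up names): a consumer
 * typically creates a private queue backed by one kernel thread,
 * enqueues a task from its interrupt handler, and drains the task
 * before detaching.
 *
 *	struct my_softc {
 *		struct taskqueue	*sc_tq;
 *		struct task		sc_task;
 *	};
 *
 *	static void
 *	my_task_fn(void *context, int pending)
 *	{
 *		struct my_softc *sc = context;
 *
 *		... do deferred work; may sleep in a thread taskqueue ...
 *	}
 *
 *	attach:
 *		sc->sc_tq = taskqueue_create("my_tq", M_WAITOK,
 *		    taskqueue_thread_enqueue, &sc->sc_tq);
 *		taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "my taskq");
 *		TASK_INIT(&sc->sc_task, 0, my_task_fn, sc);
 *
 *	interrupt handler:
 *		taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 *
 *	detach:
 *		taskqueue_drain(sc->sc_tq, &sc->sc_task);
 *		taskqueue_free(sc->sc_tq);
 */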