/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
O'Brien __FBSDID("$FreeBSD$"); 29677b542eSDavid E. O'Brien 30ca2e0534SDoug Rabson #include <sys/param.h> 31ca2e0534SDoug Rabson #include <sys/systm.h> 321de1c550SJohn Baldwin #include <sys/bus.h> 33282873e2SJohn Baldwin #include <sys/interrupt.h> 34ca2e0534SDoug Rabson #include <sys/kernel.h> 35eb5b0e05SJohn Baldwin #include <sys/kthread.h> 361de1c550SJohn Baldwin #include <sys/lock.h> 37ca2e0534SDoug Rabson #include <sys/malloc.h> 381de1c550SJohn Baldwin #include <sys/mutex.h> 3952bc746aSSam Leffler #include <sys/proc.h> 400f92108dSScott Long #include <sys/sched.h> 411de1c550SJohn Baldwin #include <sys/taskqueue.h> 42cb32189eSKenneth D. Merry #include <sys/unistd.h> 430f92108dSScott Long #include <machine/stdarg.h> 44ca2e0534SDoug Rabson 45959b7375SPoul-Henning Kamp static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues"); 467874f606SScott Long static void *taskqueue_giant_ih; 47eb5b0e05SJohn Baldwin static void *taskqueue_ih; 488088699fSJohn Baldwin 49bf73d4d2SMatthew D Fleming struct taskqueue_busy { 50bf73d4d2SMatthew D Fleming struct task *tb_running; 51bf73d4d2SMatthew D Fleming TAILQ_ENTRY(taskqueue_busy) tb_link; 52bf73d4d2SMatthew D Fleming }; 53bf73d4d2SMatthew D Fleming 54ca2e0534SDoug Rabson struct taskqueue { 55ca2e0534SDoug Rabson STAILQ_HEAD(, task) tq_queue; 56ca2e0534SDoug Rabson const char *tq_name; 57ca2e0534SDoug Rabson taskqueue_enqueue_fn tq_enqueue; 58ca2e0534SDoug Rabson void *tq_context; 59bf73d4d2SMatthew D Fleming TAILQ_HEAD(, taskqueue_busy) tq_active; 601de1c550SJohn Baldwin struct mtx tq_mutex; 61175611b6SSam Leffler struct thread **tq_threads; 62175611b6SSam Leffler int tq_tcount; 63694382c8SKip Macy int tq_spin; 640f92108dSScott Long int tq_flags; 65ca2e0534SDoug Rabson }; 66ca2e0534SDoug Rabson 670f92108dSScott Long #define TQ_FLAGS_ACTIVE (1 << 0) 68478cfc73SScott Long #define TQ_FLAGS_BLOCKED (1 << 1) 69478cfc73SScott Long #define TQ_FLAGS_PENDING (1 << 2) 700f92108dSScott Long 71*b79b28b6SJuli Mallett #define TQ_LOCK(tq) \ 
72*b79b28b6SJuli Mallett do { \ 73*b79b28b6SJuli Mallett if ((tq)->tq_spin) \ 74*b79b28b6SJuli Mallett mtx_lock_spin(&(tq)->tq_mutex); \ 75*b79b28b6SJuli Mallett else \ 76*b79b28b6SJuli Mallett mtx_lock(&(tq)->tq_mutex); \ 77*b79b28b6SJuli Mallett } while (0) 789df1a6ddSScott Long 79*b79b28b6SJuli Mallett #define TQ_UNLOCK(tq) \ 80*b79b28b6SJuli Mallett do { \ 81*b79b28b6SJuli Mallett if ((tq)->tq_spin) \ 82*b79b28b6SJuli Mallett mtx_unlock_spin(&(tq)->tq_mutex); \ 83*b79b28b6SJuli Mallett else \ 84*b79b28b6SJuli Mallett mtx_unlock(&(tq)->tq_mutex); \ 85*b79b28b6SJuli Mallett } while (0) 869df1a6ddSScott Long 879df1a6ddSScott Long static __inline int 889df1a6ddSScott Long TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm, 899df1a6ddSScott Long int t) 909df1a6ddSScott Long { 91694382c8SKip Macy if (tq->tq_spin) 929df1a6ddSScott Long return (msleep_spin(p, m, wm, t)); 939df1a6ddSScott Long return (msleep(p, m, pri, wm, t)); 949df1a6ddSScott Long } 959df1a6ddSScott Long 969df1a6ddSScott Long static struct taskqueue * 979df1a6ddSScott Long _taskqueue_create(const char *name, int mflags, 9852bc746aSSam Leffler taskqueue_enqueue_fn enqueue, void *context, 999df1a6ddSScott Long int mtxflags, const char *mtxname) 100ca2e0534SDoug Rabson { 101ca2e0534SDoug Rabson struct taskqueue *queue; 102ca2e0534SDoug Rabson 1031de1c550SJohn Baldwin queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO); 104ca2e0534SDoug Rabson if (!queue) 105d710cae7SWarner Losh return NULL; 106694382c8SKip Macy 107ca2e0534SDoug Rabson STAILQ_INIT(&queue->tq_queue); 108bf73d4d2SMatthew D Fleming TAILQ_INIT(&queue->tq_active); 109ca2e0534SDoug Rabson queue->tq_name = name; 110ca2e0534SDoug Rabson queue->tq_enqueue = enqueue; 111ca2e0534SDoug Rabson queue->tq_context = context; 112694382c8SKip Macy queue->tq_spin = (mtxflags & MTX_SPIN) != 0; 113694382c8SKip Macy queue->tq_flags |= TQ_FLAGS_ACTIVE; 1149df1a6ddSScott Long mtx_init(&queue->tq_mutex, mtxname, 
NULL, mtxflags); 115ca2e0534SDoug Rabson 116ca2e0534SDoug Rabson return queue; 117ca2e0534SDoug Rabson } 118ca2e0534SDoug Rabson 1199df1a6ddSScott Long struct taskqueue * 1209df1a6ddSScott Long taskqueue_create(const char *name, int mflags, 1210f92108dSScott Long taskqueue_enqueue_fn enqueue, void *context) 1229df1a6ddSScott Long { 1230f92108dSScott Long return _taskqueue_create(name, mflags, enqueue, context, 1249df1a6ddSScott Long MTX_DEF, "taskqueue"); 1259df1a6ddSScott Long } 1269df1a6ddSScott Long 12752bc746aSSam Leffler /* 12852bc746aSSam Leffler * Signal a taskqueue thread to terminate. 12952bc746aSSam Leffler */ 13052bc746aSSam Leffler static void 131175611b6SSam Leffler taskqueue_terminate(struct thread **pp, struct taskqueue *tq) 13252bc746aSSam Leffler { 13352bc746aSSam Leffler 134175611b6SSam Leffler while (tq->tq_tcount > 0) { 1350f92108dSScott Long wakeup(tq); 1360f92108dSScott Long TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0); 13752bc746aSSam Leffler } 13852bc746aSSam Leffler } 13952bc746aSSam Leffler 140ca2e0534SDoug Rabson void 141ca2e0534SDoug Rabson taskqueue_free(struct taskqueue *queue) 142ca2e0534SDoug Rabson { 1431de1c550SJohn Baldwin 1449df1a6ddSScott Long TQ_LOCK(queue); 1450f92108dSScott Long queue->tq_flags &= ~TQ_FLAGS_ACTIVE; 146175611b6SSam Leffler taskqueue_terminate(queue->tq_threads, queue); 147bf73d4d2SMatthew D Fleming KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?")); 1481de1c550SJohn Baldwin mtx_destroy(&queue->tq_mutex); 149175611b6SSam Leffler free(queue->tq_threads, M_TASKQUEUE); 150ca2e0534SDoug Rabson free(queue, M_TASKQUEUE); 151ca2e0534SDoug Rabson } 152ca2e0534SDoug Rabson 153ca2e0534SDoug Rabson int 154ca2e0534SDoug Rabson taskqueue_enqueue(struct taskqueue *queue, struct task *task) 155ca2e0534SDoug Rabson { 156ca2e0534SDoug Rabson struct task *ins; 157ca2e0534SDoug Rabson struct task *prev; 158ca2e0534SDoug Rabson 1599df1a6ddSScott Long TQ_LOCK(queue); 160282873e2SJohn Baldwin 
161ca2e0534SDoug Rabson /* 162ca2e0534SDoug Rabson * Count multiple enqueues. 163ca2e0534SDoug Rabson */ 164694382c8SKip Macy if (task->ta_pending) { 165ca2e0534SDoug Rabson task->ta_pending++; 1669df1a6ddSScott Long TQ_UNLOCK(queue); 167ca2e0534SDoug Rabson return 0; 168ca2e0534SDoug Rabson } 169ca2e0534SDoug Rabson 170ca2e0534SDoug Rabson /* 171ca2e0534SDoug Rabson * Optimise the case when all tasks have the same priority. 172ca2e0534SDoug Rabson */ 17351b86781SJeffrey Hsu prev = STAILQ_LAST(&queue->tq_queue, task, ta_link); 174ca2e0534SDoug Rabson if (!prev || prev->ta_priority >= task->ta_priority) { 175ca2e0534SDoug Rabson STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link); 176ca2e0534SDoug Rabson } else { 177d710cae7SWarner Losh prev = NULL; 178ca2e0534SDoug Rabson for (ins = STAILQ_FIRST(&queue->tq_queue); ins; 179ca2e0534SDoug Rabson prev = ins, ins = STAILQ_NEXT(ins, ta_link)) 180ca2e0534SDoug Rabson if (ins->ta_priority < task->ta_priority) 181ca2e0534SDoug Rabson break; 182ca2e0534SDoug Rabson 183ca2e0534SDoug Rabson if (prev) 184ca2e0534SDoug Rabson STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link); 185ca2e0534SDoug Rabson else 186ca2e0534SDoug Rabson STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link); 187ca2e0534SDoug Rabson } 188ca2e0534SDoug Rabson 189ca2e0534SDoug Rabson task->ta_pending = 1; 190694382c8SKip Macy if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) 191ca2e0534SDoug Rabson queue->tq_enqueue(queue->tq_context); 192694382c8SKip Macy else 193478cfc73SScott Long queue->tq_flags |= TQ_FLAGS_PENDING; 194282873e2SJohn Baldwin 1959df1a6ddSScott Long TQ_UNLOCK(queue); 196282873e2SJohn Baldwin 197ca2e0534SDoug Rabson return 0; 198ca2e0534SDoug Rabson } 199ca2e0534SDoug Rabson 200ca2e0534SDoug Rabson void 201478cfc73SScott Long taskqueue_block(struct taskqueue *queue) 202478cfc73SScott Long { 203478cfc73SScott Long 204478cfc73SScott Long TQ_LOCK(queue); 205478cfc73SScott Long queue->tq_flags |= TQ_FLAGS_BLOCKED; 206478cfc73SScott Long 
TQ_UNLOCK(queue); 207478cfc73SScott Long } 208478cfc73SScott Long 209478cfc73SScott Long void 210478cfc73SScott Long taskqueue_unblock(struct taskqueue *queue) 211478cfc73SScott Long { 212478cfc73SScott Long 213478cfc73SScott Long TQ_LOCK(queue); 214478cfc73SScott Long queue->tq_flags &= ~TQ_FLAGS_BLOCKED; 215478cfc73SScott Long if (queue->tq_flags & TQ_FLAGS_PENDING) { 216478cfc73SScott Long queue->tq_flags &= ~TQ_FLAGS_PENDING; 217478cfc73SScott Long queue->tq_enqueue(queue->tq_context); 218478cfc73SScott Long } 219478cfc73SScott Long TQ_UNLOCK(queue); 220478cfc73SScott Long } 221478cfc73SScott Long 222bf73d4d2SMatthew D Fleming static void 223bf73d4d2SMatthew D Fleming taskqueue_run_locked(struct taskqueue *queue) 224ca2e0534SDoug Rabson { 225bf73d4d2SMatthew D Fleming struct taskqueue_busy tb; 226033459c8SMatthew D Fleming struct task *task; 227242ed5d9SMatthew D Fleming int pending; 228ca2e0534SDoug Rabson 229242ed5d9SMatthew D Fleming mtx_assert(&queue->tq_mutex, MA_OWNED); 230bf73d4d2SMatthew D Fleming tb.tb_running = NULL; 231bf73d4d2SMatthew D Fleming TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link); 232bf73d4d2SMatthew D Fleming 233ca2e0534SDoug Rabson while (STAILQ_FIRST(&queue->tq_queue)) { 234ca2e0534SDoug Rabson /* 235ca2e0534SDoug Rabson * Carefully remove the first task from the queue and 236ca2e0534SDoug Rabson * zero its pending count. 
237ca2e0534SDoug Rabson */ 238ca2e0534SDoug Rabson task = STAILQ_FIRST(&queue->tq_queue); 239ca2e0534SDoug Rabson STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link); 240ca2e0534SDoug Rabson pending = task->ta_pending; 241ca2e0534SDoug Rabson task->ta_pending = 0; 242bf73d4d2SMatthew D Fleming tb.tb_running = task; 2439df1a6ddSScott Long TQ_UNLOCK(queue); 244ca2e0534SDoug Rabson 245282873e2SJohn Baldwin task->ta_func(task->ta_context, pending); 246ca2e0534SDoug Rabson 2479df1a6ddSScott Long TQ_LOCK(queue); 248bf73d4d2SMatthew D Fleming tb.tb_running = NULL; 24914889b42SWarner Losh wakeup(task); 250ca2e0534SDoug Rabson } 251bf73d4d2SMatthew D Fleming TAILQ_REMOVE(&queue->tq_active, &tb, tb_link); 252bf73d4d2SMatthew D Fleming } 253bf73d4d2SMatthew D Fleming 254bf73d4d2SMatthew D Fleming void 255bf73d4d2SMatthew D Fleming taskqueue_run(struct taskqueue *queue) 256bf73d4d2SMatthew D Fleming { 257bf73d4d2SMatthew D Fleming 258bf73d4d2SMatthew D Fleming TQ_LOCK(queue); 259bf73d4d2SMatthew D Fleming taskqueue_run_locked(queue); 260bf73d4d2SMatthew D Fleming TQ_UNLOCK(queue); 261bf73d4d2SMatthew D Fleming } 262bf73d4d2SMatthew D Fleming 263bf73d4d2SMatthew D Fleming static int 264bf73d4d2SMatthew D Fleming task_is_running(struct taskqueue *queue, struct task *task) 265bf73d4d2SMatthew D Fleming { 266bf73d4d2SMatthew D Fleming struct taskqueue_busy *tb; 267bf73d4d2SMatthew D Fleming 268bf73d4d2SMatthew D Fleming mtx_assert(&queue->tq_mutex, MA_OWNED); 269bf73d4d2SMatthew D Fleming TAILQ_FOREACH(tb, &queue->tq_active, tb_link) { 270bf73d4d2SMatthew D Fleming if (tb->tb_running == task) 271bf73d4d2SMatthew D Fleming return (1); 272bf73d4d2SMatthew D Fleming } 273bf73d4d2SMatthew D Fleming return (0); 274ca2e0534SDoug Rabson } 275ca2e0534SDoug Rabson 276f46276a9SMatthew D Fleming int 277f46276a9SMatthew D Fleming taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp) 278f46276a9SMatthew D Fleming { 279f46276a9SMatthew D Fleming u_int pending; 280f46276a9SMatthew 
D Fleming int error; 281f46276a9SMatthew D Fleming 282f46276a9SMatthew D Fleming TQ_LOCK(queue); 283f46276a9SMatthew D Fleming if ((pending = task->ta_pending) > 0) 284f46276a9SMatthew D Fleming STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link); 285f46276a9SMatthew D Fleming task->ta_pending = 0; 286f46276a9SMatthew D Fleming error = task_is_running(queue, task) ? EBUSY : 0; 287f46276a9SMatthew D Fleming TQ_UNLOCK(queue); 288f46276a9SMatthew D Fleming 289f46276a9SMatthew D Fleming if (pendp != NULL) 290f46276a9SMatthew D Fleming *pendp = pending; 291f46276a9SMatthew D Fleming return (error); 292f46276a9SMatthew D Fleming } 293f46276a9SMatthew D Fleming 29414889b42SWarner Losh void 29514889b42SWarner Losh taskqueue_drain(struct taskqueue *queue, struct task *task) 29614889b42SWarner Losh { 2973d336cd0SPawel Jakub Dawidek 2983d336cd0SPawel Jakub Dawidek if (!queue->tq_spin) 2999df1a6ddSScott Long WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); 30052bc746aSSam Leffler 3013d336cd0SPawel Jakub Dawidek TQ_LOCK(queue); 302bf73d4d2SMatthew D Fleming while (task->ta_pending != 0 || task_is_running(queue, task)) 3033d336cd0SPawel Jakub Dawidek TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0); 3043d336cd0SPawel Jakub Dawidek TQ_UNLOCK(queue); 3059df1a6ddSScott Long } 30614889b42SWarner Losh 307ca2e0534SDoug Rabson static void 308ca2e0534SDoug Rabson taskqueue_swi_enqueue(void *context) 309ca2e0534SDoug Rabson { 310c86b6ff5SJohn Baldwin swi_sched(taskqueue_ih, 0); 311ca2e0534SDoug Rabson } 312ca2e0534SDoug Rabson 313ca2e0534SDoug Rabson static void 3148088699fSJohn Baldwin taskqueue_swi_run(void *dummy) 315ca2e0534SDoug Rabson { 316bf73d4d2SMatthew D Fleming taskqueue_run(taskqueue_swi); 317ca2e0534SDoug Rabson } 318ca2e0534SDoug Rabson 3197874f606SScott Long static void 3207874f606SScott Long taskqueue_swi_giant_enqueue(void *context) 3217874f606SScott Long { 3227874f606SScott Long swi_sched(taskqueue_giant_ih, 0); 3237874f606SScott Long } 
3247874f606SScott Long 3257874f606SScott Long static void 3267874f606SScott Long taskqueue_swi_giant_run(void *dummy) 3277874f606SScott Long { 328bf73d4d2SMatthew D Fleming taskqueue_run(taskqueue_swi_giant); 3297874f606SScott Long } 3307874f606SScott Long 3310f92108dSScott Long int 3320f92108dSScott Long taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, 3330f92108dSScott Long const char *name, ...) 3340f92108dSScott Long { 3350f92108dSScott Long va_list ap; 33675b773aeSSam Leffler struct thread *td; 337175611b6SSam Leffler struct taskqueue *tq; 33800537061SSam Leffler int i, error; 3395ca4819dSJohn Baldwin char ktname[MAXCOMLEN + 1]; 3400f92108dSScott Long 3410f92108dSScott Long if (count <= 0) 3420f92108dSScott Long return (EINVAL); 343175611b6SSam Leffler 3440f92108dSScott Long tq = *tqp; 3450f92108dSScott Long 3460f92108dSScott Long va_start(ap, name); 3475ca4819dSJohn Baldwin vsnprintf(ktname, sizeof(ktname), name, ap); 3480f92108dSScott Long va_end(ap); 3490f92108dSScott Long 350175611b6SSam Leffler tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE, 35100537061SSam Leffler M_NOWAIT | M_ZERO); 352175611b6SSam Leffler if (tq->tq_threads == NULL) { 35300537061SSam Leffler printf("%s: no memory for %s threads\n", __func__, ktname); 35400537061SSam Leffler return (ENOMEM); 35500537061SSam Leffler } 35600537061SSam Leffler 3570f92108dSScott Long for (i = 0; i < count; i++) { 3580f92108dSScott Long if (count == 1) 359175611b6SSam Leffler error = kthread_add(taskqueue_thread_loop, tqp, NULL, 3601bdfff22SAndriy Gapon &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname); 3610f92108dSScott Long else 362175611b6SSam Leffler error = kthread_add(taskqueue_thread_loop, tqp, NULL, 363175611b6SSam Leffler &tq->tq_threads[i], RFSTOPPED, 0, 364175611b6SSam Leffler "%s_%d", ktname, i); 36575b773aeSSam Leffler if (error) { 36600537061SSam Leffler /* should be ok to continue, taskqueue_free will dtrt */ 367175611b6SSam Leffler printf("%s: 
kthread_add(%s): error %d", __func__, 368175611b6SSam Leffler ktname, error); 369175611b6SSam Leffler tq->tq_threads[i] = NULL; /* paranoid */ 37075b773aeSSam Leffler } else 371175611b6SSam Leffler tq->tq_tcount++; 37200537061SSam Leffler } 37375b773aeSSam Leffler for (i = 0; i < count; i++) { 374175611b6SSam Leffler if (tq->tq_threads[i] == NULL) 37575b773aeSSam Leffler continue; 376175611b6SSam Leffler td = tq->tq_threads[i]; 377982d11f8SJeff Roberson thread_lock(td); 37875b773aeSSam Leffler sched_prio(td, pri); 379f0393f06SJeff Roberson sched_add(td, SRQ_BORING); 380982d11f8SJeff Roberson thread_unlock(td); 3810f92108dSScott Long } 3820f92108dSScott Long 3830f92108dSScott Long return (0); 3840f92108dSScott Long } 3850f92108dSScott Long 386227559d1SJohn-Mark Gurney void 387227559d1SJohn-Mark Gurney taskqueue_thread_loop(void *arg) 388cb32189eSKenneth D. Merry { 389227559d1SJohn-Mark Gurney struct taskqueue **tqp, *tq; 390bd83e879SJohn Baldwin 391227559d1SJohn-Mark Gurney tqp = arg; 392227559d1SJohn-Mark Gurney tq = *tqp; 3939df1a6ddSScott Long TQ_LOCK(tq); 39424ef0701SAndrew Thompson while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) { 395bf73d4d2SMatthew D Fleming taskqueue_run_locked(tq); 3966a3b2893SPawel Jakub Dawidek /* 3976a3b2893SPawel Jakub Dawidek * Because taskqueue_run() can drop tq_mutex, we need to 3986a3b2893SPawel Jakub Dawidek * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the 3996a3b2893SPawel Jakub Dawidek * meantime, which means we missed a wakeup. 
4006a3b2893SPawel Jakub Dawidek */ 4016a3b2893SPawel Jakub Dawidek if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0) 4026a3b2893SPawel Jakub Dawidek break; 4030f180a7cSJohn Baldwin TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0); 404a1797ef6SAndrew Thompson } 405bf73d4d2SMatthew D Fleming taskqueue_run_locked(tq); 40652bc746aSSam Leffler 40752bc746aSSam Leffler /* rendezvous with thread that asked us to terminate */ 408175611b6SSam Leffler tq->tq_tcount--; 409175611b6SSam Leffler wakeup_one(tq->tq_threads); 4109df1a6ddSScott Long TQ_UNLOCK(tq); 41103c7442dSJohn Baldwin kthread_exit(); 412cb32189eSKenneth D. Merry } 413cb32189eSKenneth D. Merry 414227559d1SJohn-Mark Gurney void 415cb32189eSKenneth D. Merry taskqueue_thread_enqueue(void *context) 416cb32189eSKenneth D. Merry { 417227559d1SJohn-Mark Gurney struct taskqueue **tqp, *tq; 418bd83e879SJohn Baldwin 419227559d1SJohn-Mark Gurney tqp = context; 420227559d1SJohn-Mark Gurney tq = *tqp; 421227559d1SJohn-Mark Gurney 422227559d1SJohn-Mark Gurney mtx_assert(&tq->tq_mutex, MA_OWNED); 42352bc746aSSam Leffler wakeup_one(tq); 424cb32189eSKenneth D. Merry } 425cb32189eSKenneth D. Merry 426d710cae7SWarner Losh TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL, 4277874f606SScott Long swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, 4287874f606SScott Long INTR_MPSAFE, &taskqueue_ih)); 4297874f606SScott Long 430d710cae7SWarner Losh TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL, 4316caf758eSJohn Baldwin swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run, 4327874f606SScott Long NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih)); 433cb32189eSKenneth D. 
Merry 434227559d1SJohn-Mark Gurney TASKQUEUE_DEFINE_THREAD(thread); 435f82c9e70SSam Leffler 4369df1a6ddSScott Long struct taskqueue * 4379df1a6ddSScott Long taskqueue_create_fast(const char *name, int mflags, 4380f92108dSScott Long taskqueue_enqueue_fn enqueue, void *context) 4399df1a6ddSScott Long { 4400f92108dSScott Long return _taskqueue_create(name, mflags, enqueue, context, 4419df1a6ddSScott Long MTX_SPIN, "fast_taskqueue"); 4429df1a6ddSScott Long } 4439df1a6ddSScott Long 4449df1a6ddSScott Long /* NB: for backwards compatibility */ 445f82c9e70SSam Leffler int 446f82c9e70SSam Leffler taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task) 447f82c9e70SSam Leffler { 4489df1a6ddSScott Long return taskqueue_enqueue(queue, task); 449f82c9e70SSam Leffler } 450f82c9e70SSam Leffler 451f82c9e70SSam Leffler static void *taskqueue_fast_ih; 452f82c9e70SSam Leffler 453f82c9e70SSam Leffler static void 4549df1a6ddSScott Long taskqueue_fast_enqueue(void *context) 455f82c9e70SSam Leffler { 456f82c9e70SSam Leffler swi_sched(taskqueue_fast_ih, 0); 457f82c9e70SSam Leffler } 458f82c9e70SSam Leffler 459f82c9e70SSam Leffler static void 460f82c9e70SSam Leffler taskqueue_fast_run(void *dummy) 461f82c9e70SSam Leffler { 462bf73d4d2SMatthew D Fleming taskqueue_run(taskqueue_fast); 463f82c9e70SSam Leffler } 464f82c9e70SSam Leffler 465d710cae7SWarner Losh TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL, 4669df1a6ddSScott Long swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL, 4679df1a6ddSScott Long SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih)); 468159ef108SPawel Jakub Dawidek 469159ef108SPawel Jakub Dawidek int 470159ef108SPawel Jakub Dawidek taskqueue_member(struct taskqueue *queue, struct thread *td) 471159ef108SPawel Jakub Dawidek { 472159ef108SPawel Jakub Dawidek int i, j, ret = 0; 473159ef108SPawel Jakub Dawidek 474159ef108SPawel Jakub Dawidek TQ_LOCK(queue); 475159ef108SPawel Jakub Dawidek for (i = 0, j = 0; ; i++) { 476159ef108SPawel Jakub Dawidek if 
(queue->tq_threads[i] == NULL) 477159ef108SPawel Jakub Dawidek continue; 478159ef108SPawel Jakub Dawidek if (queue->tq_threads[i] == td) { 479159ef108SPawel Jakub Dawidek ret = 1; 480159ef108SPawel Jakub Dawidek break; 481159ef108SPawel Jakub Dawidek } 482159ef108SPawel Jakub Dawidek if (++j >= queue->tq_tcount) 483159ef108SPawel Jakub Dawidek break; 484159ef108SPawel Jakub Dawidek } 485159ef108SPawel Jakub Dawidek TQ_UNLOCK(queue); 486159ef108SPawel Jakub Dawidek return (ret); 487159ef108SPawel Jakub Dawidek } 488