/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex, 0);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}
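
/*
 * Common backend for taskqueue_create() and taskqueue_create_fast().
 * mtxflags selects whether the queue is protected by a sleep mutex
 * (MTX_DEF) or a spin mutex (MTX_SPIN); tq_spin in turn controls how
 * TQ_LOCK() and TQ_SLEEP() operate on the queue.
 */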
static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
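
/*
 * Queue a task for execution.  The caller must hold tq_mutex.  Tasks are
 * kept sorted with the highest priority at the head; enqueueing a task
 * that is already pending merely increments its ta_pending count.
 */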
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}
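
/*
 * Callout handler for timeout tasks.  The callout was initialized with
 * callout_init_mtx() on tq_mutex, so it runs with the queue locked and
 * can hand the task to taskqueue_enqueue_locked() directly.
 */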
static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
		}
		callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func,
		    timeout_task);
	}
	TQ_UNLOCK(queue);
	return (res);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}
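
/*
 * Execute all pending tasks.  A taskqueue_busy record on the stack is
 * linked into tq_active for the duration so that task_is_running() and
 * taskqueue_drain() can detect a task whose handler is executing while
 * the queue is unlocked.
 */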
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}
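
/*
 * Remove a task from the pending queue if it has not started executing.
 * A task that is already running cannot be stopped; EBUSY is returned so
 * the caller can use taskqueue_drain() to wait for it to complete.
 */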
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
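
/*
 * Create count kernel threads at priority pri to service the queue.  The
 * threads are created stopped (RFSTOPPED) so that their priority can be
 * set before they are first scheduled to run.
 */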
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
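
/*
 * Main loop for the threads created by taskqueue_start_threads(): run
 * all pending tasks, then sleep until taskqueue_thread_enqueue() signals
 * new work.  On termination the queue is drained one last time before
 * the thread exits.
 */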
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));
TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}