/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>
#include <net/vnet.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
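
/*
 * Example (illustrative sketch only, not part of this file): a consumer
 * would normally initialize its tasks once, before any enqueue.  The
 * "sc", "xx_intr_task", "xx_tick" and "xx_tq" names are hypothetical.
 *
 *	TASK_INIT(&sc->xx_intr_task, 0, xx_intr_task_fn, sc);
 *	TIMEOUT_TASK_INIT(sc->xx_tq, &sc->xx_tick, 0, xx_tick_fn, sc);
 */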

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, "taskqueue");
}

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}
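
/*
 * Example (sketch under an assumed driver context): the common pattern
 * for a private queue serviced by its own kernel thread.  "sc" and the
 * "xx" names are hypothetical; PI_NET is just one reasonable priority.
 *
 *	sc->xx_tq = taskqueue_create("xx_taskq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->xx_tq);
 *	taskqueue_start_threads(&sc->xx_tq, 1, PI_NET, "%s taskq",
 *	    device_get_nameunit(sc->xx_dev));
 */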

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}
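
/*
 * Example (illustrative): enqueueing the task initialized earlier, e.g.
 * from an interrupt handler.  A task that is already pending is not
 * queued a second time; its ta_pending count is bumped and the handler
 * later receives the coalesced count as its second argument.
 *
 *	taskqueue_enqueue(sc->xx_tq, &sc->xx_intr_task);
 */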

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}
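
/*
 * Example (illustrative): arming the timeout task initialized earlier to
 * fire after one second.  Per the code above, a negative ticks value
 * rearms the callout only if it is not already armed.
 *
 *	taskqueue_enqueue_timeout(sc->xx_tq, &sc->xx_tick, hz);
 */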

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		CURVNET_SET(task->ta_vnet);
		task->ta_func(task->ta_context, pending);
		CURVNET_RESTORE();

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}
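
/*
 * Example (illustrative teardown sketch): on detach, a consumer would
 * cancel or drain its tasks before freeing the queue; draining after a
 * failed cancel waits out a handler that is already running.
 *
 *	if (taskqueue_cancel(sc->xx_tq, &sc->xx_intr_task, NULL) != 0)
 *		taskqueue_drain(sc->xx_tq, &sc->xx_intr_task);
 *	taskqueue_drain_timeout(sc->xx_tq, &sc->xx_tick);
 *	taskqueue_free(sc->xx_tq);
 */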

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));
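
/*
 * Example (illustrative): the global queues defined here and just below
 * can be used with no setup at all; taskqueue_swi runs tasks from a
 * software-interrupt thread, taskqueue_thread from a dedicated kernel
 * thread.
 *
 *	taskqueue_enqueue(taskqueue_thread, &sc->xx_intr_task);
 */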

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void *taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	/*
	 * Walk the thread array, skipping slots whose kthread_add failed;
	 * j counts the live threads examined so we stop after tq_tcount.
	 */
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
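
/*
 * Example (illustrative): taskqueue_member() lets code detect that it is
 * already running on one of the queue's own threads, e.g. to avoid the
 * deadlock of draining a task from within that task's own queue:
 *
 *	if (!taskqueue_member(sc->xx_tq, curthread))
 *		taskqueue_drain(sc->xx_tq, &sc->xx_intr_task);
 */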