/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task		*tb_running;
	u_int			 tb_seq;
	bool			 tb_canceling;
	LIST_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	LIST_HEAD(, taskqueue_busy) tq_active;
	struct task		*tq_hint;
	u_int			tq_seq;
	int			tq_callouts;
	struct mtx_padalign	tq_mutex;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
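
/*
 * Usage sketch (illustrative only; the "mydev"/"sc" names below are
 * hypothetical): a consumer embeds a struct task or struct timeout_task
 * in its softc and initializes it once before any enqueue.  Note that
 * the queue must already exist when TIMEOUT_TASK_INIT() runs, since the
 * callout is tied to the queue's mutex:
 *
 *	struct mydev_softc {
 *		struct taskqueue	*sc_tq;
 *		struct task		sc_intr_task;
 *		struct timeout_task	sc_poll_task;
 *	};
 *
 *	TASK_INIT(&sc->sc_intr_task, 0, mydev_intr_taskfunc, sc);
 *	TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_poll_task, 0,
 *	    mydev_poll_taskfunc, sc);
 */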

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, const char *wm)
{
	if (tq->tq_spin)
		return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0));
	return (msleep(p, &tq->tq_mutex, 0, wm, 0));
}

static struct taskqueue_busy *
task_get_busy(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (tb);
	}
	return (NULL);
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name);
}

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}
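
/*
 * Usage sketch (illustrative): the common way to get a queue serviced
 * by dedicated kernel threads is to pass taskqueue_thread_enqueue as
 * the enqueue hook, with a pointer to the queue pointer itself as its
 * context, and then start the threads.  "sc" and "sc_dev" are
 * hypothetical:
 *
 *	sc->sc_tq = taskqueue_create("mydev_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "%s taskq",
 *	    device_get_nameunit(sc->sc_dev));
 */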

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "tq_destroy");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task, int flags)
{
	struct task *ins;
	struct task *prev;
	struct taskqueue_busy *tb;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Ignore a canceling task if requested.
	 */
	if (__predict_false((flags & TASKQUEUE_FAIL_IF_CANCELING) != 0)) {
		tb = task_get_busy(queue, task);
		if (tb != NULL && tb->tb_canceling) {
			TQ_UNLOCK(queue);
			return (ECANCELED);
		}
	}

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (__predict_false((flags & TASKQUEUE_FAIL_IF_PENDING) != 0)) {
			TQ_UNLOCK(queue);
			return (EEXIST);
		}
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise cases when all tasks use a small set of priorities.
	 * In case of only one priority we always insert at the end.
	 * In case of two, tq_hint typically gives the insertion point.
	 * In case of more than two, tq_hint should halve the search.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = queue->tq_hint;
		if (prev && prev->ta_priority >= task->ta_priority) {
			ins = STAILQ_NEXT(prev, ta_link);
		} else {
			prev = NULL;
			ins = STAILQ_FIRST(&queue->tq_queue);
		}
		for (; ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev) {
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
			queue->tq_hint = task;
		} else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue_flags(struct taskqueue *queue, struct task *task, int flags)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task, flags);
	/* The lock is released inside. */

	return (res);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	return (taskqueue_enqueue_flags(queue, task, 0));
}
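
/*
 * Usage sketch (illustrative): enqueueing is safe from interrupt
 * handlers.  With no flags, enqueueing an already-pending task only
 * bumps its pending count, so repeated interrupts coalesce into a
 * single handler run:
 *
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
 *
 * A caller that wants to detect coalescing can request failure instead;
 * EEXIST is returned if the task was already queued:
 *
 *	error = taskqueue_enqueue_flags(sc->sc_tq, &sc->sc_intr_task,
 *	    TASKQUEUE_FAIL_IF_PENDING);
 */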

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t, 0);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing */
		TQ_UNLOCK(queue);
		res = -1;
	} else if (sbt == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t, 0);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (sbt < 0)
				sbt = -sbt; /* Ignore overflow. */
		}
		if (sbt > 0) {
			if (queue->tq_spin)
				flags |= C_DIRECT_EXEC;
			if (queue->tq_spin && queue->tq_tcount == 1 &&
			    queue->tq_threads[0] == curthread) {
				callout_reset_sbt_curcpu(&timeout_task->c, sbt, pr,
				    taskqueue_timeout_func, timeout_task, flags);
			} else {
				callout_reset_sbt(&timeout_task->c, sbt, pr,
				    taskqueue_timeout_func, timeout_task, flags);
			}
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *ttask, int ticks)
{

	return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
	    0, C_HARDCLOCK));
}
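
/*
 * Usage sketch (illustrative): schedule the hypothetical poll task to
 * run two seconds from now.  A negative return value means a drain is
 * in progress and nothing was queued; a positive value is the previous
 * pending count:
 *
 *	if (taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_poll_task,
 *	    2 * hz) < 0)
 *		device_printf(sc->sc_dev, "poll task is draining\n");
 */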

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static int
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return (0);

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	queue->tq_hint = &t_barrier;
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, "tq_qdrain");
	return (1);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static int
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return (0);

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/* Wait for any active task with sequence from the past. */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "tq_adrain");
			goto restart;
		}
	}

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
	return (1);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}
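
/*
 * Usage note (illustrative): blocking only suppresses the enqueue hook,
 * so tasks queued while the queue is blocked are held, not lost, and
 * are kicked off again by taskqueue_unblock().  Neither call waits for
 * a task that is already running:
 *
 *	taskqueue_block(sc->sc_tq);
 *	... work that must not race with newly started tasks ...
 *	taskqueue_unblock(sc->sc_tq);
 */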

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct epoch_tracker et;
	struct taskqueue_busy tb;
	struct task *task;
	bool in_net_epoch;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
	in_net_epoch = false;

	while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		tb.tb_seq = ++queue->tq_seq;
		tb.tb_canceling = false;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		if (!in_net_epoch && TASK_IS_NET(task)) {
			in_net_epoch = true;
			NET_EPOCH_ENTER(et);
		} else if (in_net_epoch && !TASK_IS_NET(task)) {
			NET_EPOCH_EXIT(et);
			in_net_epoch = false;
		}
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		wakeup(task);
	}
	if (in_net_epoch)
		NET_EPOCH_EXIT(et);
	LIST_REMOVE(&tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

/*
 * Only use this function in single threaded contexts.  It returns
 * non-zero if the given task is either pending or running.  Otherwise
 * the task is idle and can be queued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_get_busy(queue, task) != NULL;
	TQ_UNLOCK(queue);

	return (retval);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{
	struct taskqueue_busy *tb;
	int retval = 0;

	if (task->ta_pending > 0) {
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
	}
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	tb = task_get_busy(queue, task);
	if (tb != NULL) {
		tb->tb_canceling = true;
		retval = EBUSY;
	}

	return (retval);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}
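
/*
 * Usage sketch (illustrative): taskqueue_cancel() returns EBUSY when
 * the task is mid-execution, so detach paths typically fall back to a
 * drain, which may sleep:
 *
 *	if (taskqueue_cancel(sc->sc_tq, &sc->sc_intr_task, NULL) != 0)
 *		taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
 */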

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_get_busy(queue, task) != NULL)
		TQ_SLEEP(queue, task, "tq_drain");
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	(void)taskqueue_drain_tq_queue(queue);
	(void)taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	/*
	 * Set flag to prevent timer from re-starting during drain:
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
	    ("Drain already in progress"));
	timeout_task->f |= DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);

	/*
	 * Clear flag to allow timer to re-start:
	 */
	TQ_LOCK(queue);
	timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);
}

void
taskqueue_quiesce(struct taskqueue *queue)
{
	int ret;

	TQ_LOCK(queue);
	do {
		ret = taskqueue_drain_tq_queue(queue);
		if (ret == 0)
			ret = taskqueue_drain_tq_active(queue);
	} while (ret != 0);
	TQ_UNLOCK(queue);
}
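
/*
 * Usage sketch (illustrative): a detach path quiesces timeout tasks
 * before destroying the queue, since taskqueue_free() asserts that no
 * callouts remain armed:
 *
 *	taskqueue_drain_timeout(sc->sc_tq, &sc->sc_poll_task);
 *	taskqueue_drain_all(sc->sc_tq);
 *	taskqueue_free(sc->sc_tq);
 */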

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, struct proc *p, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	if (tq->tq_tcount == 0) {
		free(tq->tq_threads, M_TASKQUEUE);
		tq->tq_threads = NULL;
		return (ENOMEM);
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_in_proc(struct taskqueue **tqp, int count, int pri,
    struct proc *proc, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, proc, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, NULL, name, ap);
	va_end(ap);
	return (error);
}
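
/*
 * Usage sketch (illustrative): pin the worker threads to a CPU set,
 * for example to keep them near the device they service.  The mask
 * construction here is hypothetical:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	taskqueue_start_threads_cpuset(&sc->sc_tq, 1, PWAIT, &mask,
 *	    "%s taskq", device_get_nameunit(sc->sc_dev));
 */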

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_any(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
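
/*
 * Usage sketch (illustrative): the statically defined queues above need
 * no setup by their users; taskqueue_thread runs tasks in a dedicated
 * kernel thread context where sleeping is permitted:
 *
 *	TASK_INIT(&sc->sc_defer_task, 0, mydev_defer_taskfunc, sc);
 *	taskqueue_enqueue(taskqueue_thread, &sc->sc_defer_task);
 */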