/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task		*tb_running;
	u_int			 tb_seq;
	LIST_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	LIST_HEAD(, taskqueue_busy) tq_active;
	struct task		*tq_hint;
	u_int			tq_seq;
	int			tq_callouts;
	struct mtx_padalign	tq_mutex;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
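
/*
 * Sleep on the taskqueue using the primitive that matches the queue's
 * mutex type: spin-mutex queues must use msleep_spin(), sleep-mutex
 * queues use plain msleep().
 */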

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, const char *wm)
{
	if (tq->tq_spin)
		return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0));
	return (msleep(p, &tq->tq_mutex, 0, wm, 0));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name);
}
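
/*
 * Typical consumer usage, as a minimal sketch.  "my_handler" and "sc"
 * stand in for driver-supplied code and are not part of this file:
 *
 *	static struct taskqueue *my_tq;
 *	static struct task my_task;
 *
 *	TASK_INIT(&my_task, 0, my_handler, sc);
 *	my_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &my_tq);
 *	taskqueue_start_threads(&my_tq, 1, PWAIT, "my_tq");
 *	taskqueue_enqueue(my_tq, &my_task);
 *	...
 *	taskqueue_drain(my_tq, &my_task);
 *	taskqueue_free(my_tq);
 */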

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "tq_destroy");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
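
/*
 * The queue is kept sorted by decreasing ta_priority, FIFO among equal
 * priorities; tq_hint caches the last insertion point to shorten the
 * linear search.  Called with the queue lock held; the lock is always
 * released before returning.
 */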

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise cases when all tasks use a small set of priorities.
	 * In case of only one priority we always insert at the end.
	 * In case of two tq_hint typically gives the insertion point.
	 * In case of more than two tq_hint should halve the search.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = queue->tq_hint;
		if (prev && prev->ta_priority >= task->ta_priority) {
			ins = STAILQ_NEXT(prev, ta_link);
		} else {
			prev = NULL;
			ins = STAILQ_FIRST(&queue->tq_queue);
		}
		for (; ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev) {
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
			queue->tq_hint = task;
		} else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}
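
/*
 * Callout handler for timeout tasks: disarm the callout and hand the
 * task to the queue proper.  The callout was initialized with
 * CALLOUT_RETURNUNLOCKED, so this runs with the queue lock held and
 * lets taskqueue_enqueue_locked() release it.
 */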

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing */
		TQ_UNLOCK(queue);
		res = -1;
	} else if (sbt == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (sbt < 0)
				sbt = -sbt; /* Ignore overflow. */
		}
		if (sbt > 0) {
			callout_reset_sbt(&timeout_task->c, sbt, pr,
			    taskqueue_timeout_func, timeout_task, flags);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *ttask, int ticks)
{

	return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
	    0, C_HARDCLOCK));
}
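
/*
 * Timeout-task usage, as a minimal sketch; "my_tq", "my_handler" and
 * "sc" are hypothetical driver code:
 *
 *	static struct timeout_task my_tt;
 *
 *	TIMEOUT_TASK_INIT(my_tq, &my_tt, 0, my_handler, sc);
 *	taskqueue_enqueue_timeout(my_tq, &my_tt, hz);	(run in ~1 second)
 *	...
 *	taskqueue_drain_timeout(my_tq, &my_tt);
 */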

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static int
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return (0);

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we cannot use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	queue->tq_hint = &t_barrier;
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, "tq_qdrain");
	return (1);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static int
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return (0);

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/* Wait for any active task with sequence from the past. */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "tq_adrain");
			goto restart;
		}
	}

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
	return (1);
}
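
/*
 * Suppress the queue's enqueue hook: tasks queued while blocked do not
 * trigger processing until taskqueue_unblock() re-kicks the queue.
 * Workers that are already running may still drain existing tasks.
 */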

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}
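
/*
 * Pop and execute tasks until the queue is empty.  Entered and exited
 * with the queue lock held; the lock is dropped around each ta_func()
 * call, and tasks marked TASK_IS_NET() run inside the network epoch.
 */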

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct epoch_tracker et;
	struct taskqueue_busy tb;
	struct task *task;
	bool in_net_epoch;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
	in_net_epoch = false;

	while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		tb.tb_seq = ++queue->tq_seq;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		if (!in_net_epoch && TASK_IS_NET(task)) {
			in_net_epoch = true;
			NET_EPOCH_ENTER(et);
		} else if (in_net_epoch && !TASK_IS_NET(task)) {
			NET_EPOCH_EXIT(et);
			in_net_epoch = false;
		}
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		wakeup(task);
	}
	if (in_net_epoch)
		NET_EPOCH_EXIT(et);
	LIST_REMOVE(&tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

/*
 * Only use this function in single-threaded contexts.  It returns
 * non-zero if the given task is either pending or running.  Otherwise
 * the task is idle and can be queued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_is_running(queue, task);
	TQ_UNLOCK(queue);

	return (retval);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0) {
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
	}
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}
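
/*
 * Remove a task from the queue unless it has already started running.
 * Returns 0 on success or EBUSY if the task is executing; if pendp is
 * non-NULL it receives the task's former pending count.
 */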

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, "tq_drain");
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	(void)taskqueue_drain_tq_queue(queue);
	(void)taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}
Selasky ("Drain already in progress")); 61199eca1b2SHans Petter Selasky timeout_task->f |= DT_DRAIN_IN_PROGRESS; 61299eca1b2SHans Petter Selasky TQ_UNLOCK(queue); 61399eca1b2SHans Petter Selasky 614b2ad91f2SKonstantin Belousov callout_drain(&timeout_task->c); 615b2ad91f2SKonstantin Belousov taskqueue_drain(queue, &timeout_task->t); 61699eca1b2SHans Petter Selasky 61799eca1b2SHans Petter Selasky /* 61899eca1b2SHans Petter Selasky * Clear flag to allow timer to re-start: 61999eca1b2SHans Petter Selasky */ 62099eca1b2SHans Petter Selasky TQ_LOCK(queue); 62199eca1b2SHans Petter Selasky timeout_task->f &= ~DT_DRAIN_IN_PROGRESS; 62299eca1b2SHans Petter Selasky TQ_UNLOCK(queue); 623b2ad91f2SKonstantin Belousov } 624b2ad91f2SKonstantin Belousov 625bb58b5d6SMark Johnston void 626bb58b5d6SMark Johnston taskqueue_quiesce(struct taskqueue *queue) 627bb58b5d6SMark Johnston { 628bb58b5d6SMark Johnston int ret; 629bb58b5d6SMark Johnston 630bb58b5d6SMark Johnston TQ_LOCK(queue); 631bb58b5d6SMark Johnston do { 632bb58b5d6SMark Johnston ret = taskqueue_drain_tq_queue(queue); 633bb58b5d6SMark Johnston if (ret == 0) 634bb58b5d6SMark Johnston ret = taskqueue_drain_tq_active(queue); 635bb58b5d6SMark Johnston } while (ret != 0); 636bb58b5d6SMark Johnston TQ_UNLOCK(queue); 637bb58b5d6SMark Johnston } 638bb58b5d6SMark Johnston 639ca2e0534SDoug Rabson static void 640ca2e0534SDoug Rabson taskqueue_swi_enqueue(void *context) 641ca2e0534SDoug Rabson { 642c86b6ff5SJohn Baldwin swi_sched(taskqueue_ih, 0); 643ca2e0534SDoug Rabson } 644ca2e0534SDoug Rabson 645ca2e0534SDoug Rabson static void 6468088699fSJohn Baldwin taskqueue_swi_run(void *dummy) 647ca2e0534SDoug Rabson { 648bf73d4d2SMatthew D Fleming taskqueue_run(taskqueue_swi); 649ca2e0534SDoug Rabson } 650ca2e0534SDoug Rabson 6517874f606SScott Long static void 6527874f606SScott Long taskqueue_swi_giant_enqueue(void *context) 6537874f606SScott Long { 6547874f606SScott Long swi_sched(taskqueue_giant_ih, 0); 6557874f606SScott Long } 6567874f606SScott Long 6577874f606SScott Long static void 6587874f606SScott Long taskqueue_swi_giant_run(void *dummy) 6597874f606SScott Long { 660bf73d4d2SMatthew D Fleming taskqueue_run(taskqueue_swi_giant); 6617874f606SScott Long } 6627874f606SScott Long 6635a6f0eeeSAdrian Chadd static int 6645a6f0eeeSAdrian Chadd _taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, 6655fdc2c04SAndriy Gapon cpuset_t *mask, struct proc *p, const char *name, va_list ap) 6660f92108dSScott Long { 667bfa102caSAdrian Chadd char ktname[MAXCOMLEN + 1]; 66875b773aeSSam Leffler struct thread *td; 669175611b6SSam Leffler struct taskqueue *tq; 67000537061SSam Leffler int i, error; 6710f92108dSScott Long 6720f92108dSScott Long if (count <= 0) 6730f92108dSScott Long return (EINVAL); 674175611b6SSam Leffler 675bfa102caSAdrian Chadd vsnprintf(ktname, sizeof(ktname), name, ap); 6760f92108dSScott Long tq = *tqp; 6770f92108dSScott Long 678ac2fffa4SPedro F. Giffuni tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE, 679ac2fffa4SPedro F. 

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, struct proc *p, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	if (tq->tq_tcount == 0) {
		free(tq->tq_threads, M_TASKQUEUE);
		tq->tq_threads = NULL;
		return (ENOMEM);
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long)td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_in_proc(struct taskqueue **tqp, int count, int pri,
    struct proc *proc, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, proc, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, NULL, name, ap);
	va_end(ap);
	return (error);
}
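
/*
 * Pinned-worker usage, as a minimal sketch; "my_tq" is the hypothetical
 * queue from the earlier example:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	taskqueue_start_threads_cpuset(&my_tq, 2, PWAIT, &mask, "my_tq");
 */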

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_any(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}