xref: /freebsd/sys/kern/subr_taskqueue.c (revision 7e52504fc2d1331cfe8451af5463de32c3fd908c)
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

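/*
 * Sentinel stored in tb_running by taskqueue_drain_all() waiters: it
 * marks a drain marker on the tq_active list and never points at a
 * real task.
 */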
struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	struct thread		*tq_curthread;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (!tq_name)
		return (NULL);

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, name);
}

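/*
 * Illustrative sketch (not part of this file): the usual create/start
 * life cycle for a thread-backed queue.  "my_tq" is a hypothetical
 * driver variable.  The queue pointer is passed by reference so that
 * taskqueue_thread_enqueue() can dereference it even before
 * taskqueue_create() has returned:
 *
 *	struct taskqueue *my_tq;
 *
 *	my_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &my_tq);
 *	taskqueue_start_threads(&my_tq, 1, PWAIT, "my_tq");
 */
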
void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

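/*
 * Sketch of callback registration ("my_thread_init" is a hypothetical
 * function of type taskqueue_callback_fn): callbacks must be set after
 * taskqueue_create() and before taskqueue_start_threads(), since the
 * INIT callback runs from taskqueue_thread_loop() as each thread starts:
 *
 *	taskqueue_set_callback(my_tq, TASKQUEUE_CALLBACK_TYPE_INIT,
 *	    my_thread_init, sc);
 */
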
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < UCHAR_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
grouptaskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	TQ_LOCK(queue);
	if (task->ta_pending) {
		TQ_UNLOCK(queue);
		return (0);
	}
	STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	task->ta_pending = 1;
	TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

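/*
 * Sketch of the enqueue path above from a consumer's side; "my_handler",
 * "my_softc", and "sc" are hypothetical.  A task enqueued while already
 * pending only has ta_pending bumped, so the handler receives a batched
 * "pending" count instead of running once per enqueue:
 *
 *	static void
 *	my_handler(void *context, int pending)
 *	{
 *		struct my_softc *sc = context;
 *
 *		... handle up to "pending" deferred events for sc ...
 *	}
 *
 *	TASK_INIT(&sc->my_task, 0, my_handler, sc);
 *	taskqueue_enqueue(my_tq, &sc->my_task);
 */
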
static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

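/*
 * Sketch: arm a task to be enqueued one second from now, using the
 * TIMEOUT_TASK_INIT() wrapper from <sys/taskqueue.h> around
 * _timeout_task_init() above ("my_ttask" is hypothetical):
 *
 *	TIMEOUT_TASK_INIT(my_tq, &sc->my_ttask, 0, my_handler, sc);
 *	taskqueue_enqueue_timeout(my_tq, &sc->my_ttask, hz);
 */
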
static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wakeup any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(task != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		if ((task->ta_flags & TASK_SKIP_WAKEUP) == 0)
			wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_curthread = curthread;
	taskqueue_run_locked(queue);
	queue->tq_curthread = NULL;
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

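/*
 * Teardown sketch: taskqueue_cancel() returns EBUSY while the handler is
 * still running, so fall back to draining; timeout tasks need the
 * timeout-aware variants so the callout is quiesced as well:
 *
 *	if (taskqueue_cancel(my_tq, &sc->my_task, NULL) != 0)
 *		taskqueue_drain(my_tq, &sc->my_task);
 *	while (taskqueue_cancel_timeout(my_tq, &sc->my_ttask, NULL) != 0)
 *		taskqueue_drain_timeout(my_tq, &sc->my_ttask);
 */
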
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}

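/*
 * Sketch: start a single worker pinned to CPU 0 with the cpuset variant
 * above.  As noted in _taskqueue_start_threads(), pinning is best
 * effort only:
 *
 *	cpuset_t cpus;
 *
 *	CPU_ZERO(&cpus);
 *	CPU_SET(0, &cpus);
 *	taskqueue_start_threads_cpuset(&my_tq, 1, PWAIT, &cpus, "my_tq");
 */
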
static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	tq->tq_curthread = curthread;
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);
	tq->tq_curthread = NULL;
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	if (tq->tq_curthread != curthread)
		wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

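/*
 * The instances above define the global queues: tasks on "taskqueue_swi"
 * and "taskqueue_swi_giant" run from a software interrupt handler (the
 * giant variant with Giant held, as it is registered without
 * INTR_MPSAFE), while "taskqueue_thread" is serviced by a dedicated
 * kernel thread whose tasks may sleep.  Typical use (sketch):
 *
 *	taskqueue_enqueue(taskqueue_thread, &sc->my_task);
 */
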
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}

struct taskqgroup_cpu {
	LIST_HEAD(, grouptask)	tgc_tasks;
	struct taskqueue	*tgc_taskq;
	int	tgc_cnt;
	int	tgc_cpu;
};

struct taskqgroup {
	struct taskqgroup_cpu tqg_queue[MAXCPU];
	struct mtx	tqg_lock;
	char *		tqg_name;
	int		tqg_adjusting;
	int		tqg_stride;
	int		tqg_cnt;
};

struct taskq_bind_task {
	struct task bt_task;
	int	bt_cpuid;
};

static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
{
	struct taskqgroup_cpu *qcpu;

	qcpu = &qgroup->tqg_queue[idx];
	LIST_INIT(&qcpu->tgc_tasks);
	qcpu->tgc_taskq = taskqueue_create_fast(NULL, M_WAITOK,
	    taskqueue_thread_enqueue, &qcpu->tgc_taskq);
	taskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
	    "%s_%d", qgroup->tqg_name, idx);
	qcpu->tgc_cpu = idx * qgroup->tqg_stride;
}

static void
taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
{

	taskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
}

/*
 * Find the taskq with the fewest tasks that does not already service
 * this uniq identifier.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
	struct grouptask *n;
	int i, idx, mincnt;
	int strict;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
	if (qgroup->tqg_cnt == 0)
		return (0);
	idx = -1;
	mincnt = INT_MAX;
	/*
	 * Two passes: first scan for a queue with the fewest tasks that
	 * does not already service this uniq id.  If that fails, simply
	 * find the queue with the fewest total tasks.
	 */
	for (strict = 1; mincnt == INT_MAX; strict = 0) {
		for (i = 0; i < qgroup->tqg_cnt; i++) {
			if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
				continue;
			if (strict) {
				LIST_FOREACH(n,
				    &qgroup->tqg_queue[i].tgc_tasks, gt_list)
					if (n->gt_uniq == uniq)
						break;
				if (n != NULL)
					continue;
			}
			mincnt = qgroup->tqg_queue[i].tgc_cnt;
			idx = i;
		}
	}
	if (idx == -1)
		panic("taskqgroup_find: Failed to pick a qid.");

	return (idx);
}

void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int irq, char *name)
{
	cpuset_t mask;
	int qid;

	gtask->gt_uniq = uniq;
	gtask->gt_name = name;
	gtask->gt_irq = irq;
	gtask->gt_cpu = -1;
	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, uniq);
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (irq != -1 && smp_started) {
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
		mtx_unlock(&qgroup->tqg_lock);
		intr_setaffinity(irq, &mask);
	} else
		mtx_unlock(&qgroup->tqg_lock);
}

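/*
 * Grouptask sketch, assuming the GROUPTASK_INIT() and GROUPTASK_ENQUEUE()
 * helpers that accompany this revision in <sys/taskqueue.h>; the handler
 * and names are hypothetical:
 *
 *	static struct taskqgroup *my_tqg;
 *
 *	my_tqg = taskqgroup_create("my_tqg");
 *	taskqgroup_adjust(my_tqg, mp_ncpus, 1);
 *	GROUPTASK_INIT(&sc->my_gtask, 0, my_gt_handler, sc);
 *	taskqgroup_attach(my_tqg, &sc->my_gtask, sc, -1, "my gtask");
 *	...
 *	GROUPTASK_ENQUEUE(&sc->my_gtask);
 */
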
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
	void *uniq, int cpu, int irq, char *name)
{
	cpuset_t mask;
	int i, qid;

	qid = -1;
	gtask->gt_uniq = uniq;
	gtask->gt_name = name;
	gtask->gt_irq = irq;
	gtask->gt_cpu = cpu;
	mtx_lock(&qgroup->tqg_lock);
	if (smp_started) {
		for (i = 0; i < qgroup->tqg_cnt; i++)
			if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
				qid = i;
				break;
			}
		if (qid == -1) {
			mtx_unlock(&qgroup->tqg_lock);
			return (EINVAL);
		}
	} else
		qid = 0;
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (irq != -1 && smp_started) {
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
		mtx_unlock(&qgroup->tqg_lock);
		intr_setaffinity(irq, &mask);
	} else
		mtx_unlock(&qgroup->tqg_lock);
	return (0);
}

void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	int i;

	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
			break;
	if (i == qgroup->tqg_cnt)
		panic("taskqgroup_detach: task not in group\n");
	qgroup->tqg_queue[i].tgc_cnt--;
	LIST_REMOVE(gtask, gt_list);
	mtx_unlock(&qgroup->tqg_lock);
	gtask->gt_taskqueue = NULL;
}

static void
taskqgroup_binder(void *ctx, int pending)
{
	struct taskq_bind_task *task = (struct taskq_bind_task *)ctx;
	cpuset_t mask;
	int error;

	CPU_ZERO(&mask);
	CPU_SET(task->bt_cpuid, &mask);
	error = cpuset_setthread(curthread->td_tid, &mask);
	thread_lock(curthread);
	sched_bind(curthread, task->bt_cpuid);
	thread_unlock(curthread);

	if (error)
		printf("taskqgroup_binder: setaffinity failed: %d\n",
		    error);
	free(task, M_DEVBUF);
}

static void
taskqgroup_bind(struct taskqgroup *qgroup)
{
	struct taskq_bind_task *task;
	int i;

	/*
	 * Bind taskqueue threads to specific CPUs, if they have been assigned
	 * one.
	 */
	for (i = 0; i < qgroup->tqg_cnt; i++) {
		task = malloc(sizeof (*task), M_DEVBUF, M_NOWAIT);
		if (task == NULL) {
			/* Binding is advisory; skip thread on alloc failure. */
			continue;
		}
		TASK_INIT(&task->bt_task, 0, taskqgroup_binder, task);
		task->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
		taskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
		    &task->bt_task);
	}
}

static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
	LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
	cpuset_t mask;
	struct grouptask *gtask;
	int i, old_cnt, qid;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);

	if (cnt < 1 || cnt * stride > mp_ncpus || !smp_started) {
		printf("taskqgroup_adjust failed cnt: %d stride: %d mp_ncpus: %d smp_started: %d\n",
			   cnt, stride, mp_ncpus, smp_started);
		return (EINVAL);
	}
	if (qgroup->tqg_adjusting) {
		printf("taskqgroup_adjust failed: adjusting\n");
		return (EBUSY);
	}
	qgroup->tqg_adjusting = 1;
	old_cnt = qgroup->tqg_cnt;
	mtx_unlock(&qgroup->tqg_lock);
	/*
	 * Set up queue for tasks added before boot.
	 */
	if (old_cnt == 0) {
		LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
		    grouptask, gt_list);
		qgroup->tqg_queue[0].tgc_cnt = 0;
	}

	/*
	 * Create taskqueues for any newly added threads.
	 */
	for (i = old_cnt; i < cnt; i++)
		taskqgroup_cpu_create(qgroup, i);
	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_cnt = cnt;
	qgroup->tqg_stride = stride;

	/*
	 * Adjust drivers to use new taskqs.
	 */
	for (i = 0; i < old_cnt; i++) {
		while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) {
			LIST_REMOVE(gtask, gt_list);
			qgroup->tqg_queue[i].tgc_cnt--;
			LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
		}
	}

	while ((gtask = LIST_FIRST(&gtask_head))) {
		LIST_REMOVE(gtask, gt_list);
		if (gtask->gt_cpu == -1)
			qid = taskqgroup_find(qgroup, gtask->gt_uniq);
		else {
			for (i = 0; i < qgroup->tqg_cnt; i++)
				if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
					qid = i;
					break;
				}
		}
		qgroup->tqg_queue[qid].tgc_cnt++;
		LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
		    gt_list);
		gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	}
	/*
	 * Set new CPU and IRQ affinity
	 */
	for (i = 0; i < cnt; i++) {
		qgroup->tqg_queue[i].tgc_cpu = i * qgroup->tqg_stride;
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
		LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
			if (gtask->gt_irq == -1)
				continue;
			intr_setaffinity(gtask->gt_irq, &mask);
		}
	}
	mtx_unlock(&qgroup->tqg_lock);

	/*
	 * If the taskq thread count has been reduced, free the queues
	 * of the removed threads.
	 */
	for (i = cnt; i < old_cnt; i++)
		taskqgroup_cpu_remove(qgroup, i);

	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_adjusting = 0;

	taskqgroup_bind(qgroup);

	return (0);
}

int
taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
{
	int error;

	mtx_lock(&qgroup->tqg_lock);
	error = _taskqgroup_adjust(qgroup, cpu, stride);
	mtx_unlock(&qgroup->tqg_lock);

	return (error);
}

struct taskqgroup *
taskqgroup_create(char *name)
{
	struct taskqgroup *qgroup;

	qgroup = malloc(sizeof(*qgroup), M_TASKQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
	qgroup->tqg_name = name;
	LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);

	return (qgroup);
}

void
taskqgroup_destroy(struct taskqgroup *qgroup)
{

}
1145