/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (!tq_name)
		return (NULL);

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue) {
		/* Don't leak the name buffer on allocation failure. */
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, name);
}

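/*
 * Usage sketch (illustrative only, not part of the original file; the
 * "mydrv" names are hypothetical).  A typical consumer creates a queue
 * backed by taskqueue_thread_enqueue, starts one or more service
 * threads, and then enqueues tasks:
 *
 *	static struct taskqueue *mydrv_tq;
 *	static struct task mydrv_task;
 *
 *	TASK_INIT(&mydrv_task, 0, mydrv_task_fn, sc);
 *	mydrv_tq = taskqueue_create("mydrv", M_WAITOK,
 *	    taskqueue_thread_enqueue, &mydrv_tq);
 *	taskqueue_start_threads(&mydrv_tq, 1, PWAIT, "mydrv taskq");
 *	...
 *	taskqueue_enqueue(mydrv_tq, &mydrv_task);
 *
 * The queue pointer is passed by reference as the enqueue context;
 * taskqueue_thread_enqueue() below dereferences it to wake the queue's
 * service threads.
 */
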
void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

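/*
 * Example (illustrative; "mydrv" names are hypothetical): a driver that
 * needs per-thread setup can register INIT/SHUTDOWN callbacks before
 * starting the queue's threads; each service thread invokes them on
 * entry to and exit from taskqueue_thread_loop():
 *
 *	taskqueue_set_callback(mydrv_tq, TASKQUEUE_CALLBACK_TYPE_INIT,
 *	    mydrv_thread_init_cb, sc);
 *	taskqueue_set_callback(mydrv_tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN,
 *	    mydrv_thread_fini_cb, sc);
 */
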
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
grouptaskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	TQ_LOCK(queue);
	if (task->ta_pending) {
		TQ_UNLOCK(queue);
		return (0);
	}
	STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	task->ta_pending = 1;
	TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

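/*
 * Example (illustrative sketch, assuming the TIMEOUT_TASK_INIT() macro
 * from <sys/taskqueue.h>; "mydrv" names are hypothetical).  A timeout
 * task is initialized against a specific queue and then scheduled to
 * fire after a delay; ticks == 0 enqueues immediately, and a negative
 * value is negated, per the overflow handling above:
 *
 *	static struct timeout_task mydrv_tmo;
 *
 *	TIMEOUT_TASK_INIT(mydrv_tq, &mydrv_tmo, 0, mydrv_tmo_fn, sc);
 *	taskqueue_enqueue_timeout(mydrv_tq, &mydrv_tmo, hz);
 *
 * The return value is the previous pending count, so a non-zero result
 * means the task was already queued or armed.
 */
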
static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wakeup any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

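/*
 * Note (illustrative; "mydrv" names are hypothetical): blocking does
 * not drain the queue.  Tasks enqueued while TQ_FLAGS_BLOCKED is set
 * are still linked into tq_queue, but tq_enqueue (the wakeup hook) is
 * not called, so already-idle service threads are not kicked;
 * taskqueue_unblock() re-kicks the queue if work accumulated.  For
 * example, a driver can block its queue across a hardware reset:
 *
 *	taskqueue_block(mydrv_tq);
 *	mydrv_reset_hardware(sc);
 *	taskqueue_unblock(mydrv_tq);
 */
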
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(task != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

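/*
 * Example (illustrative; "mydrv" names are hypothetical):
 * taskqueue_cancel() returns 0 if the task was removed or idle and
 * EBUSY if it is currently running, so a caller that must guarantee
 * the task function is no longer executing typically falls back to
 * taskqueue_drain():
 *
 *	if (taskqueue_cancel(mydrv_tq, &mydrv_task, NULL) != 0)
 *		taskqueue_drain(mydrv_tq, &mydrv_task);
 */
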
int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

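/*
 * Example (illustrative; "mydrv_tq" is a hypothetical queue): a common
 * teardown sequence drains before freeing, since taskqueue_free()
 * asserts that nothing is still active:
 *
 *	taskqueue_drain_all(mydrv_tq);
 *	taskqueue_free(mydrv_tq);
 *
 * taskqueue_free() itself wakes and reaps the service threads via
 * taskqueue_terminate(), so the explicit drain is about ensuring no
 * in-flight work still references driver state about to be destroyed.
 */
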
void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}

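/*
 * Example (illustrative; "mydrv" names are hypothetical): pinning all
 * service threads of a queue to a fixed CPU set at creation time:
 *
 *	cpuset_t cpus;
 *
 *	CPU_ZERO(&cpus);
 *	CPU_SET(2, &cpus);
 *	taskqueue_start_threads_cpuset(&mydrv_tq, 2, PWAIT, &cpus,
 *	    "mydrv taskq");
 *
 * As noted above, a cpuset_setthread() failure is only logged; the
 * threads simply run unpinned.
 */
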
static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * need to check if the TQ_FLAGS_ACTIVE flag wasn't removed
		 * in the meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

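/*
 * The definitions above instantiate the standard system queues:
 * taskqueue_swi (runs from a software interrupt without Giant),
 * taskqueue_swi_giant (same, but registered without INTR_MPSAFE, so it
 * runs with Giant held) and taskqueue_thread (runs from a dedicated
 * kthread).  An illustrative one-shot use of the shared thread queue
 * ("mydrv" names are hypothetical):
 *
 *	TASK_INIT(&mydrv_task, 0, mydrv_task_fn, sc);
 *	taskqueue_enqueue(taskqueue_thread, &mydrv_task);
 */
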
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}

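/*
 * Example (illustrative; "mydrv" names are hypothetical):
 * taskqueue_member() reports whether a given thread is one of the
 * queue's service threads.  A common use is guarding against
 * self-deadlock, e.g. refusing to drain from within the queue itself:
 *
 *	KASSERT(!taskqueue_member(mydrv_tq, curthread),
 *	    ("draining mydrv_tq from its own taskqueue thread"));
 *	taskqueue_drain(mydrv_tq, &mydrv_task);
 */
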
struct taskqgroup_cpu {
	LIST_HEAD(, grouptask)	tgc_tasks;
	struct taskqueue	*tgc_taskq;
	int	tgc_cnt;
	int	tgc_cpu;
};

struct taskqgroup {
	struct taskqgroup_cpu tqg_queue[MAXCPU];
	struct mtx	tqg_lock;
	char *		tqg_name;
	int		tqg_adjusting;
	int		tqg_stride;
	int		tqg_cnt;
};

struct taskq_bind_task {
	struct task bt_task;
	int	bt_cpuid;
};

static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
{
	struct taskqgroup_cpu *qcpu;

	qcpu = &qgroup->tqg_queue[idx];
	LIST_INIT(&qcpu->tgc_tasks);
	qcpu->tgc_taskq = taskqueue_create_fast(NULL, M_WAITOK,
	    taskqueue_thread_enqueue, &qcpu->tgc_taskq);
	taskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
	    "%s_%d", qgroup->tqg_name, idx);
	qcpu->tgc_cpu = idx * qgroup->tqg_stride;
}

static void
taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
{

	taskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
}

/*
 * Find the taskq with least # of tasks that doesn't currently have any
 * other queues from the uniq identifier.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
	struct grouptask *n;
	int i, idx, mincnt;
	int strict;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
	if (qgroup->tqg_cnt == 0)
		return (0);
	idx = -1;
	mincnt = INT_MAX;
	/*
	 * Two passes: first scan for a queue with the fewest tasks that
	 * does not already service this uniq id.  If that fails, simply
	 * pick the queue with the fewest total tasks.
	 */
	for (strict = 1; mincnt == INT_MAX; strict = 0) {
		for (i = 0; i < qgroup->tqg_cnt; i++) {
			if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
				continue;
			if (strict) {
				LIST_FOREACH(n,
				    &qgroup->tqg_queue[i].tgc_tasks, gt_list)
					if (n->gt_uniq == uniq)
						break;
				if (n != NULL)
					continue;
			}
			mincnt = qgroup->tqg_queue[i].tgc_cnt;
			idx = i;
		}
	}
	if (idx == -1)
		panic("taskqgroup_find: Failed to pick a qid.");

	return (idx);
}

void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int irq, char *name)
{
	cpuset_t mask;
	int qid;

	gtask->gt_uniq = uniq;
	gtask->gt_name = name;
	gtask->gt_irq = irq;
	gtask->gt_cpu = -1;
	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, uniq);
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (irq != -1 && smp_started) {
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
		mtx_unlock(&qgroup->tqg_lock);
		intr_setaffinity(irq, &mask);
	} else
		mtx_unlock(&qgroup->tqg_lock);
}

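/*
 * Example (illustrative sketch, assuming the GROUPTASK_INIT() and
 * GROUPTASK_ENQUEUE() helpers from <sys/taskqueue.h> and an existing
 * group such as qgroup_if_rx; "mydrv" names are hypothetical).  A
 * driver attaches one grouptask per receive queue so that tasks sharing
 * the same "uniq" cookie are spread across the group's per-CPU
 * taskqueues, optionally steering the IRQ to the chosen CPU:
 *
 *	GROUPTASK_INIT(&rxq->gtask, 0, mydrv_rx_task, rxq);
 *	taskqgroup_attach(qgroup_if_rx, &rxq->gtask, rxq, irq_num,
 *	    "mydrv rxq");
 *	...
 *	GROUPTASK_ENQUEUE(&rxq->gtask);
 *
 * taskqgroup_find() above implements the spreading policy: it prefers
 * the least-loaded queue that does not already serve this cookie.
 */
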
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
	void *uniq, int cpu, int irq, char *name)
{
	cpuset_t mask;
	int i, qid;

	qid = -1;
	gtask->gt_uniq = uniq;
	gtask->gt_name = name;
	gtask->gt_irq = irq;
	gtask->gt_cpu = cpu;
	mtx_lock(&qgroup->tqg_lock);
	if (smp_started) {
		for (i = 0; i < qgroup->tqg_cnt; i++)
			if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
				qid = i;
				break;
			}
		if (qid == -1) {
			mtx_unlock(&qgroup->tqg_lock);
			return (EINVAL);
		}
	} else
		qid = 0;
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (irq != -1 && smp_started) {
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
		mtx_unlock(&qgroup->tqg_lock);
		intr_setaffinity(irq, &mask);
	} else
		mtx_unlock(&qgroup->tqg_lock);
	return (0);
}

void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	int i;

	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
			break;
	if (i == qgroup->tqg_cnt)
		panic("taskqgroup_detach: task not in group\n");
	qgroup->tqg_queue[i].tgc_cnt--;
	LIST_REMOVE(gtask, gt_list);
	mtx_unlock(&qgroup->tqg_lock);
	gtask->gt_taskqueue = NULL;
}

static void
taskqgroup_binder(void *ctx, int pending)
{
	struct taskq_bind_task *task = (struct taskq_bind_task *)ctx;
	cpuset_t mask;
	int error;

	CPU_ZERO(&mask);
	CPU_SET(task->bt_cpuid, &mask);
	error = cpuset_setthread(curthread->td_tid, &mask);
	thread_lock(curthread);
	sched_bind(curthread, task->bt_cpuid);
	thread_unlock(curthread);

	if (error)
		printf("taskqgroup_binder: setaffinity failed: %d\n",
		    error);
	free(task, M_DEVBUF);
}

static void
taskqgroup_bind(struct taskqgroup *qgroup)
{
	struct taskq_bind_task *task;
	int i;

	/*
	 * Bind taskqueue threads to specific CPUs, if they have been assigned
	 * one.
	 */
	for (i = 0; i < qgroup->tqg_cnt; i++) {
		task = malloc(sizeof(*task), M_DEVBUF, M_NOWAIT);
		if (task == NULL)
			continue;	/* Can't sleep with tqg_lock held; skip binding. */
		TASK_INIT(&task->bt_task, 0, taskqgroup_binder, task);
		task->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
		taskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
		    &task->bt_task);
	}
}

static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
	LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
	cpuset_t mask;
	struct grouptask *gtask;
	int i, old_cnt, qid;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);

	if (cnt < 1 || cnt * stride > mp_ncpus || !smp_started) {
		printf("taskqgroup_adjust failed cnt: %d stride: %d mp_ncpus: %d smp_started: %d\n",
			   cnt, stride, mp_ncpus, smp_started);
		return (EINVAL);
	}
	if (qgroup->tqg_adjusting) {
		printf("taskqgroup_adjust failed: adjusting\n");
		return (EBUSY);
	}
	qgroup->tqg_adjusting = 1;
	old_cnt = qgroup->tqg_cnt;
	mtx_unlock(&qgroup->tqg_lock);
	/*
	 * Set up queue for tasks added before boot.
	 */
	if (old_cnt == 0) {
		LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
		    grouptask, gt_list);
		qgroup->tqg_queue[0].tgc_cnt = 0;
	}

	/*
	 * If new taskq threads have been added.
	 */
	for (i = old_cnt; i < cnt; i++)
		taskqgroup_cpu_create(qgroup, i);
	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_cnt = cnt;
	qgroup->tqg_stride = stride;

	/*
	 * Adjust drivers to use new taskqs.
	 */
	for (i = 0; i < old_cnt; i++) {
		while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) {
			LIST_REMOVE(gtask, gt_list);
			qgroup->tqg_queue[i].tgc_cnt--;
			LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
		}
	}

	while ((gtask = LIST_FIRST(&gtask_head))) {
		LIST_REMOVE(gtask, gt_list);
		if (gtask->gt_cpu == -1)
			qid = taskqgroup_find(qgroup, gtask->gt_uniq);
		else {
			for (i = 0; i < qgroup->tqg_cnt; i++)
				if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
					qid = i;
					break;
				}
		}
		qgroup->tqg_queue[qid].tgc_cnt++;
		LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
		    gt_list);
		gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	}
	/*
	 * Set new CPU and IRQ affinity
	 */
	for (i = 0; i < cnt; i++) {
		qgroup->tqg_queue[i].tgc_cpu = i * qgroup->tqg_stride;
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
		LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
			if (gtask->gt_irq == -1)
				continue;
			intr_setaffinity(gtask->gt_irq, &mask);
		}
	}
	mtx_unlock(&qgroup->tqg_lock);

	/*
	 * If taskq thread count has been reduced.
	 */
	for (i = cnt; i < old_cnt; i++)
		taskqgroup_cpu_remove(qgroup, i);

	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_adjusting = 0;

	taskqgroup_bind(qgroup);

	return (0);
}

int
taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
{
	int error;

	mtx_lock(&qgroup->tqg_lock);
	error = _taskqgroup_adjust(qgroup, cpu, stride);
	mtx_unlock(&qgroup->tqg_lock);

	return (error);
}

struct taskqgroup *
taskqgroup_create(char *name)
{
	struct taskqgroup *qgroup;

	qgroup = malloc(sizeof(*qgroup), M_TASKQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
	qgroup->tqg_name = name;
	LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);

	return (qgroup);
}

void
taskqgroup_destroy(struct taskqgroup *qgroup)
{

}