xref: /freebsd/sys/kern/subr_taskqueue.c (revision 227559d11f2868bad58c0259117d64efffcff184)
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct mtx taskqueue_queues_mutex;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;	/* entry on the global list */
	STAILQ_HEAD(, task)	tq_queue;	/* tasks, priority order */
	const char		*tq_name;	/* name for taskqueue_find() */
	taskqueue_enqueue_fn	tq_enqueue;	/* run when a task is queued */
	void			*tq_context;	/* argument for tq_enqueue */
	struct mtx		tq_mutex;	/* protects the queue state */
};

static void	init_taskqueue_list(void *data);

static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	mtx_init(&queue->tq_mutex, "taskqueue", NULL, MTX_DEF);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}

void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	mtx_lock(&queue->tq_mutex);
	taskqueue_run(queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}

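/*
 * Illustrative sketch, not part of this file: how a hypothetical consumer
 * might pair taskqueue_create() and taskqueue_free().  The names my_tq,
 * my_poke(), my_attach() and my_detach() are invented for the example;
 * only taskqueue_create(), taskqueue_free() and wakeup() come from the
 * surrounding code and the kernel proper.
 */
#if 0
static struct taskqueue *my_tq;

static void
my_poke(void *context)
{
	/*
	 * tq_enqueue callback; called with tq_mutex held whenever a task
	 * is queued.  Kick whatever context will call taskqueue_run().
	 */
	wakeup(context);
}

static int
my_attach(void)
{
	my_tq = taskqueue_create("my_tq", M_NOWAIT, my_poke, &my_tq);
	if (my_tq == NULL)	/* an M_NOWAIT allocation may fail */
		return ENOMEM;
	return 0;
}

static void
my_detach(void)
{
	/* Any tasks still queued are run before the queue is freed. */
	taskqueue_free(my_tq);
}
#endif
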
/*
 * Returns the named queue with its tq_mutex held, or NULL if no queue of
 * that name exists.  The caller is responsible for releasing the lock.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (strcmp(queue->tq_name, name) == 0) {
			mtx_lock(&queue->tq_mutex);
			mtx_unlock(&taskqueue_queues_mutex);
			return queue;
		}
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return NULL;
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	mtx_lock(&queue->tq_mutex);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		mtx_unlock(&queue->tq_mutex);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (prev == NULL || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins != NULL;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev != NULL)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue != NULL)
		queue->tq_enqueue(queue->tq_context);

	mtx_unlock(&queue->tq_mutex);

	return 0;
}

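/*
 * Illustrative sketch, not part of this file: queueing work onto the
 * system swi queue from an ithread-based interrupt handler.  my_softc,
 * my_task, my_setup() and my_intr() are invented for the example;
 * TASK_INIT() and taskqueue_swi are the real interfaces declared in
 * <sys/taskqueue.h>.
 */
#if 0
struct my_softc {
	int	my_events;	/* hypothetical per-device counter */
};

static struct my_softc my_sc;
static struct task my_task;

static void
my_handler(void *context, int pending)
{
	struct my_softc *sc = context;

	/* "pending" counts how many enqueues were coalesced into this run. */
	sc->my_events += pending;
}

static void
my_setup(void)
{
	/* TASK_INIT(task, priority, func, context) */
	TASK_INIT(&my_task, 0, my_handler, &my_sc);
}

static void
my_intr(void *arg)
{
	/*
	 * If my_task is already queued, only ta_pending is bumped and no
	 * reinsertion happens, so repeated calls are cheap.
	 */
	taskqueue_enqueue(taskqueue_swi, &my_task);
}
#endif
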
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		mtx_lock(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue) != NULL) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		mtx_unlock(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock(&queue->tq_mutex);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		mtx_unlock(&queue->tq_mutex);
}

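/*
 * Note on the pending count: a task queued N times before taskqueue_run()
 * dequeues it has its handler invoked once with pending == N, not N times.
 * The count is zeroed and tq_mutex dropped before ta_func runs, so the
 * task may be re-queued, even by its own handler, while it is executing.
 */
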
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

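/*
 * The two software-interrupt queues above differ only in locking:
 * taskqueue_swi is registered with INTR_MPSAFE below, while
 * taskqueue_swi_giant is not, so tasks run from the latter still
 * execute with the Giant lock held.
 */
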
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	mtx_lock(&tq->tq_mutex);
	for (;;) {
		taskqueue_run(tq);
		msleep(tq, &tq->tq_mutex, PWAIT, "-", 0);
	}
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup(tq);
}

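/*
 * The thread queue is a rendezvous between the two functions above:
 * taskqueue_thread_enqueue() is installed as tq_enqueue, so it runs with
 * tq_mutex held, and its wakeup(tq) lands exactly on the msleep() channel
 * in taskqueue_thread_loop(), which wakes, drains the queue via
 * taskqueue_run(), and sleeps again.
 */
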
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
		 swi_add(NULL, "Giant task queue", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

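/*
 * For reference, TASKQUEUE_DEFINE(name, enqueue, context, init) in
 * <sys/taskqueue.h> expands to roughly the sketch below (paraphrased,
 * not copied): a global struct taskqueue *taskqueue_name plus a SYSINIT
 * that creates the queue and then evaluates the "init" argument, which
 * is how the swi_add() calls above get hooked in at boot.
 */
#if 0
static void
taskqueue_define_name(void *arg)
{
	taskqueue_name = taskqueue_create("name", M_NOWAIT, enqueue, context);
	init;	/* e.g. one of the swi_add() expressions above */
}
SYSINIT(taskqueue_name, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
    taskqueue_define_name, NULL);

struct taskqueue *taskqueue_name;
#endif
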
TASKQUEUE_DEFINE_THREAD(thread);

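/*
 * Illustrative sketch, not part of this file: TASKQUEUE_DEFINE_THREAD(foo)
 * would create a kernel-thread-serviced queue named taskqueue_foo, used
 * the same way as taskqueue_thread above.  "foo" and my_queue_work() are
 * invented for the example.
 */
#if 0
TASKQUEUE_DEFINE_THREAD(foo);

static void
my_queue_work(struct task *task)
{
	/* The task's handler runs later, in the taskqueue_foo thread. */
	taskqueue_enqueue(taskqueue_foo, task);
}
#endif
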
/*
 * taskqueue_enqueue_fast() mirrors taskqueue_enqueue() above; the only
 * difference is that the fast queue's tq_mutex is a spin mutex, so the
 * spin lock/unlock primitives must be used.
 */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	mtx_lock_spin(&queue->tq_mutex);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		mtx_unlock_spin(&queue->tq_mutex);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (prev == NULL || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins != NULL;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev != NULL)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue != NULL)
		queue->tq_enqueue(queue->tq_context);

	mtx_unlock_spin(&queue->tq_mutex);

	return 0;
}

/*
 * Spin-mutex counterpart of taskqueue_run().  Its only caller does not
 * hold the queue lock, so no "owned" handling is needed here.
 */
static void
taskqueue_run_fast(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	mtx_lock_spin(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue) != NULL) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		mtx_unlock_spin(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock_spin(&queue->tq_mutex);
	}
	mtx_unlock_spin(&queue->tq_mutex);
}

struct taskqueue *taskqueue_fast;
static void	*taskqueue_fast_ih;

static void
taskqueue_fast_schedule(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run_fast(taskqueue_fast);
}

/*
 * The fast queue is constructed by hand rather than via taskqueue_create()
 * because its mutex must be initialised with MTX_SPIN.
 */
static void
taskqueue_define_fast(void *arg)
{

	taskqueue_fast = malloc(sizeof(struct taskqueue), M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (taskqueue_fast == NULL) {
		printf("%s: Unable to allocate fast task queue!\n", __func__);
		return;
	}

	STAILQ_INIT(&taskqueue_fast->tq_queue);
	taskqueue_fast->tq_name = "fast";
	taskqueue_fast->tq_enqueue = taskqueue_fast_schedule;
	mtx_init(&taskqueue_fast->tq_mutex, "taskqueue_fast", NULL, MTX_SPIN);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, taskqueue_fast, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	swi_add(NULL, "Fast task queue", taskqueue_fast_run,
		NULL, SWI_TQ_FAST, 0, &taskqueue_fast_ih);
}
SYSINIT(taskqueue_fast, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
    taskqueue_define_fast, NULL);
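
/*
 * Illustrative sketch, not part of this file: the fast queue must be
 * driven through taskqueue_enqueue_fast() because its tq_mutex is a spin
 * mutex.  my_fast_task, my_fast_handler() and my_fast_setup() are
 * invented for the example.
 */
#if 0
static struct task my_fast_task;

static void
my_fast_handler(void *context, int pending)
{
	/* Keep this short: it runs from a software interrupt. */
}

static void
my_fast_setup(void)
{
	TASK_INIT(&my_fast_task, 0, my_fast_handler, NULL);
	taskqueue_enqueue_fast(taskqueue_fast, &my_fast_task);
}
#endif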