/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

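/*
 * Bookkeeping for a running task: each taskqueue thread publishes the
 * task it is currently executing through an entry on the queue's
 * tq_active list, so that cancel and drain operations can find it.
 */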
struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

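/*
 * Sentinel tb_running value: marks an entry on tq_active as a drain
 * waiter (see taskqueue_drain_tq_active()) rather than a running task.
 */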
struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

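/*
 * Initialize a timeout task (normally reached via the TIMEOUT_TASK_INIT()
 * macro).  The callout shares the queue's mutex and must return with it
 * unlocked (CALLOUT_RETURNUNLOCKED), because taskqueue_timeout_func()
 * hands the lock off to taskqueue_enqueue_locked(), which releases it.
 */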
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return (NULL);

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return (queue);
}

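/*
 * Create a taskqueue protected by a regular (sleep) mutex.  The enqueue
 * hook is invoked whenever a task is queued on an unblocked queue; the
 * stock hooks (swi, swi_giant, fast, thread) are recognized by
 * _taskqueue_create() and are called with the queue lock dropped.
 */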
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue"));
}

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal the taskqueue threads to terminate, then wait for them (and
 * for any armed timeout tasks) to drain away.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

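/*
 * Queue a task.  Called with the queue lock held and always returns
 * with it released.  The queue is kept sorted by descending task
 * priority; enqueueing an already-pending task only bumps its pending
 * count.
 */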
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}
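
/*
 * Example usage (a sketch; "sc", sc->foo_task and foo_task_fn are
 * hypothetical):
 *
 *	TASK_INIT(&sc->foo_task, 0, foo_task_fn, sc);
 *	...
 *	taskqueue_enqueue(taskqueue_thread, &sc->foo_task);
 *
 * foo_task_fn(sc, pending) then runs once in the context of the system
 * "thread" taskqueue, with "pending" counting how many enqueues were
 * coalesced since it last ran.
 */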

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

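/*
 * Schedule a timeout task to run after "ticks" ticks.  Zero enqueues
 * the task immediately.  A negative value uses its absolute number of
 * ticks but will not rearm an already-armed callout, while a positive
 * value always resets the callout.  Returns the number of pending or
 * armed instances that were already outstanding.
 */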
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier with the lowest possible priority
	 * so we are inserted after all current tasks.
	 */
	TASK_INIT(&t_barrier, 0, taskqueue_task_nop_fn, &t_barrier);
	taskqueue_enqueue_locked(queue, &t_barrier);

	/*
	 * taskqueue_enqueue_locked() returns with the lock released,
	 * so retake it before touching the barrier and sleeping.
	 */
	TQ_LOCK(queue);

	/*
	 * Raise the barrier's priority so newly queued tasks cannot
	 * pass it.
	 */
	t_barrier.ta_priority = USHRT_MAX;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wake up any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

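/*
 * Main dispatch loop.  Called with the queue lock held; pops tasks off
 * the queue and runs them with the lock dropped, publishing the running
 * task on tq_active so cancel/drain can find it, and waking any drain
 * waiters as tasks finish.
 */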
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

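/*
 * Return non-zero if any taskqueue thread is currently running the
 * given task.  Called with the queue lock held.
 */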
static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

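/*
 * Remove a pending task from the queue without waiting.  The old
 * pending count is stored through pendp if it is non-NULL.  Returns
 * EBUSY if the task is currently being run, 0 otherwise.
 */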
int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

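/*
 * Sleep until the given task is neither pending nor running.  This only
 * waits out instances that are already queued; the caller must prevent
 * new enqueues if the task has to stay gone.
 */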
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

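/*
 * Common backend for taskqueue_start_threads*(): create "count" kernel
 * threads (born stopped, then given priority "pri" and scheduled),
 * optionally pinning each one to the CPUs in "mask".
 */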
static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/*
			 * Should be ok to continue; taskqueue_free()
			 * will do the right thing.
			 */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: thread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}
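
/*
 * Example: a driver creating a private queue serviced by one thread
 * (a sketch; "sc", sc->foo_tq and sc->foo_dev are hypothetical):
 *
 *	sc->foo_tq = taskqueue_create("foo taskq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->foo_tq);
 *	taskqueue_start_threads(&sc->foo_tq, 1, PWAIT, "%s taskq",
 *	    device_get_nameunit(sc->foo_dev));
 *
 * taskqueue_thread_enqueue() takes a pointer to the queue pointer,
 * which is why &sc->foo_tq doubles as the enqueue context here.
 */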

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

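/*
 * Service loop run by each taskqueue thread: execute tasks while the
 * queue is active, sleep when idle, and run the queue one final time
 * before exiting so no queued tasks are lost at shutdown.
 */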
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * must re-check whether the TQ_FLAGS_ACTIVE flag was
		 * cleared in the meantime, in which case we missed a
		 * wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* Rendezvous with the thread that asked us to terminate. */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

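/*
 * Enqueue hook for thread-backed queues: wake one service thread.
 * Recognized by _taskqueue_create() and therefore called without the
 * queue lock held (TQ_FLAGS_UNLOCKED_ENQUEUE).
 */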
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue"));
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return (taskqueue_enqueue(queue, task));
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

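/*
 * Return non-zero if td is one of this queue's service threads.  NULL
 * slots left by failed kthread_add() calls are skipped and do not count
 * towards tq_tcount.
 */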
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
783