/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

/* Sentinel tb_running value that marks a taskqueue_drain_all() waiter. */
struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
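
/*
 * Usage sketch (not part of this file; the "sc" and xx_* names are
 * hypothetical): consumers normally initialize a timeout task with the
 * TIMEOUT_TASK_INIT() macro from <sys/taskqueue.h> rather than calling
 * this function directly.
 *
 *	TIMEOUT_TASK_INIT(taskqueue_thread, &sc->sc_tt, 0, xx_timeout_fn, sc);
 *	taskqueue_enqueue_timeout(taskqueue_thread, &sc->sc_tt, hz);
 */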

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, name);
}
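
/*
 * Usage sketch (not part of this file; "sc" and the names are
 * hypothetical): the common pattern is a private queue serviced by its
 * own kernel thread.
 *
 *	sc->sc_tq = taskqueue_create("xx_taskq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "xx taskq");
 */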

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}
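
/*
 * Usage sketch (not part of this file; xx_tq_init_cb is hypothetical):
 * callbacks run in the taskqueue threads themselves, once when each
 * thread starts and once when it shuts down.
 *
 *	taskqueue_set_callback(sc->sc_tq, TASKQUEUE_CALLBACK_TYPE_INIT,
 *	    xx_tq_init_cb, sc);
 */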

/*
 * Signal the taskqueue threads to terminate and wait until they have
 * all exited and any armed timeout tasks have drained.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}
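
/*
 * Usage sketch (not part of this file; the sc/xx_* names are
 * hypothetical): a task is initialized once and may then be enqueued
 * repeatedly.  Enqueueing an already-pending task just bumps
 * ta_pending, and the handler receives that count when it finally
 * runs.
 *
 *	TASK_INIT(&sc->sc_task, 0, xx_task_fn, sc);
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 */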

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}
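
/*
 * Usage sketch (not part of this file; "sc" is hypothetical): schedule
 * the task roughly one second from now.  A ticks value of 0 enqueues
 * it immediately; the return value is the pending count at the time of
 * the call.
 *
 *	taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_tt, hz);
 */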

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we cannot use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wake up any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(task != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}
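
/*
 * Usage sketch (not part of this file; "sc" is hypothetical):
 * cancellation never sleeps.  EBUSY means the task is running at this
 * instant, so callers that must guarantee it is gone follow up with a
 * drain:
 *
 *	if (taskqueue_cancel_timeout(sc->sc_tq, &sc->sc_tt, NULL) != 0)
 *		taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt);
 */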

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}
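
/*
 * Usage sketch (not part of this file; "sc" is hypothetical): typical
 * detach-time teardown.  Stop the sources of new enqueues first, then
 * drain outstanding work before freeing the queue.
 *
 *	taskqueue_drain(sc->sc_tq, &sc->sc_task);
 *	taskqueue_free(sc->sc_tq);
 */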

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	if (tq->tq_tcount == 0) {
		free(tq->tq_threads, M_TASKQUEUE);
		tq->tq_threads = NULL;
		return (ENOMEM);
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
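
/*
 * The definitions above instantiate the system-wide queues declared in
 * <sys/taskqueue.h>: taskqueue_swi (runs from a software interrupt),
 * taskqueue_swi_giant (the same, but with Giant held) and
 * taskqueue_thread (serviced by a dedicated kernel thread).  A usage
 * sketch, with a hypothetical task:
 *
 *	TASK_INIT(&task, 0, xx_fn, arg);
 *	taskqueue_enqueue(taskqueue_thread, &task);
 */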

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
798