/*-
 * Copyright (c) 2000 Doug Rabson
 * Copyright (c) 2014 Jeff Roberson
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/gtaskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
static void	gtaskqueue_thread_enqueue(void *);
static void	gtaskqueue_thread_loop(void *arg);
static int	task_is_running(struct gtaskqueue *queue, struct gtask *gtask);
static void	gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask);

TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);

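/*
 * A gtaskqueue_busy entry is placed on tq_active by each thread running
 * tasks.  It records the task currently being executed and a sequence
 * number that gtaskqueue_drain_tq_active() uses to wait only for tasks
 * that were already running when the drain began.
 */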
struct gtaskqueue_busy {
	struct gtask		*tb_running;
	u_int			 tb_seq;
	LIST_ENTRY(gtaskqueue_busy) tb_link;
};

typedef void (*gtaskqueue_enqueue_fn)(void *context);

struct gtaskqueue {
	STAILQ_HEAD(, gtask)	tq_queue;
	LIST_HEAD(, gtaskqueue_busy) tq_active;
	u_int			tq_seq;
	int			tq_callouts;
	struct mtx_padalign	tq_mutex;
	gtaskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

#ifdef INVARIANTS
static void
gtask_dump(struct gtask *gtask)
{
	printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n",
	    gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func, gtask->ta_context);
}
#endif

static __inline int
TQ_SLEEP(struct gtaskqueue *tq, void *p, const char *wm)
{
	if (tq->tq_spin)
		return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0));
	return (msleep(p, &tq->tq_mutex, 0, wm, 0));
}

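/*
 * Allocate and initialize a group taskqueue.  A spin mutex is used when
 * MTX_SPIN is set in mtxflags, which also selects msleep_spin() in
 * TQ_SLEEP().
 */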
static struct gtaskqueue *
_gtaskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct gtaskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO);
	if (!tq_name)
		return (NULL);

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
	if (!queue) {
		free(tq_name, M_GTASKQUEUE);
		return (NULL);
	}

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == gtaskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "gtq_destroy");
	}
}

static void __unused
gtaskqueue_free(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	gtaskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_GTASKQUEUE);
	free(queue->tq_name, M_GTASKQUEUE);
	free(queue, M_GTASKQUEUE);
}

/*
 * Prevent a grouptask from being enqueued and wait for any queued or
 * currently running instance of it to complete.
 */
void
grouptask_block(struct grouptask *grouptask)
{
	struct gtaskqueue *queue = grouptask->gt_taskqueue;
	struct gtask *gtask = &grouptask->gt_task;

#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	gtask->ta_flags |= TASK_NOENQUEUE;
	gtaskqueue_drain_locked(queue, gtask);
	TQ_UNLOCK(queue);
}

void
grouptask_unblock(struct grouptask *grouptask)
{
	struct gtaskqueue *queue = grouptask->gt_taskqueue;
	struct gtask *gtask = &grouptask->gt_task;

#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	gtask->ta_flags &= ~TASK_NOENQUEUE;
	TQ_UNLOCK(queue);
}

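/*
 * Enqueue a group task.  Returns 0 if the task is now queued (or was
 * already queued) and EAGAIN if enqueueing is currently prevented by
 * grouptask_block().  The queue's enqueue callback is only invoked when
 * the queue itself is not blocked.
 */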
int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{
#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	if (gtask->ta_flags & TASK_ENQUEUED) {
		TQ_UNLOCK(queue);
		return (0);
	}
	if (gtask->ta_flags & TASK_NOENQUEUE) {
		TQ_UNLOCK(queue);
		return (EAGAIN);
	}
	STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
	gtask->ta_flags |= TASK_ENQUEUED;
	TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	return (0);
}

static void
gtaskqueue_task_nop_fn(void *context)
{
}


/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
{
	struct gtask t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_flags |= TASK_ENQUEUED;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_flags & TASK_ENQUEUED)
		TQ_SLEEP(queue, &t_barrier, "gtq_qdrain");
}


/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
{
	struct gtaskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return;

	/* Block gtaskqueue_terminate(). */
	queue->tq_callouts++;

	/* Wait for any active task with sequence from the past. */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "gtq_adrain");
			goto restart;
		}
	}

	/* Release gtaskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}


void
gtaskqueue_block(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
gtaskqueue_unblock(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

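/*
 * Run tasks from the queue until it is empty.  The queue lock is dropped
 * around each task function.  The network epoch is entered before running
 * a task for which TASK_IS_NET() is true and exited before running one
 * for which it is not, so consecutive network tasks share a single epoch
 * section.
 */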
static void
gtaskqueue_run_locked(struct gtaskqueue *queue)
{
	struct epoch_tracker et;
	struct gtaskqueue_busy tb;
	struct gtask *gtask;
	bool in_net_epoch;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
	in_net_epoch = false;

	while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		gtask->ta_flags &= ~TASK_ENQUEUED;
		tb.tb_running = gtask;
		tb.tb_seq = ++queue->tq_seq;
		TQ_UNLOCK(queue);

		KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
		if (!in_net_epoch && TASK_IS_NET(gtask)) {
			in_net_epoch = true;
			NET_EPOCH_ENTER(et);
		} else if (in_net_epoch && !TASK_IS_NET(gtask)) {
			NET_EPOCH_EXIT(et);
			in_net_epoch = false;
		}
		gtask->ta_func(gtask->ta_context);

		TQ_LOCK(queue);
		wakeup(gtask);
	}
	if (in_net_epoch)
		NET_EPOCH_EXIT(et);
	LIST_REMOVE(&tb, tb_link);
}


static int
task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
{
	struct gtaskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == gtask)
			return (1);
	}
	return (0);
}

static int
gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (gtask->ta_flags & TASK_ENQUEUED)
		STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
	gtask->ta_flags &= ~TASK_ENQUEUED;
	return (task_is_running(queue, gtask) ? EBUSY : 0);
}

int
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
{
	int error;

	TQ_LOCK(queue);
	error = gtaskqueue_cancel_locked(queue, gtask);
	TQ_UNLOCK(queue);

	return (error);
}

static void
gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask)
{
	while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
		TQ_SLEEP(queue, gtask, "gtq_drain");
}


void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	gtaskqueue_drain_locked(queue, gtask);
	TQ_UNLOCK(queue);
}

void
gtaskqueue_drain_all(struct gtaskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	gtaskqueue_drain_tq_queue(queue);
	gtaskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

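/*
 * Create "count" kernel threads for the taskqueue, optionally pinned to
 * the CPUs in "mask", and start them at scheduling priority "pri".
 */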
static int
_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct gtaskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/*
			 * Should be ok to continue; gtaskqueue_free()
			 * will do the right thing.
			 */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
	}

	return (0);
}


static int
gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

static inline void
gtaskqueue_run_callback(struct gtaskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

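/*
 * Main loop for a group taskqueue worker thread: run tasks while the queue
 * is active, sleeping on the queue between batches, then drain once more
 * and rendezvous with gtaskqueue_terminate() on the way out.
 */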
static void
gtaskqueue_thread_loop(void *arg)
{
	struct gtaskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		gtaskqueue_run_locked(tq);
		/*
		 * Because gtaskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	gtaskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* Rendezvous with the thread that asked us to terminate. */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

static void
gtaskqueue_thread_enqueue(void *context)
{
	struct gtaskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_any(tq);
}

static struct gtaskqueue *
gtaskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _gtaskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

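/*
 * A taskqgroup is a set of per-CPU group taskqueues.  Each taskqgroup_cpu
 * carries the list of grouptasks attached to that queue, the queue itself,
 * the number of attached tasks, and the CPU its thread is associated with.
 */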
struct taskqgroup_cpu {
	LIST_HEAD(, grouptask)	tgc_tasks;
	struct gtaskqueue	*tgc_taskq;
	int			tgc_cnt;
	int			tgc_cpu;
};

struct taskqgroup {
	struct taskqgroup_cpu	tqg_queue[MAXCPU];
	struct mtx		tqg_lock;
	const char		*tqg_name;
	int			tqg_cnt;
};

struct taskq_bind_task {
	struct gtask		bt_task;
	int			bt_cpuid;
};


static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
{
	struct taskqgroup_cpu *qcpu;

	qcpu = &qgroup->tqg_queue[idx];
	LIST_INIT(&qcpu->tgc_tasks);
	qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
	    gtaskqueue_thread_enqueue, &qcpu->tgc_taskq);
	gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
	    "%s_%d", qgroup->tqg_name, idx);
	qcpu->tgc_cpu = cpu;
}


/*
 * Find the taskqueue with the fewest tasks that is not already servicing
 * any other grouptask with this uniq identifier; fall back to the least
 * loaded queue if there is none.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
	struct grouptask *n;
	int i, idx, mincnt;
	int strict;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
	KASSERT(qgroup->tqg_cnt != 0,
	    ("qgroup %s has no queues", qgroup->tqg_name));

	/*
	 * Two passes: first scan for a queue with the least tasks that
	 * does not already service this uniq id.  If that fails simply find
	 * the queue with the least total tasks.
	 */
	for (idx = -1, mincnt = INT_MAX, strict = 1; mincnt == INT_MAX;
	    strict = 0) {
		for (i = 0; i < qgroup->tqg_cnt; i++) {
			if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
				continue;
			if (strict) {
				LIST_FOREACH(n, &qgroup->tqg_queue[i].tgc_tasks,
				    gt_list)
					if (n->gt_uniq == uniq)
						break;
				if (n != NULL)
					continue;
			}
			mincnt = qgroup->tqg_queue[i].tgc_cnt;
			idx = i;
		}
	}
	if (idx == -1)
		panic("%s: failed to pick a qid.", __func__);

	return (idx);
}

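/*
 * Attach a grouptask to the group, placing it on the queue chosen by
 * taskqgroup_find().  If a device and interrupt are supplied, the interrupt
 * is bound to the chosen queue's CPU.
 */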
void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, device_t dev, struct resource *irq, const char *name)
{
	int cpu, qid, error;

	KASSERT(qgroup->tqg_cnt > 0,
	    ("qgroup %s has no queues", qgroup->tqg_name));

	gtask->gt_uniq = uniq;
	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s",
	    name ? name : "grouptask");
	gtask->gt_dev = dev;
	gtask->gt_irq = irq;
	gtask->gt_cpu = -1;
	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, uniq);
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (dev != NULL && irq != NULL) {
		cpu = qgroup->tqg_queue[qid].tgc_cpu;
		gtask->gt_cpu = cpu;
		mtx_unlock(&qgroup->tqg_lock);
		error = bus_bind_intr(dev, irq, cpu);
		if (error)
			printf("%s: binding interrupt failed for %s: %d\n",
			    __func__, gtask->gt_name, error);
	} else
		mtx_unlock(&qgroup->tqg_lock);
}

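/*
 * Attach a grouptask to the queue servicing the given CPU.  Returns EINVAL
 * if the group has no queue for that CPU; otherwise behaves like
 * taskqgroup_attach(), including binding the interrupt when dev and irq
 * are given.
 */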
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int cpu, device_t dev, struct resource *irq, const char *name)
{
	int i, qid, error;

	gtask->gt_uniq = uniq;
	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s",
	    name ? name : "grouptask");
	gtask->gt_dev = dev;
	gtask->gt_irq = irq;
	gtask->gt_cpu = cpu;
	mtx_lock(&qgroup->tqg_lock);
	for (i = 0, qid = -1; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
			qid = i;
			break;
		}
	if (qid == -1) {
		mtx_unlock(&qgroup->tqg_lock);
		printf("%s: qid not found for %s cpu=%d\n", __func__,
		    gtask->gt_name, cpu);
		return (EINVAL);
	}
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	cpu = qgroup->tqg_queue[qid].tgc_cpu;
	mtx_unlock(&qgroup->tqg_lock);

	if (dev != NULL && irq != NULL) {
		error = bus_bind_intr(dev, irq, cpu);
		if (error)
			printf("%s: binding interrupt failed for %s: %d\n",
			    __func__, gtask->gt_name, error);
	}
	return (0);
}


void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	int i;

	grouptask_block(gtask);
	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
			break;
	if (i == qgroup->tqg_cnt)
		panic("%s: task %s not in group", __func__, gtask->gt_name);
	qgroup->tqg_queue[i].tgc_cnt--;
	LIST_REMOVE(gtask, gt_list);
	mtx_unlock(&qgroup->tqg_lock);
	gtask->gt_taskqueue = NULL;
	gtask->gt_task.ta_flags &= ~TASK_NOENQUEUE;
}


static void
taskqgroup_binder(void *ctx)
{
	struct taskq_bind_task *gtask;
	cpuset_t mask;
	int error;

	gtask = ctx;
	CPU_ZERO(&mask);
	CPU_SET(gtask->bt_cpuid, &mask);
	error = cpuset_setthread(curthread->td_tid, &mask);
	thread_lock(curthread);
	sched_bind(curthread, gtask->bt_cpuid);
	thread_unlock(curthread);

	if (error)
		printf("%s: binding curthread failed: %d\n", __func__, error);
	free(gtask, M_DEVBUF);
}


void
taskqgroup_bind(struct taskqgroup *qgroup)
{
	struct taskq_bind_task *gtask;
	int i;

	/*
	 * Bind taskqueue threads to specific CPUs, if they have been assigned
	 * one.
	 */
	if (qgroup->tqg_cnt == 1)
		return;

	for (i = 0; i < qgroup->tqg_cnt; i++) {
		gtask = malloc(sizeof(*gtask), M_DEVBUF, M_WAITOK);
		GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
		gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
		grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
		    &gtask->bt_task);
	}
}

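/*
 * Create a taskqgroup with "cnt" queues.  Queue 0 is associated with CPU 0
 * and each subsequent queue with the CPU reached by advancing "stride"
 * CPUs from the previous queue's CPU.
 */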
struct taskqgroup *
taskqgroup_create(const char *name, int cnt, int stride)
{
	struct taskqgroup *qgroup;
	int cpu, i, j;

	qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
	qgroup->tqg_name = name;
	qgroup->tqg_cnt = cnt;

	for (cpu = i = 0; i < cnt; i++) {
		taskqgroup_cpu_create(qgroup, i, cpu);
		for (j = 0; j < stride; j++)
			cpu = CPU_NEXT(cpu);
	}
	return (qgroup);
}

void
taskqgroup_destroy(struct taskqgroup *qgroup)
{
}

void
taskqgroup_drain_all(struct taskqgroup *tqg)
{
	struct gtaskqueue *q;

	for (int i = 0; i < mp_ncpus; i++) {
		q = tqg->tqg_queue[i].tgc_taskq;
		if (q == NULL)
			continue;
		gtaskqueue_drain_all(q);
	}
}