/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name);
}

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}
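
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * driver builds a private, thread-backed queue is to pair
 * taskqueue_create() with taskqueue_thread_enqueue, passing a pointer
 * to the queue pointer as the enqueue context, optionally register a
 * callback, and then start the worker thread(s).  The "foo"/"sc" names
 * and the PI_NET priority below are hypothetical choices.
 *
 *	sc->tq = taskqueue_create("foo_taskq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->tq);
 *	taskqueue_set_callback(sc->tq, TASKQUEUE_CALLBACK_TYPE_INIT,
 *	    foo_tq_init_cb, sc);
 *	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
 *	    device_get_nameunit(sc->dev));
 */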

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}
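
/*
 * Illustrative sketch (not part of the original file): a task is a
 * persistent object that is initialized once and may be enqueued many
 * times.  Enqueues that occur before the handler runs are coalesced;
 * the accumulated count is handed to the handler as "pending".  The
 * "foo"/"sc" names are hypothetical.
 *
 *	static void
 *	foo_intr_task(void *context, int pending)
 *	{
 *		struct foo_softc *sc = context;
 *
 *		foo_process_events(sc, pending);
 *	}
 *
 *	TASK_INIT(&sc->intr_task, 0, foo_intr_task, sc);
 *	...
 *	taskqueue_enqueue(sc->tq, &sc->intr_task);
 */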

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing */
		TQ_UNLOCK(queue);
		res = -1;
	} else if (sbt == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (sbt < 0)
				sbt = -sbt; /* Ignore overflow. */
		}
		if (sbt > 0) {
			callout_reset_sbt(&timeout_task->c, sbt, pr,
			    taskqueue_timeout_func, timeout_task, flags);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *ttask, int ticks)
{

	return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
	    0, 0));
}
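
/*
 * Illustrative sketch (not part of the original file): a timeout task
 * defers the enqueue by a tick count (or by an sbintime_t via the _sbt
 * variant).  Timeout tasks may not be used with spin-mutex ("fast")
 * queues, as asserted above.  The names are hypothetical.
 *
 *	TIMEOUT_TASK_INIT(sc->tq, &sc->watchdog_task, 0, foo_watchdog, sc);
 *	taskqueue_enqueue_timeout(sc->tq, &sc->watchdog_task, hz);
 *	...
 *	taskqueue_drain_timeout(sc->tq, &sc->watchdog_task);
 */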

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static int
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return (0);

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
	return (1);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static int
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return (0);

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wakeup any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
	return (1);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}
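
/*
 * Illustrative sketch (not part of the original file): blocking a
 * queue does not stop tasks that are already running; it only keeps
 * newly enqueued tasks from kicking the enqueue hook, so they sit on
 * the queue until taskqueue_unblock() re-kicks it.
 *
 *	taskqueue_block(sc->tq);
 *	taskqueue_enqueue(sc->tq, &sc->task_a);
 *	taskqueue_enqueue(sc->tq, &sc->task_b);
 *	taskqueue_unblock(sc->tq);
 */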

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(task != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

/*
 * Only use this function in single threaded contexts. It returns
 * non-zero if the given task is either pending or running. Else the
 * task is idle and can be queued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_is_running(queue, task);
	TQ_UNLOCK(queue);

	return (retval);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	(void)taskqueue_drain_tq_queue(queue);
	(void)taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	/*
	 * Set flag to prevent timer from re-starting during drain:
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
	    ("Drain already in progress"));
	timeout_task->f |= DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);

	/*
	 * Clear flag to allow timer to re-start:
	 */
	TQ_LOCK(queue);
	timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);
}

void
taskqueue_quiesce(struct taskqueue *queue)
{
	int ret;

	TQ_LOCK(queue);
	do {
		ret = taskqueue_drain_tq_queue(queue);
		if (ret == 0)
			ret = taskqueue_drain_tq_active(queue);
	} while (ret != 0);
	TQ_UNLOCK(queue);
}
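
/*
 * Illustrative sketch (not part of the original file): a typical
 * detach path.  taskqueue_cancel() removes the task if it has not yet
 * started and returns EBUSY if it is currently running, while
 * taskqueue_drain() sleeps until the task is neither pending nor
 * running; only then is it safe to free what the task references and
 * to destroy the queue.
 *
 *	while (taskqueue_cancel(sc->tq, &sc->intr_task, NULL) != 0)
 *		taskqueue_drain(sc->tq, &sc->intr_task);
 *	taskqueue_free(sc->tq);
 */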

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	if (tq->tq_tcount == 0) {
		free(tq->tq_threads, M_TASKQUEUE);
		tq->tq_threads = NULL;
		return (ENOMEM);
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}
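
/*
 * Illustrative sketch (not part of the original file): the _cpuset
 * variant pins the worker threads to the given CPU set; as noted
 * above, a failure to pin is reported but treated as non-fatal.  The
 * names are hypothetical.
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	taskqueue_start_threads_cpuset(&sc->tq, 1, PI_NET, &mask,
 *	    "%s cpu0 taskq", device_get_nameunit(sc->dev));
 */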
788 */ 789 TQ_UNLOCK(tq); 790 taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN); 791 TQ_LOCK(tq); 792 793 /* rendezvous with thread that asked us to terminate */ 794 tq->tq_tcount--; 795 wakeup_one(tq->tq_threads); 796 TQ_UNLOCK(tq); 797 kthread_exit(); 798 } 799 800 void 801 taskqueue_thread_enqueue(void *context) 802 { 803 struct taskqueue **tqp, *tq; 804 805 tqp = context; 806 tq = *tqp; 807 wakeup_one(tq); 808 } 809 810 TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL, 811 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, 812 INTR_MPSAFE, &taskqueue_ih)); 813 814 TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL, 815 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run, 816 NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih)); 817 818 TASKQUEUE_DEFINE_THREAD(thread); 819 820 struct taskqueue * 821 taskqueue_create_fast(const char *name, int mflags, 822 taskqueue_enqueue_fn enqueue, void *context) 823 { 824 return _taskqueue_create(name, mflags, enqueue, context, 825 MTX_SPIN, "fast_taskqueue"); 826 } 827 828 static void *taskqueue_fast_ih; 829 830 static void 831 taskqueue_fast_enqueue(void *context) 832 { 833 swi_sched(taskqueue_fast_ih, 0); 834 } 835 836 static void 837 taskqueue_fast_run(void *dummy) 838 { 839 taskqueue_run(taskqueue_fast); 840 } 841 842 TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL, 843 swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL, 844 SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih)); 845 846 int 847 taskqueue_member(struct taskqueue *queue, struct thread *td) 848 { 849 int i, j, ret = 0; 850 851 for (i = 0, j = 0; ; i++) { 852 if (queue->tq_threads[i] == NULL) 853 continue; 854 if (queue->tq_threads[i] == td) { 855 ret = 1; 856 break; 857 } 858 if (++j >= queue->tq_tcount) 859 break; 860 } 861 return (ret); 862 } 863