/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex, 0);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}
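
/*
 * Illustrative sketch (not compiled): how a consumer would bind a
 * timeout task to an existing queue.  The "mydev" softc layout below
 * is hypothetical; TIMEOUT_TASK_INIT() in <sys/taskqueue.h> is a thin
 * wrapper around _timeout_task_init() above.
 */
#if 0
struct mydev_softc {
	struct taskqueue	*sc_tq;		/* e.g. taskqueue_thread */
	struct timeout_task	sc_refresh;
};

static void
mydev_refresh(void *context, int pending)
{
	/*
	 * Runs in taskqueue thread context; "pending" counts how many
	 * enqueues were coalesced into this run.
	 */
}

static void
mydev_attach_tasks(struct mydev_softc *sc)
{
	sc->sc_tq = taskqueue_thread;
	/* Binds sc_refresh to sc_tq; its callout locks sc_tq's mutex. */
	TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_refresh, 0, mydev_refresh, sc);
}
#endif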

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return (NULL);

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, "taskqueue"));
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (prev == NULL || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins != NULL;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev != NULL)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
		}
		callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func,
		    timeout_task);
	}
	TQ_UNLOCK(queue);
	return (res);
}
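
/*
 * Illustrative sketch (not compiled): enqueueing prioritized and
 * delayed work.  Names prefixed "my" are hypothetical.  A second
 * enqueue of an already-pending task only bumps ta_pending, so
 * mytask_fn() observes pending == 2 on a coalesced run.
 */
#if 0
static struct task mytask;
static struct timeout_task mytimeout;

static void
mytask_fn(void *context, int pending)
{
	printf("ran with %d coalesced enqueue(s)\n", pending);
}

static void
my_example(void)
{
	/* Higher ta_priority sorts ahead of lower in tq_queue. */
	TASK_INIT(&mytask, 10, mytask_fn, NULL);
	taskqueue_enqueue(taskqueue_thread, &mytask);
	taskqueue_enqueue(taskqueue_thread, &mytask);	/* coalesced */

	TIMEOUT_TASK_INIT(taskqueue_thread, &mytimeout, 0, mytask_fn, NULL);
	/* Queued after roughly one second; ticks == 0 would mean "now". */
	taskqueue_enqueue_timeout(taskqueue_thread, &mytimeout, hz);
}
#endif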

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue) != NULL) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}
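
/*
 * Illustrative sketch (not compiled): batching with
 * taskqueue_block()/taskqueue_unblock().  While blocked, enqueues
 * still queue tasks and set TQ_FLAGS_PENDING, but the tq_enqueue
 * wakeup hook is deferred until taskqueue_unblock().  The helper
 * name is hypothetical.
 */
#if 0
static void
my_batch_enqueue(struct taskqueue *tq, struct task *tasks, int n)
{
	int i;

	taskqueue_block(tq);
	for (i = 0; i < n; i++)
		taskqueue_enqueue(tq, &tasks[i]);
	/* One deferred tq_enqueue callback instead of n wakeups. */
	taskqueue_unblock(tq);
}
#endif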

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
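
/*
 * Illustrative sketch (not compiled): the usual detach-time pattern.
 * taskqueue_cancel() removes a queued task but returns EBUSY if it is
 * currently executing, in which case the caller must drain.  The
 * hypothetical softc carries a struct task sc_task and the
 * struct timeout_task sc_refresh from the earlier sketch.
 */
#if 0
static void
mydev_detach_tasks(struct mydev_softc *sc)
{
	/*
	 * May sleep; the WITNESS_WARN() in taskqueue_drain() will
	 * complain if we hold a non-sleepable lock here.
	 */
	if (taskqueue_cancel(sc->sc_tq, &sc->sc_task, NULL) != 0)
		taskqueue_drain(sc->sc_tq, &sc->sc_task);

	/* Timeout tasks need their callout drained too. */
	taskqueue_drain_timeout(sc->sc_tq, &sc->sc_refresh);
}
#endif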

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * must recheck TQ_FLAGS_ACTIVE here: the flag may have
		 * been cleared in the meantime, which means we missed
		 * a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue"));
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return (taskqueue_enqueue(queue, task));
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
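
/*
 * Illustrative sketch (not compiled): the standard way a driver builds
 * a private threaded queue on top of the primitives above.  The
 * address of the queue pointer is passed as the enqueue context so
 * that taskqueue_thread_enqueue() can reach the queue through the
 * indirection even before taskqueue_create() has returned.  The "sc"
 * names are hypothetical.
 */
#if 0
static int
mydev_create_tq(struct mydev_softc *sc)
{
	sc->sc_tq = taskqueue_create("mydev_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL)
		return (ENOMEM);
	/* Two worker threads at PWAIT priority, named "mydev taskq_0/_1". */
	return (taskqueue_start_threads(&sc->sc_tq, 2, PWAIT, "%s taskq",
	    "mydev"));
}
#endif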

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}
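
/*
 * Illustrative sketch (not compiled): using taskqueue_member() to
 * avoid a self-deadlock, since draining a task from the very thread
 * that would run it can never make progress.  Hypothetical names
 * again.
 */
#if 0
static void
mydev_safe_drain(struct mydev_softc *sc)
{
	if (!taskqueue_member(sc->sc_tq, curthread))
		taskqueue_drain(sc->sc_tq, &sc->sc_task);
}
#endif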