/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif

static int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

static int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

static int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

static uint_t spl_taskq_thread_sequential = 4;
/* BEGIN CSTYLED */
module_param(spl_taskq_thread_sequential, uint, 0644);
/* END CSTYLED */
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");

/*
 * Global system-wide dynamic task queue available for all consumers. This
 * taskq is not intended for long-running tasks; instead, a dedicated taskq
 * should be created.
 */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;

static int
task_km_flags(uint_t flags)
{
	if (flags & TQ_NOSLEEP)
		return (KM_NOSLEEP);

	if (flags & TQ_PUSHPAGE)
		return (KM_PUSHPAGE);

	return (KM_SLEEP);
}

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
static int
taskq_find_by_name(const char *name)
{
	struct list_head *tql = NULL;
	taskq_t *tq;

	list_for_each_prev(tql, &tq_list) {
		tq = list_entry(tql, taskq_t, tq_taskqs);
		if (strcmp(name, tq->tq_name) == 0)
			return (tq->tq_instance);
	}
	return (-1);
}

/*
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t which
 * is not attached to the free, work, or pending taskq lists.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
	taskq_ent_t *t;
	int count = 0;

	ASSERT(tq);
retry:
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);
		return (t);
	}

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)
		return (NULL);

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
			return (NULL);

		/*
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by up to one second
		 * (100 short polls), thereby throttling the task dispatch
		 * rate.
		 */
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
		    tq->tq_lock_class);
		if (count < 100) {
			count++;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

	if (t) {
		taskq_init_ent(t);
		tq->tq_nalloc++;
	}

	return (t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));
	tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
	ASSERT(tq);
	ASSERT(t);

	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = TASKQID_INVALID;
		t->tqent_func = NULL;
		t->tqent_arg = NULL;
		t->tqent_flags = 0;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
	} else {
		task_free(tq, t);
	}
}

/*
 * When a delayed task timer expires, remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
static void
task_expire_impl(taskq_ent_t *t)
{
	taskq_ent_t *w;
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l = NULL;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return;
	}

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	/*
	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
	 */
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);
			break;
		}
	}
	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);
}

static void
task_expire(spl_timer_list_t tl)
{
	struct timer_list *tmr = (struct timer_list *)tl;
	taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
	task_expire_impl(t);
}

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
	taskqid_t lowest_id = tq->tq_next_id;
	taskq_ent_t *t;
	taskq_thread_t *tqt;

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);
	}

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		    tqt_active_list);
		ASSERT(tqt->tqt_id != TASKQID_INVALID);
		lowest_id = MIN(lowest_id, tqt->tqt_id);
	}

	return (lowest_id);
}

/*
 * Insert a taskq thread into the active list, keeping the list sorted
 * by increasing taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
	taskq_thread_t *w;
	struct list_head *l = NULL;

	ASSERT(tq);
	ASSERT(tqt);

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);
			break;
		}
	}
	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
	struct list_head *l = NULL;
	taskq_ent_t *t;

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)
			return (t);

		if (t->tqent_id > id)
			break;
	}

	return (NULL);
}

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
	taskq_thread_t *tqt;
	struct list_head *l = NULL;
	taskq_ent_t *t;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);
	if (t)
		return (t);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);
	if (t)
		return (t);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			/*
			 * Instead of returning tqt_task, we just return a non
			 * NULL value to prevent misuse, since tqt_task only
			 * has two valid fields.
			 */
			return (ERR_PTR(-EBUSY));
		}
	}

	return (NULL);
}

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);
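
/*
 * Illustrative sketch (not part of this file): a consumer that needs to
 * synchronize on one specific task pairs taskq_dispatch() with
 * taskq_wait_id(). The callback and argument names below are hypothetical.
 *
 *	static void my_cb(void *arg) { ... }
 *
 *	taskqid_t id = taskq_dispatch(tq, my_cb, arg, TQ_SLEEP);
 *	if (id != TASKQID_INVALID)
 *		taskq_wait_id(tq, id);	// waits for this id only
 *
 * Because worker threads run concurrently, lower ids may still be
 * executing when taskq_wait_id() returns; use taskq_wait_outstanding()
 * or taskq_wait() below when that matters.
 */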

static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task id's are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);

static int
taskq_wait_check(taskq_t *tq)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (rc);
}

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
void
taskq_wait(taskq_t *tq)
{
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);

int
taskq_member(taskq_t *tq, kthread_t *t)
{
	return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);

taskq_t *
taskq_of_curthread(void)
{
	return (tsd_get(taskq_tsd));
}
EXPORT_SYMBOL(taskq_of_curthread);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	taskq_ent_t *t;
	int rc = ENOENT;
	unsigned long flags;

	ASSERT(tq);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	t = taskq_find(tq, id);
	if (t && t != ERR_PTR(-EBUSY)) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		/*
		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		 */
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		}

		/*
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		 */
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
		}

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
			task_done(tq, t);

		rc = 0;
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (t == ERR_PTR(-EBUSY)) {
		taskq_wait_id(tq, id);
		rc = EBUSY;
	}

	return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
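
/*
 * Illustrative sketch, assuming a previously dispatched id: the return
 * value of taskq_cancel_id() distinguishes the three possible outcomes.
 *
 *	int err = taskq_cancel_id(tq, id);
 *	switch (err) {
 *	case 0:		// canceled before it ran
 *		break;
 *	case ENOENT:	// already completed (or never existed)
 *		break;
 *	case EBUSY:	// was running; taskq_cancel_id() waited for it
 *		break;
 *	}
 *
 * Tasks dispatched from a preallocated taskq_ent_t and then canceled are
 * not returned to the free list; the caller owns that memory.
 */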

static int taskq_thread_spawn(taskq_t *tq);

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *t;
	taskqid_t rc = TASKQID_INVALID;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0)
			goto out;
	}

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
	if (flags & TQ_NOQUEUE)
		list_add(&t->tqent_list, &tq->tq_prio_list);
	/* Queue to the priority list instead of the pending list */
	else if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
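
/*
 * Illustrative sketch of the dispatch flags, using a hypothetical
 * callback my_cb:
 *
 *	// Sleep for memory if needed; the common case.
 *	(void) taskq_dispatch(tq, my_cb, arg, TQ_SLEEP);
 *
 *	// May be used from contexts that cannot sleep; returns
 *	// TASKQID_INVALID rather than blocking when no taskq_ent_t
 *	// is available.
 *	if (taskq_dispatch(tq, my_cb, arg, TQ_NOSLEEP) == TASKQID_INVALID)
 *		// handle the failed dispatch
 *
 *	// Jump ahead of normally queued work.
 *	(void) taskq_dispatch(tq, my_cb, arg, TQ_FRONT);
 *
 * TQ_NOQUEUE additionally refuses the dispatch unless an idle worker
 * (or a newly spawned dynamic worker) can take the task immediately.
 */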

taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskqid_t rc = TASKQID_INVALID;
	taskq_ent_t *t;
	unsigned long irqflags;

	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
		goto out;

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
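
/*
 * Illustrative sketch: expire_time is an absolute expiration expressed in
 * lbolt ticks, not a relative delay. Assuming the usual SPL clock helpers
 * (ddi_get_lbolt(), SEC_TO_TICK()) and a hypothetical callback my_cb:
 *
 *	// Run my_cb roughly five seconds from now.
 *	taskqid_t id = taskq_dispatch_delay(tq, my_cb, arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + SEC_TO_TICK(5));
 *
 *	// A delayed task that has not yet fired can still be canceled.
 *	if (id != TASKQID_INVALID)
 *		(void) taskq_cancel_id(tq, id);
 */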

void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	unsigned long irqflags;
	ASSERT(tq);
	ASSERT(func);

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
	    tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = TASKQID_INVALID;
		goto out;
	}

	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0)
			goto out2;
		flags |= TQ_FRONT;
	}

	spin_lock(&t->tqent_lock);

	/*
	 * Make sure the entry is not on some other taskq; it is important to
	 * ASSERT() under lock
	 */
	ASSERT(taskq_empty_ent(t));

	/*
	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
	else
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	tq->tq_next_id++;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_taskq = tq;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);
out:
	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
out2:
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);

int
taskq_empty_ent(taskq_ent_t *t)
{
	return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);

void
taskq_init_ent(taskq_ent_t *t)
{
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	timer_setup(&t->tqent_timer, NULL, 0);
	INIT_LIST_HEAD(&t->tqent_list);
	t->tqent_id = 0;
	t->tqent_func = NULL;
	t->tqent_arg = NULL;
	t->tqent_flags = 0;
	t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
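
/*
 * Illustrative sketch: a caller that cannot tolerate allocation failure
 * embeds a taskq_ent_t in its own structure, initializes it once with
 * taskq_init_ent(), and dispatches it with taskq_dispatch_ent(). The
 * struct and field names below are hypothetical.
 *
 *	struct my_obj {
 *		taskq_ent_t	mo_ent;
 *		...
 *	};
 *
 *	taskq_init_ent(&obj->mo_ent);
 *	...
 *	// No memory is allocated here, so this cannot fail; the entry
 *	// must not be reused until the previous dispatch has run.
 *	taskq_dispatch_ent(tq, my_cb, obj, 0, &obj->mo_ent);
 *
 * The TQENT_FLAG_PREALLOC flag set above ensures the taskq never frees
 * or recycles such an entry; its lifetime belongs to the caller.
 */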

/*
 * Return the next pending task, preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
 */
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
	struct list_head *list;

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;
	else
		return (NULL);

	return (list_entry(list->next, taskq_ent_t, tqent_list));
}

/*
 * Spawns a new thread for the specified taskq.
 */
static void
taskq_thread_spawn_task(void *arg)
{
	taskq_t *tq = (taskq_t *)arg;
	unsigned long flags;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		tq->tq_nspawn--;
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
}

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq which is also a dynamic taskq cannot be safely used for
 * this.
 */
static int
taskq_thread_spawn(taskq_t *tq)
{
	int spawning = 0;

	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
		    tq, TQ_NOSLEEP);
	}

	return (spawning);
}

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
	if (!(tq->tq_flags & TASKQ_DYNAMIC))
		return (0);

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)
		return (0);

	return
	    ((tq->tq_nspawn == 0) &&	/* No threads are being spawned */
	    (tq->tq_nactive == 0) &&	/* No threads are handling tasks */
	    (tq->tq_nthreads > 1) &&	/* More than 1 thread is running */
	    (!taskq_next_ent(tq)) &&	/* There are no pending tasks */
	    (spl_taskq_thread_dynamic));	/* Dynamic taskqs are allowed */
}

static int
taskq_thread(void *args)
{
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;
	taskq_thread_t *tqt = args;
	taskq_t *tq;
	taskq_ent_t *t;
	int seq_tasks = 0;
	unsigned long flags;
	taskq_ent_t dup_task = {};

	ASSERT(tqt);
	ASSERT(tqt->tqt_tq);
	tq = tqt->tqt_tq;
	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	tsd_set(taskq_tsd, tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/*
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset
	 * tq_nspawn later.
	 */
	if (tq->tq_flags & TASKQ_DYNAMIC)
		tq->tq_nspawn--;

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)
		goto error;

	tq->tq_nthreads++;
	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);
				break;
			}

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			schedule();
			seq_tasks = 0;

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			remove_wait_queue(&tq->tq_work_waitq, &wait);
		} else {
			__set_current_state(TASK_RUNNING);
		}

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);

			/*
			 * A TQENT_FLAG_PREALLOC task may be reused or freed
			 * during the task function call. Store tqent_id and
			 * tqent_flags here.
			 *
			 * Also use an on stack taskq_ent_t for tqt_task
			 * assignment in this case; we want to make sure
			 * to duplicate all fields, so the values are
			 * correct when it's accessed via DTRACE_PROBE*.
			 */
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_flags = t->tqent_flags;

			if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
				dup_task = *t;
				t = &dup_task;
			}
			tqt->tqt_task = t;

			taskq_insert_in_order(tq, tqt);
			tq->tq_nactive++;
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			    tq->tq_lock_class);
			tq->tq_nactive--;
			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
				task_done(tq, t);

			/*
			 * When the current lowest outstanding taskqid is
			 * done, calculate the new lowest outstanding id.
			 */
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
			}

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))
				seq_tasks = 0;

			tqt->tqt_id = TASKQID_INVALID;
			tqt->tqt_flags = 0;
			wake_up_all(&tq->tq_wait_waitq);
		} else {
			if (taskq_thread_should_stop(tq, tqt))
				break;
		}

		set_current_state(TASK_INTERRUPTIBLE);

	}

	__set_current_state(TASK_RUNNING);
	tq->tq_nthreads--;
	list_del_init(&tqt->tqt_thread_list);
error:
	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	tsd_set(taskq_tsd, NULL);
	thread_exit();

	return (0);
}

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
	static int last_used_cpu = 0;
	taskq_thread_t *tqt;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);
	tqt->tqt_tq = tq;
	tqt->tqt_id = TASKQID_INVALID;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	    "%s", tq->tq_name);
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));
		return (NULL);
	}

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);
	}

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

	return (tqt);
}

taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;
	int nthreads = threads_arg;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(!(flags & (TASKQ_CPR_SAFE)));	/* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(threads_arg, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
	if (flags & TASKQ_THREADS_CPU_PCT) {
		tq->tq_hp_support = B_TRUE;
		if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
		    &tq->tq_hp_cb_node) != 0) {
			kmem_free(tq, sizeof (*tq));
			return (NULL);
		}
	}
#endif

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = kmem_strdup(name);
	tq->tq_nactive = 0;
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_cpu_pct = threads_arg;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nalloc = 0;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = TASKQID_INITIAL;
	tq->tq_lowest_id = TASKQID_INITIAL;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
		    tq->tq_lock_class);

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
			    &irqflags));

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);
	}

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
		nthreads = 1;

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			rc = 1;
		else
			count++;
	}

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
	/*
	 * The threads started above may have decremented tq_nspawn, but
	 * they are not dynamically spawned threads, so reset the count
	 * to zero here.
	 */
	tq->tq_nspawn = 0;

	if (rc) {
		taskq_destroy(tq);
		tq = NULL;
	} else {
		down_write(&tq_list_sem);
		tq->tq_instance = taskq_find_by_name(name) + 1;
		list_add_tail(&tq->tq_taskqs, &tq_list);
		up_write(&tq_list_sem);
	}

	return (tq);
}
EXPORT_SYMBOL(taskq_create);
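
/*
 * Illustrative sketch of a typical taskq lifetime; the name, callback, and
 * thread counts below are hypothetical.
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE);
 *	if (tq == NULL)
 *		return (ENOMEM);
 *
 *	(void) taskq_dispatch(tq, my_cb, arg, TQ_SLEEP);
 *	...
 *	taskq_wait(tq);		// drain before tearing down
 *	taskq_destroy(tq);
 *
 * With TASKQ_THREADS_CPU_PCT the second argument is interpreted as a
 * percentage of online CPUs rather than an absolute thread count.
 */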

void
taskq_destroy(taskq_t *tq)
{
	struct task_struct *thread;
	taskq_thread_t *tqt;
	taskq_ent_t *t;
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, flags);

#ifdef HAVE_CPU_HOTPLUG
	if (tq->tq_hp_support) {
		VERIFY0(cpuhp_state_remove_instance_nocalls(
		    spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
	}
#endif
	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for dynamic taskq.
	 */
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	taskq_wait(tq);

	/* remove taskq from global list used by the kstats */
	down_write(&tq_list_sem);
	list_del(&tq->tq_taskqs);
	up_write(&tq_list_sem);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/* wait for spawning threads to insert themselves to the list */
	while (tq->tq_nspawn) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		schedule_timeout_interruptible(1);
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	/*
	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	 */
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
	}

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);
		task_free(tq, t);
	}

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	kmem_strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);

static unsigned int spl_taskq_kick = 0;

/*
 * 2.6.36 API Change
 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete. Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
 */
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
	int ret;
	taskq_t *tq = NULL;
	taskq_ent_t *t;
	unsigned long flags;

	ret = param_set_uint(val, kp);
	if (ret < 0 || !spl_taskq_kick)
		return (ret);
	/* reset value */
	spl_taskq_kick = 0;

	down_read(&tq_list_sem);
	list_for_each_entry(tq, &tq_list, tq_taskqs) {
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		    tq->tq_lock_class);
		/* Check if the first pending is older than 5 seconds */
		t = taskq_next_ent(tq);
		if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
			(void) taskq_thread_spawn(tq);
			printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
			    tq->tq_name, tq->tq_instance);
		}
		spin_unlock_irqrestore(&tq->tq_lock, flags);
	}
	up_read(&tq_list_sem);
	return (ret);
}

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
	.set = param_set_taskq_kick,
	.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
	&spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
	"Write nonzero to kick stuck taskqs to spawn more threads");
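
/*
 * Illustrative sketch: like any module parameter registered with mode 0644,
 * spl_taskq_kick is exposed through sysfs (the path below assumes the module
 * is named "spl"), so a stuck taskq can be kicked from user space:
 *
 *	# echo 1 > /sys/module/spl/parameters/spl_taskq_kick
 *
 * The set handler above then walks tq_list and spawns a thread for any
 * taskq whose oldest pending task has been waiting more than 5 seconds.
 */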

#ifdef HAVE_CPU_HOTPLUG
/*
 * This callback will be called exactly once for each core that comes online,
 * for each dynamic taskq. We attempt to expand taskqs that have
 * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
 * time, to correctly determine whether or not to add a thread.
 */
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;
	int err = 0;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (err);
	}

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads > tq->tq_nthreads) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		taskq_thread_t *tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			err = -1;
		return (err);
	}
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (err);
}

/*
 * While we don't support offlining CPUs, it is possible that CPUs will fail
 * to online successfully. We do need to be able to handle this case
 * gracefully.
 */
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads < tq->tq_nthreads) {
		ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
		taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		struct task_struct *thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		return (0);
	}

out:
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (0);
}
#endif

int
spl_taskq_init(void)
{
	init_rwsem(&tq_list_sem);
	tsd_create(&taskq_tsd, NULL);

#ifdef HAVE_CPU_HOTPLUG
	spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
	    "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
		return (-ENOMEM);

	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		return (-ENOMEM);
	}

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);
		return (-ENOMEM);
	}

	/*
	 * This is used to annotate tq_lock, so
	 *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking
	 */
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

	return (0);
}

void
spl_taskq_fini(void)
{
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);

#ifdef HAVE_CPU_HOTPLUG
	cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
	spl_taskq_cpuhp_state = 0;
#endif
}