/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Kernel task queues: general-purpose asynchronous task scheduling.
 *
 * A common problem in kernel programming is the need to schedule tasks
 * to be performed later, by another thread. There are several reasons
 * you may want or need to do this:
 *
 * (1) The task isn't time-critical, but your current code path is.
 *
 * (2) The task may require grabbing locks that you already hold.
 *
 * (3) The task may need to block (e.g. to wait for memory), but you
 *     cannot block in your current context.
 *
 * (4) Your code path can't complete because of some condition, but you can't
 *     sleep or fail, so you queue the task for later execution when the
 *     condition disappears.
 *
 * (5) You just want a simple way to launch multiple tasks in parallel.
 *
 * Task queues provide such a facility. In its simplest form (used when
 * performance is not a critical consideration) a task queue consists of a
 * single list of tasks, together with one or more threads to service the
 * list. There are some cases when this simple queue is not sufficient:
 *
 * (1) The task queues are very hot and there is a need to avoid data and lock
 *     contention over global resources.
 *
 * (2) Some tasks may depend on other tasks to complete, so they can't be put
 *     in the same list managed by the same thread.
 *
 * (3) Some tasks may block for a long time, and this should not block other
 *     tasks in the queue.
 *
 * To provide useful service in such cases we define a "dynamic task queue"
 * which has an individual thread for each of the tasks. These threads are
 * dynamically created as they are needed and destroyed when they are not in
 * use. The API for managing task pools is the same as for managing task
 * queues, with the exception of the taskq creation flag TASKQ_DYNAMIC, which
 * indicates that dynamic task pool behavior is desired.
 *
 * Dynamic task queues may also place tasks in the normal queue (called the
 * "backing queue") when the task pool runs out of resources. Users of task
 * queues may disallow such queued scheduling by specifying TQ_NOQUEUE in the
 * dispatch flags.
 *
 * The backing task queue is also used for scheduling internal tasks needed
 * for dynamic task queue maintenance.
 *
 * INTERFACES ==================================================================
 *
 * taskq_t *taskq_create(name, nthreads, pri, minalloc, maxalloc, flags);
 *
 *	Create a taskq with specified properties.
 *	Possible 'flags':
 *
 *	TASKQ_DYNAMIC: Create a dynamic task pool for task management. If this
 *		flag is specified, 'nthreads' specifies the maximum number of
 *		threads in the task queue. Task execution order for dynamic
 *		task queues is not predictable.
 *
 *		If this flag is not specified (default case) a single-list
 *		task queue is created with 'nthreads' threads servicing it.
 *		Entries in this queue are managed by taskq_ent_alloc() and
 *		taskq_ent_free() which try to keep the task population between
 *		'minalloc' and 'maxalloc', but the latter limit is only
 *		advisory for TQ_SLEEP dispatches and the former limit is only
 *		advisory for TQ_NOALLOC dispatches. If TASKQ_PREPOPULATE is
 *		set in 'flags', the taskq will be prepopulated with 'minalloc'
 *		task structures.
 *
 *		Since non-DYNAMIC taskqs are queues, tasks are guaranteed to
 *		be executed in the order they are scheduled if nthreads == 1.
 *		If nthreads > 1, task execution order is not predictable.
 *
 *	TASKQ_PREPOPULATE: Prepopulate task queue with threads.
 *		Also prepopulate the task queue with 'minalloc' task
 *		structures.
 *
 *	TASKQ_THREADS_CPU_PCT: This flag specifies that 'nthreads' should be
 *		interpreted as a percentage of the # of online CPUs on the
 *		system. The taskq subsystem will automatically adjust the
 *		number of threads in the taskq in response to CPU online
 *		and offline events, to keep the ratio. nthreads must be in
 *		the range [0,100].
 *
 *		The calculation used is:
 *
 *			MAX((ncpus_online * percentage)/100, 1)
 *
 *		This flag is not supported for DYNAMIC task queues.
 *		This flag is not compatible with TASKQ_CPR_SAFE.
 *
 *	TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
 *		use their own protocol for handling CPR issues. This flag is
 *		not supported for DYNAMIC task queues. This flag is not
 *		compatible with TASKQ_THREADS_CPU_PCT.
 *
 *	The 'pri' field specifies the default priority for the threads that
 *	service all scheduled tasks.
 *
 * taskq_t *taskq_create_instance(name, instance, nthreads, pri, minalloc,
 *	maxalloc, flags);
 *
 *	Like taskq_create(), but takes an instance number (or -1 to indicate
 *	no instance).
 *
 * taskq_t *taskq_create_proc(name, nthreads, pri, minalloc, maxalloc, proc,
 *	flags);
 *
 *	Like taskq_create(), but creates the taskq threads in the specified
 *	system process. If proc != &p0, this must be called from a thread
 *	in that process.
 *
 * taskq_t *taskq_create_sysdc(name, nthreads, minalloc, maxalloc, proc,
 *	dc, flags);
 *
 *	Like taskq_create_proc(), but the taskq threads will use the
 *	System Duty Cycle (SDC) scheduling class with a duty cycle of dc.
 *
 * void taskq_destroy(tq):
 *
 *	Waits for any scheduled tasks to complete, then destroys the taskq.
 *	The caller should guarantee that no new tasks are scheduled in the
 *	closing taskq.
 *
 * taskqid_t taskq_dispatch(tq, func, arg, flags):
 *
 *	Dispatches the task "func(arg)" to the taskq. The 'flags' argument
 *	indicates whether the caller is willing to block for memory. The
 *	function returns an opaque value which is zero iff dispatch fails.
 *	If flags is TQ_NOSLEEP or TQ_NOALLOC and the task can't be
 *	dispatched, taskq_dispatch() fails and returns (taskqid_t)0.
 *
 *	ASSUMES: func != NULL.
 *
 *	Possible flags:
 *	  TQ_NOSLEEP: Do not wait for resources; may fail.
 *
 *	  TQ_NOALLOC: Do not allocate memory; may fail. May only be used with
 *		non-dynamic task queues.
 *
 *	  TQ_NOQUEUE: Do not enqueue the task if it can't be dispatched due to
 *		lack of available resources; fail instead. If this flag is not
 *		set, and the task pool is exhausted, the task may be scheduled
 *		in the backing queue. This flag may ONLY be used with dynamic
 *		task queues.
 *
 *		NOTE: This flag should always be used when a task queue is
 *		used for tasks that may depend on each other for completion.
 *		Enqueueing dependent tasks may create deadlocks.
 *
 *	  TQ_SLEEP: May block waiting for resources. May still fail for
 *		dynamic task queues if TQ_NOQUEUE is also specified; otherwise
 *		it always succeeds.
 *
 *	  TQ_FRONT: Puts the new task at the front of the queue. Be careful.
 *
 *	NOTE: Dynamic task queues are much more likely to fail in
 *	taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
 *	is important to have backup strategies handling such failures.
 *
 * void taskq_wait(tq):
 *
 *	Waits for all previously scheduled tasks to complete.
 *
 *	NOTE: It does not stop any new task dispatches.
 *	      Do NOT call taskq_wait() from a task: it will cause deadlock.
 *
 * void taskq_suspend(tq)
 *
 *	Suspend all task execution. Tasks already scheduled for a dynamic
 *	task queue will still be executed, but all newly scheduled tasks will
 *	be suspended until taskq_resume() is called.
 *
 * int taskq_suspended(tq)
 *
 *	Returns 1 if taskq is suspended and 0 otherwise. It is intended to
 *	ASSERT that the task queue is suspended.
 *
 * void taskq_resume(tq)
 *
 *	Resume task queue execution.
 *
 * int taskq_member(tq, thread)
 *
 *	Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
 *	intended use is to ASSERT that a given function is called in taskq
 *	context only.
 *
 * system_taskq
 *
 *	Global system-wide dynamic task queue for common uses. It may be used
 *	by any subsystem that needs to schedule tasks and does not need to
 *	manage its own task queues. It is initialized quite early during
 *	system boot.
 *
 * IMPLEMENTATION ==============================================================
 *
 * This is a schematic representation of the task queue structures.
 *
 *   taskq:
 *   +-------------+
 *   | tq_lock     | +---< taskq_ent_free()
 *   +-------------+ |
 *   |...          | | tqent:                  tqent:
 *   +-------------+ | +------------+          +------------+
 *   | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
 *   +-------------+   +------------+          +------------+
 *   |...          |   | ...        |          | ...        |
 *   +-------------+   +------------+          +------------+
 *   | tq_task     |    |
 *   |             |    +-------------->taskq_ent_alloc()
 * +--------------------------------------------------------------------------+
 * | |                     |                  tqent                 tqent     |
 * | +---------------------+     +--> +------------+     +--> +------------+  |
 * | | ...                 |     |    | func, arg  |     |    | func, arg  |  |
 * +>+---------------------+ <---|-+  +------------+ <---|-+  +------------+  |
 *   | tq_task.tqent_next  | ----+ |  | tqent_next | --->+ |  | tqent_next |--+
 *   +---------------------+      |  +------------+      ^ |  +------------+
 * +-| tq_task.tqent_prev  |      +--| tqent_prev |      | +--| tqent_prev |  ^
 * | +---------------------+         +------------+      |    +------------+  |
 * | |...                  |         | ...        |      |    | ...        |  |
 * | +---------------------+         +------------+      |    +------------+  |
 * |                                       ^             |                    |
 * |                                       |             |                    |
 * +---------------------------------------+-------------+   TQ_APPEND() -----+
 *   |             |                       |
 *   |...          |    taskq_thread()-----+
 *   +-------------+
 *   | tq_buckets  |--+-------> [ NULL ] (for regular task queues)
 *   +-------------+  |
 *                    | DYNAMIC TASK QUEUES:
 *                    |
 *                    +-> taskq_bucket[nCPU]        taskq_bucket_dispatch()
 *                        +-------------------+                    ^
 *                   +--->| tqbucket_lock     |                    |
 *                   |    +-------------------+   +--------+      +--------+
 *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  | ^
 *                   |    +-------------------+<--+--------+<--...+--------+ |
 *                   |    | ...               |   | thread |      | thread | |
 *                   |    +-------------------+   +--------+      +--------+ |
 *                   |    +-------------------+                              |
 * taskq_dispatch()--+--->| tqbucket_lock     |             TQ_APPEND()------+
 *      TQ_HASH()    |    +-------------------+   +--------+      +--------+
 *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  |
 *                   |    +-------------------+<--+--------+<--...+--------+
 *                   |    | ...               |   | thread |      | thread |
 *                   |    +-------------------+   +--------+      +--------+
 *                   +---> ...
 *
 *
 * Task queues use the tq_task field to link new entries into the queue. The
 * queue is a circular doubly-linked list. Entries are put at the end of the
 * list with TQ_APPEND() and processed from the front of the list by
 * taskq_thread() in FIFO order. Task queue entries are cached in the free
 * list managed by the taskq_ent_alloc() and taskq_ent_free() functions.
 *
 * All threads used by task queues set the t_taskq field of the thread to
 * point to the task queue.
 *
 * Taskq Thread Management -----------------------------------------------------
 *
 * Taskq's non-dynamic threads are managed with several variables and flags:
 *
 *	* tq_nthreads	- The number of threads in taskq_thread() for the
 *			  taskq.
 *
 *	* tq_active	- The number of threads not waiting on a CV in
 *			  taskq_thread(); includes newly created threads
 *			  not yet counted in tq_nthreads.
 *
 *	* tq_nthreads_target
 *			- The number of threads desired for the taskq.
 *
 *	* tq_flags & TASKQ_CHANGING
 *			- Indicates that tq_nthreads != tq_nthreads_target.
 *
 *	* tq_flags & TASKQ_THREAD_CREATED
 *			- Indicates that a thread is being created in the
 *			  taskq.
 *
 * During creation, tq_nthreads and tq_active are set to 0, and
 * tq_nthreads_target is set to the number of threads desired. The
 * TASKQ_CHANGING flag is set, and taskq_thread_create() is called to
 * create the first thread. taskq_thread_create() increments tq_active,
 * sets TASKQ_THREAD_CREATED, and creates the new thread.
 *
 * Each thread starts in taskq_thread(), clears the TASKQ_THREAD_CREATED
 * flag, and increments tq_nthreads. It stores the new value of
 * tq_nthreads as its "thread_id", and stores its thread pointer in the
 * tq_threadlist at index (thread_id - 1). We keep the thread_id space
 * densely packed by requiring that only the largest thread_id can exit during
 * normal adjustment. The exception is during the destruction of the
 * taskq; once tq_nthreads_target is set to zero, no new threads will be
 * created for the taskq, so every thread can exit without any ordering being
 * necessary.
 *
 * Threads will only process work if their thread id is <= tq_nthreads_target.
 *
 * When TASKQ_CHANGING is set, threads will check the current thread target
 * whenever they wake up, and do whatever they can to apply its effects.
 *
 * TASKQ_THREADS_CPU_PCT --------------------------------------------------------
 *
 * When a taskq is created with TASKQ_THREADS_CPU_PCT, we store the requested
 * percentage in tq_threads_ncpus_pct, start the taskq off with the correct
 * thread target, and add it to the taskq_cpupct_list for later adjustment.
 *
 * We register taskq_cpu_setup() to be called whenever a CPU changes state. It
 * walks the list of TASKQ_THREADS_CPU_PCT taskqs, adjusts their
 * nthreads_target if need be, and wakes up all of the threads to process the
 * change.
 *
 * Dynamic Task Queues Implementation ------------------------------------------
 *
 * For dynamic task queues there is a 1-to-1 mapping between a thread and a
 * taskq_ent_t structure. Each entry is serviced by its own thread and each
 * thread is controlled by a single entry.
 *
 * Entries are distributed over a set of buckets. To avoid using modulo
 * arithmetic the number of buckets is 2^n and is determined as the number of
 * CPUs in the system rounded down to the nearest power of two. The tunable
 * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each
 * entry is attached to a bucket for its lifetime and can't migrate to other
 * buckets.
 *
 * Entries that have scheduled tasks are not placed in any list. The dispatch
 * function sets their "func" and "arg" fields and signals the corresponding
 * thread to execute the task. Once the thread executes the task it clears the
 * "func" field and places the entry on the bucket cache of free entries
 * pointed to by the "tqbucket_freelist" field. ALL entries on the free list
 * should have the "func" field equal to NULL. The free list is a circular
 * doubly-linked list identical in structure to the tq_task list above, but
 * entries are taken from it in LIFO order - the last freed entry is the first
 * to be allocated. The taskq_bucket_dispatch() function gets the most
 * recently used entry from the free list, sets its "func" and "arg" fields
 * and signals a worker thread.
 *
 * After executing each task a per-entry thread taskq_d_thread() places its
 * entry on the bucket free list and goes to a timed sleep. If it wakes up
 * without getting a new task it removes the entry from the free list and
 * destroys itself. The thread sleep time is controlled by the tunable
 * variable `taskq_thread_timeout'.
 *
 * There are various statistics kept in the bucket which allow for later
 * analysis of taskq usage patterns. Also, a global copy of taskq creation and
 * death statistics is kept in the global taskq data structure. Since thread
 * creation and death happen rarely, updating such global data does not
 * present a performance problem.
 *
 * NOTE: Threads are not bound to any CPU and there is absolutely no
 *       association between the bucket and the actual thread CPU, so buckets
 *       are used only to split resources and reduce resource contention.
 *       Having threads attached to the CPU denoted by a bucket may reduce
 *       the number of times the job switches between CPUs.
 *
 *       The current algorithm creates a thread whenever a bucket has no free
 *       entries.
 It would be nice to know how many threads are in the
 *       running state and not create threads if all CPUs are busy with
 *       existing tasks, but it is unclear how such a strategy can be
 *       implemented.
 *
 *       Currently buckets are created statically as an array attached to the
 *       task queue. On systems with nCPUs < max_ncpus this may waste system
 *       memory. One solution may be allocation of buckets when they are
 *       first touched, but it is not clear how useful it is.
 *
 * SUSPEND/RESUME implementation -----------------------------------------------
 *
 *	Before executing a task, taskq_thread() (which executes non-dynamic
 *	task queues) obtains the taskq's thread lock as a reader. The
 *	taskq_suspend() function gets the same lock as a writer, blocking all
 *	non-dynamic task execution. The taskq_resume() function releases the
 *	lock, allowing taskq_thread() to continue execution.
 *
 *	For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
 *	the taskq_suspend() function. After that taskq_bucket_dispatch()
 *	always fails, so that taskq_dispatch() will either enqueue tasks for a
 *	suspended backing queue or fail if TQ_NOQUEUE is specified in dispatch
 *	flags.
 *
 *	NOTE: taskq_suspend() does not immediately block any tasks already
 *	      scheduled for dynamic task queues. It only suspends new tasks
 *	      scheduled after taskq_suspend() was called.
 *
 *	The taskq_member() function works by comparing a thread's t_taskq
 *	pointer with the passed thread pointer.
 *
 * LOCKS and LOCK Hierarchy ----------------------------------------------------
 *
 *	There are three locks used in task queues:
 *
 *	1) The taskq_t's tq_lock, protecting global task queue state.
 *
 *	2) Each per-CPU bucket has a lock for bucket management.
 *
 *	3) The global taskq_cpupct_lock, which protects the list of
 *	   TASKQ_THREADS_CPU_PCT taskqs.
 *
 *	If both (1) and (2) are needed, tq_lock should be taken *after* the
 *	bucket lock.
 *
 *	If both (1) and (3) are needed, tq_lock should be taken *after*
 *	taskq_cpupct_lock.
 *
 * DEBUG FACILITIES ------------------------------------------------------------
 *
 * For DEBUG kernels it is possible to induce random failures in the
 * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The
 * values of the taskq_dmtbf and taskq_smtbf tunables control the mean time
 * between induced failures for dynamic and static task queues respectively.
 *
 * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
 *
 * TUNABLES --------------------------------------------------------------------
 *
 *	system_taskq_size	- Size of the global system_taskq.
 *				  This value is multiplied by nCPUs to
 *				  determine the actual size.
 *				  Default value: 64
 *
 *	taskq_minimum_nthreads_max
 *				- Minimum size of the thread list for a
 *				  taskq. Useful for testing different thread
 *				  pool sizes by overwriting
 *				  tq_nthreads_target.
 *
 *	taskq_thread_timeout	- Maximum idle time for taskq_d_thread().
 *				  Default value: 5 minutes
 *
 *	taskq_maxbuckets	- Maximum number of buckets in any task queue.
 *				  Default value: 128
 *
 *	taskq_search_depth	- Maximum # of buckets searched for a free
 *				  entry.
 *				  Default value: 4
 *
 *	taskq_dmtbf		- Mean time between induced dispatch failures
 *				  for dynamic task queues.
 *				  Default value: UINT_MAX (no induced failures)
 *
 *	taskq_smtbf		- Mean time between induced dispatch failures
 *				  for static task queues.
 *				  Default value: UINT_MAX (no induced failures)
 *
 * CONDITIONAL compilation -----------------------------------------------------
 *
 *    TASKQ_STATISTIC	- If set, enables bucket statistics (default).
 *
 */

#include <sys/taskq_impl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/callb.h>
#include <sys/class.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>	/* For throttlefree */
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/sdt.h>
#include <sys/sysdc.h>
#include <sys/note.h>

static kmem_cache_t *taskq_ent_cache, *taskq_cache;

/*
 * Pseudo instance numbers for taskqs without an explicitly provided instance.
 */
static vmem_t *taskq_id_arena;

/* Global system task queue for common use */
taskq_t *system_taskq;

/*
 * Maximum number of entries in the global system taskq is
 *	system_taskq_size * max_ncpus
 */
#define SYSTEM_TASKQ_SIZE 64
int system_taskq_size = SYSTEM_TASKQ_SIZE;

/*
 * Minimum size for tq_nthreads_max; useful for those who want to play around
 * with increasing a taskq's tq_nthreads_target.
 */
int taskq_minimum_nthreads_max = 1;

/*
 * We want to ensure that when taskq_create() returns, there is at least
 * one thread ready to handle requests. To guarantee this, we have to wait
 * for the second thread, since the first one cannot process requests until
 * the second thread has been created.
 */
#define TASKQ_CREATE_ACTIVE_THREADS	2

/* Maximum percentage allowed for TASKQ_THREADS_CPU_PCT */
#define TASKQ_CPUPCT_MAX_PERCENT	1000
int taskq_cpupct_max_percent = TASKQ_CPUPCT_MAX_PERCENT;

/*
 * Dynamic task queue threads that don't get any work within
 * taskq_thread_timeout destroy themselves.
 */
#define TASKQ_THREAD_TIMEOUT	(60 * 5)
int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;

#define TASKQ_MAXBUCKETS	128
int taskq_maxbuckets = TASKQ_MAXBUCKETS;

/*
 * When a bucket has no available entries, other buckets are tried. The
 * taskq_search_depth parameter limits the number of buckets that we search
 * before failing. This is mostly useful in systems with many CPUs where we
 * may spend too much time scanning busy buckets.
 */
#define TASKQ_SEARCH_DEPTH	4
int taskq_search_depth = TASKQ_SEARCH_DEPTH;

/*
 * Hashing function: mix various bits of x. May be pretty much anything.
 */
#define TQ_HASH(x)	((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))

/*
 * We do not create any new threads when the system is low on memory and
 * starts throttling memory allocations. The following macro tries to estimate
 * such a condition.
 */
#define ENOUGH_MEMORY() (freemem > throttlefree)

/*
 * Static functions.
 */
static taskq_t *taskq_create_common(const char *, int, int, pri_t, int,
    int, proc_t *, uint_t, uint_t);
static void taskq_thread(void *);
static void taskq_d_thread(taskq_ent_t *);
static void taskq_bucket_extend(void *);
static int  taskq_constructor(void *, void *, int);
static void taskq_destructor(void *, void *);
static int  taskq_ent_constructor(void *, void *, int);
static void taskq_ent_destructor(void *, void *);
static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
static void taskq_ent_free(taskq_t *, taskq_ent_t *);
static taskq_ent_t *taskq_bucket_dispatch(taskq_bucket_t *, task_func_t,
    void *);

/*
 * Task queues kstats.
 */
struct taskq_kstat {
    kstat_named_t tq_pid;
    kstat_named_t tq_tasks;
    kstat_named_t tq_executed;
    kstat_named_t tq_maxtasks;
    kstat_named_t tq_totaltime;
    kstat_named_t tq_nalloc;
    kstat_named_t tq_nactive;
    kstat_named_t tq_pri;
    kstat_named_t tq_nthreads;
} taskq_kstat = {
    { "pid",		KSTAT_DATA_UINT64 },
    { "tasks",		KSTAT_DATA_UINT64 },
    { "executed",	KSTAT_DATA_UINT64 },
    { "maxtasks",	KSTAT_DATA_UINT64 },
    { "totaltime",	KSTAT_DATA_UINT64 },
    { "nalloc",		KSTAT_DATA_UINT64 },
    { "nactive",	KSTAT_DATA_UINT64 },
    { "priority",	KSTAT_DATA_UINT64 },
    { "threads",	KSTAT_DATA_UINT64 },
};

struct taskq_d_kstat {
    kstat_named_t tqd_pri;
    kstat_named_t tqd_btasks;
    kstat_named_t tqd_bexecuted;
    kstat_named_t tqd_bmaxtasks;
    kstat_named_t tqd_bnalloc;
    kstat_named_t tqd_bnactive;
    kstat_named_t tqd_btotaltime;
    kstat_named_t tqd_hits;
    kstat_named_t tqd_misses;
    kstat_named_t tqd_overflows;
    kstat_named_t tqd_tcreates;
    kstat_named_t tqd_tdeaths;
    kstat_named_t tqd_maxthreads;
    kstat_named_t tqd_nomem;
    kstat_named_t tqd_disptcreates;
    kstat_named_t tqd_totaltime;
    kstat_named_t tqd_nalloc;
    kstat_named_t tqd_nfree;
} taskq_d_kstat = {
    { "priority",	KSTAT_DATA_UINT64 },
    { "btasks",		KSTAT_DATA_UINT64 },
    { "bexecuted",	KSTAT_DATA_UINT64 },
    { "bmaxtasks",	KSTAT_DATA_UINT64 },
    { "bnalloc",	KSTAT_DATA_UINT64 },
    { "bnactive",	KSTAT_DATA_UINT64 },
    { "btotaltime",	KSTAT_DATA_UINT64 },
    { "hits",		KSTAT_DATA_UINT64 },
    { "misses",		KSTAT_DATA_UINT64 },
    { "overflows",	KSTAT_DATA_UINT64 },
    { "tcreates",	KSTAT_DATA_UINT64 },
    { "tdeaths",	KSTAT_DATA_UINT64 },
    { "maxthreads",	KSTAT_DATA_UINT64 },
    { "nomem",		KSTAT_DATA_UINT64 },
    { "disptcreates",	KSTAT_DATA_UINT64 },
    { "totaltime",	KSTAT_DATA_UINT64 },
    { "nalloc",		KSTAT_DATA_UINT64 },
    { "nfree",		KSTAT_DATA_UINT64 },
};

static kmutex_t taskq_kstat_lock;
static kmutex_t taskq_d_kstat_lock;
static int taskq_kstat_update(kstat_t *, int);
static int taskq_d_kstat_update(kstat_t *, int);

/*
 * List of all TASKQ_THREADS_CPU_PCT taskqs.
 */
static list_t taskq_cpupct_list;	/* protected by cpu_lock */

/*
 * Collect per-bucket statistics when TASKQ_STATISTIC is defined.
 */
#define TASKQ_STATISTIC 1

#if TASKQ_STATISTIC
#define TQ_STAT(b, x)	b->tqbucket_stat.x++
#else
#define TQ_STAT(b, x)
#endif

/*
 * Random fault injection.
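 *
 * For example, on a DEBUG kernel, setting taskq_dmtbf to 100 (e.g. with
 * "set taskq_dmtbf = 100" in /etc/system) makes roughly one in every 100
 * TQ_NOSLEEP dispatches to dynamic task queues fail, which exercises
 * callers' dispatch-failure handling. This is an illustrative sketch of
 * how the tunables below are meant to be used, not a recommended setting.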
 */
uint_t taskq_random;
uint_t taskq_dmtbf = UINT_MAX;    /* mean time between injected failures */
uint_t taskq_smtbf = UINT_MAX;    /* mean time between injected failures */

/*
 * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
 *
 * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
 * callers may have prepopulated the cache and made sure that they do not use
 * more than minalloc entries. So, fault injection in this case ensures that
 * either TASKQ_PREPOPULATE is not set or there are more entries allocated
 * than is specified by minalloc. TQ_NOALLOC dispatches are always allowed
 * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
 * dispatches.
 */
#ifdef DEBUG
#define TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & TQ_NOSLEEP) &&				\
	    taskq_random < 1771875 / taskq_dmtbf) {		\
		return (NULL);					\
	}

#define TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) &&		\
	    (!(tq->tq_flags & TASKQ_PREPOPULATE) ||		\
	    (tq->tq_nalloc > tq->tq_minalloc)) &&		\
	    (taskq_random < (1771875 / taskq_smtbf))) {		\
		mutex_exit(&tq->tq_lock);			\
		return (NULL);					\
	}
#else
#define TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
#define TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
#endif

#define IS_EMPTY(l) (((l).tqent_prev == (l).tqent_next) &&	\
	((l).tqent_prev == &(l)))

/*
 * Append `tqe' at the end of the doubly-linked list denoted by l.
 */
#define TQ_APPEND(l, tqe) {					\
	tqe->tqent_next = &l;					\
	tqe->tqent_prev = l.tqent_prev;				\
	tqe->tqent_next->tqent_prev = tqe;			\
	tqe->tqent_prev->tqent_next = tqe;			\
}
/*
 * Prepend 'tqe' to the beginning of l.
 */
#define TQ_PREPEND(l, tqe) {					\
	tqe->tqent_next = l.tqent_next;				\
	tqe->tqent_prev = &l;					\
	tqe->tqent_next->tqent_prev = tqe;			\
	tqe->tqent_prev->tqent_next = tqe;			\
}

/*
 * Schedule a task specified by func and arg into the task queue entry tqe.
 */
#define TQ_DO_ENQUEUE(tq, tqe, func, arg, front) {			\
	ASSERT(MUTEX_HELD(&tq->tq_lock));				\
	_NOTE(CONSTCOND)						\
	if (front) {							\
		TQ_PREPEND(tq->tq_task, tqe);				\
	} else {							\
		TQ_APPEND(tq->tq_task, tqe);				\
	}								\
	tqe->tqent_func = (func);					\
	tqe->tqent_arg = (arg);						\
	tq->tq_tasks++;							\
	if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks)		\
		tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed;	\
	cv_signal(&tq->tq_dispatch_cv);					\
	DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
}

#define TQ_ENQUEUE(tq, tqe, func, arg)					\
	TQ_DO_ENQUEUE(tq, tqe, func, arg, 0)

#define TQ_ENQUEUE_FRONT(tq, tqe, func, arg)				\
	TQ_DO_ENQUEUE(tq, tqe, func, arg, 1)

/*
 * Do-nothing task which may be used to prepopulate thread caches.
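 *
 * For example (an illustrative sketch; 'tq' is a hypothetical handle to a
 * TASKQ_DYNAMIC taskq), a caller could warm up per-bucket threads ahead of
 * a latency-sensitive burst with:
 *
 *	(void) taskq_dispatch(tq, nulltask, NULL, TQ_SLEEP);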
 */
/*ARGSUSED*/
void
nulltask(void *unused)
{
}

/*ARGSUSED*/
static int
taskq_constructor(void *buf, void *cdrarg, int kmflags)
{
    taskq_t *tq = buf;

    bzero(tq, sizeof (taskq_t));

    mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
    rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
    cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);

    tq->tq_task.tqent_next = &tq->tq_task;
    tq->tq_task.tqent_prev = &tq->tq_task;

    return (0);
}

/*ARGSUSED*/
static void
taskq_destructor(void *buf, void *cdrarg)
{
    taskq_t *tq = buf;

    ASSERT(tq->tq_nthreads == 0);
    ASSERT(tq->tq_buckets == NULL);
    ASSERT(tq->tq_tcreates == 0);
    ASSERT(tq->tq_tdeaths == 0);

    mutex_destroy(&tq->tq_lock);
    rw_destroy(&tq->tq_threadlock);
    cv_destroy(&tq->tq_dispatch_cv);
    cv_destroy(&tq->tq_exit_cv);
    cv_destroy(&tq->tq_wait_cv);
}

/*ARGSUSED*/
static int
taskq_ent_constructor(void *buf, void *cdrarg, int kmflags)
{
    taskq_ent_t *tqe = buf;

    tqe->tqent_thread = NULL;
    cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);

    return (0);
}

/*ARGSUSED*/
static void
taskq_ent_destructor(void *buf, void *cdrarg)
{
    taskq_ent_t *tqe = buf;

    ASSERT(tqe->tqent_thread == NULL);
    cv_destroy(&tqe->tqent_cv);
}

void
taskq_init(void)
{
    taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
        sizeof (taskq_ent_t), 0, taskq_ent_constructor,
        taskq_ent_destructor, NULL, NULL, NULL, 0);
    taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
        0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
    taskq_id_arena = vmem_create("taskq_id_arena",
        (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0,
        VM_SLEEP | VMC_IDENTIFIER);

    list_create(&taskq_cpupct_list, sizeof (taskq_t),
        offsetof(taskq_t, tq_cpupct_link));
}

static void
taskq_update_nthreads(taskq_t *tq, uint_t ncpus)
{
    uint_t newtarget = TASKQ_THREADS_PCT(ncpus, tq->tq_threads_ncpus_pct);

    ASSERT(MUTEX_HELD(&cpu_lock));
    ASSERT(MUTEX_HELD(&tq->tq_lock));

    /* We must be going from non-zero to non-zero; no exiting. */
    ASSERT3U(tq->tq_nthreads_target, !=, 0);
    ASSERT3U(newtarget, !=, 0);

    ASSERT3U(newtarget, <=, tq->tq_nthreads_max);
    if (newtarget != tq->tq_nthreads_target) {
        tq->tq_flags |= TASKQ_CHANGING;
        tq->tq_nthreads_target = newtarget;
        cv_broadcast(&tq->tq_dispatch_cv);
        cv_broadcast(&tq->tq_exit_cv);
    }
}

/* called during task queue creation */
static void
taskq_cpupct_install(taskq_t *tq, cpupart_t *cpup)
{
    ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);

    mutex_enter(&cpu_lock);
    mutex_enter(&tq->tq_lock);
    tq->tq_cpupart = cpup->cp_id;
    taskq_update_nthreads(tq, cpup->cp_ncpus);
    mutex_exit(&tq->tq_lock);

    list_insert_tail(&taskq_cpupct_list, tq);
    mutex_exit(&cpu_lock);
}

static void
taskq_cpupct_remove(taskq_t *tq)
{
    ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);

    mutex_enter(&cpu_lock);
    list_remove(&taskq_cpupct_list, tq);
    mutex_exit(&cpu_lock);
}

/*ARGSUSED*/
static int
taskq_cpu_setup(cpu_setup_t what, int id, void *arg)
{
    taskq_t *tq;
    cpupart_t *cp = cpu[id]->cpu_part;
    uint_t ncpus = cp->cp_ncpus;

    ASSERT(MUTEX_HELD(&cpu_lock));
    ASSERT(ncpus > 0);

    switch (what) {
    case CPU_OFF:
    case CPU_CPUPART_OUT:
        /* offlines are called *before* the cpu is offlined. */
        if (ncpus > 1)
            ncpus--;
        break;

    case CPU_ON:
    case CPU_CPUPART_IN:
        break;

    default:
        return (0);		/* doesn't affect cpu count */
    }

    for (tq = list_head(&taskq_cpupct_list); tq != NULL;
        tq = list_next(&taskq_cpupct_list, tq)) {

        mutex_enter(&tq->tq_lock);
        /*
         * If the taskq is part of the cpuset which is changing,
         * update its nthreads_target.
         */
        if (tq->tq_cpupart == cp->cp_id) {
            taskq_update_nthreads(tq, ncpus);
        }
        mutex_exit(&tq->tq_lock);
    }
    return (0);
}

void
taskq_mp_init(void)
{
    mutex_enter(&cpu_lock);
    register_cpu_setup_func(taskq_cpu_setup, NULL);
    /*
     * Make sure we're up to date. At this point in boot, there is only
     * one processor set, so we only have to update the current CPU.
     */
    (void) taskq_cpu_setup(CPU_ON, CPU->cpu_id, NULL);
    mutex_exit(&cpu_lock);
}

/*
 * Create the global system dynamic task queue.
 */
void
system_taskq_init(void)
{
    system_taskq = taskq_create_common("system_taskq", 0,
        system_taskq_size * max_ncpus, minclsyspri, 4, 512, &p0, 0,
        TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}

/*
 * taskq_ent_alloc()
 *
 * Allocates a new taskq_ent_t structure either from the free list or from the
 * cache. Returns NULL if it can't be allocated.
 *
 * Assumes: tq->tq_lock is held.
 */
static taskq_ent_t *
taskq_ent_alloc(taskq_t *tq, int flags)
{
    int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;

    taskq_ent_t *tqe;

    ASSERT(MUTEX_HELD(&tq->tq_lock));

    /*
     * TQ_NOALLOC allocations are allowed to use the freelist, even if
     * we are below tq_minalloc.
963 */ 964 if ((tqe = tq->tq_freelist) != NULL && 965 ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) { 966 tq->tq_freelist = tqe->tqent_next; 967 } else { 968 if (flags & TQ_NOALLOC) 969 return (NULL); 970 971 mutex_exit(&tq->tq_lock); 972 if (tq->tq_nalloc >= tq->tq_maxalloc) { 973 if (kmflags & KM_NOSLEEP) { 974 mutex_enter(&tq->tq_lock); 975 return (NULL); 976 } 977 /* 978 * We don't want to exceed tq_maxalloc, but we can't 979 * wait for other tasks to complete (and thus free up 980 * task structures) without risking deadlock with 981 * the caller. So, we just delay for one second 982 * to throttle the allocation rate. 983 */ 984 delay(hz); 985 } 986 tqe = kmem_cache_alloc(taskq_ent_cache, kmflags); 987 mutex_enter(&tq->tq_lock); 988 if (tqe != NULL) 989 tq->tq_nalloc++; 990 } 991 return (tqe); 992 } 993 994 /* 995 * taskq_ent_free() 996 * 997 * Free taskq_ent_t structure by either putting it on the free list or freeing 998 * it to the cache. 999 * 1000 * Assumes: tq->tq_lock is held. 1001 */ 1002 static void 1003 taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe) 1004 { 1005 ASSERT(MUTEX_HELD(&tq->tq_lock)); 1006 1007 if (tq->tq_nalloc <= tq->tq_minalloc) { 1008 tqe->tqent_next = tq->tq_freelist; 1009 tq->tq_freelist = tqe; 1010 } else { 1011 tq->tq_nalloc--; 1012 mutex_exit(&tq->tq_lock); 1013 kmem_cache_free(taskq_ent_cache, tqe); 1014 mutex_enter(&tq->tq_lock); 1015 } 1016 } 1017 1018 /* 1019 * Dispatch a task "func(arg)" to a free entry of bucket b. 1020 * 1021 * Assumes: no bucket locks is held. 1022 * 1023 * Returns: a pointer to an entry if dispatch was successful. 1024 * NULL if there are no free entries or if the bucket is suspended. 1025 */ 1026 static taskq_ent_t * 1027 taskq_bucket_dispatch(taskq_bucket_t *b, task_func_t func, void *arg) 1028 { 1029 taskq_ent_t *tqe; 1030 1031 ASSERT(MUTEX_NOT_HELD(&b->tqbucket_lock)); 1032 ASSERT(func != NULL); 1033 1034 mutex_enter(&b->tqbucket_lock); 1035 1036 ASSERT(b->tqbucket_nfree != 0 || IS_EMPTY(b->tqbucket_freelist)); 1037 ASSERT(b->tqbucket_nfree == 0 || !IS_EMPTY(b->tqbucket_freelist)); 1038 1039 /* 1040 * Get en entry from the freelist if there is one. 1041 * Schedule task into the entry. 1042 */ 1043 if ((b->tqbucket_nfree != 0) && 1044 !(b->tqbucket_flags & TQBUCKET_SUSPEND)) { 1045 tqe = b->tqbucket_freelist.tqent_prev; 1046 1047 ASSERT(tqe != &b->tqbucket_freelist); 1048 ASSERT(tqe->tqent_thread != NULL); 1049 1050 tqe->tqent_prev->tqent_next = tqe->tqent_next; 1051 tqe->tqent_next->tqent_prev = tqe->tqent_prev; 1052 b->tqbucket_nalloc++; 1053 b->tqbucket_nfree--; 1054 tqe->tqent_func = func; 1055 tqe->tqent_arg = arg; 1056 TQ_STAT(b, tqs_hits); 1057 cv_signal(&tqe->tqent_cv); 1058 DTRACE_PROBE2(taskq__d__enqueue, taskq_bucket_t *, b, 1059 taskq_ent_t *, tqe); 1060 } else { 1061 tqe = NULL; 1062 TQ_STAT(b, tqs_misses); 1063 } 1064 mutex_exit(&b->tqbucket_lock); 1065 return (tqe); 1066 } 1067 1068 /* 1069 * Dispatch a task. 1070 * 1071 * Assumes: func != NULL 1072 * 1073 * Returns: NULL if dispatch failed. 1074 * non-NULL if task dispatched successfully. 1075 * Actual return value is the pointer to taskq entry that was used to 1076 * dispatch a task. This is useful for debugging. 
 */
/* ARGSUSED */
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
    taskq_bucket_t *bucket = NULL;	/* Which bucket needs extension */
    taskq_ent_t *tqe = NULL;
    taskq_ent_t *tqe1;
    uint_t bsize;

    ASSERT(tq != NULL);
    ASSERT(func != NULL);

    if (!(tq->tq_flags & TASKQ_DYNAMIC)) {
        /*
         * The TQ_NOQUEUE flag can't be used with non-dynamic task queues.
         */
        ASSERT(!(flags & TQ_NOQUEUE));
        /*
         * Enqueue the task to the underlying queue.
         */
        mutex_enter(&tq->tq_lock);

        TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);

        if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
            mutex_exit(&tq->tq_lock);
            return (NULL);
        }
        if (flags & TQ_FRONT) {
            TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
        } else {
            TQ_ENQUEUE(tq, tqe, func, arg);
        }
        mutex_exit(&tq->tq_lock);
        return ((taskqid_t)tqe);
    }

    /*
     * Dynamic taskq dispatching.
     */
    ASSERT(!(flags & (TQ_NOALLOC | TQ_FRONT)));
    TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flags);

    bsize = tq->tq_nbuckets;

    if (bsize == 1) {
        /*
         * In the single-CPU case there is only one bucket, so get
         * the entry directly from there.
         */
        if ((tqe = taskq_bucket_dispatch(tq->tq_buckets, func, arg))
            != NULL)
            return ((taskqid_t)tqe);	/* Fastpath */
        bucket = tq->tq_buckets;
    } else {
        int loopcount;
        taskq_bucket_t *b;
        uintptr_t h = ((uintptr_t)CPU + (uintptr_t)arg) >> 3;

        h = TQ_HASH(h);

        /*
         * The 'bucket' points to the original bucket that we hit. If
         * we can't allocate from it, we search other buckets, but
         * only extend this one.
         */
        b = &tq->tq_buckets[h & (bsize - 1)];
        ASSERT(b->tqbucket_taskq == tq);	/* Sanity check */

        /*
         * Do a quick check before grabbing the lock. If the bucket
         * does not have free entries now, chances are very small that
         * it will after we take the lock, so we just skip it.
         */
        if (b->tqbucket_nfree != 0) {
            if ((tqe = taskq_bucket_dispatch(b, func, arg)) != NULL)
                return ((taskqid_t)tqe);	/* Fastpath */
        } else {
            TQ_STAT(b, tqs_misses);
        }

        bucket = b;
        loopcount = MIN(taskq_search_depth, bsize);
        /*
         * If the bucket dispatch failed, search loopcount buckets
         * before we give up and fail.
         */
        do {
            b = &tq->tq_buckets[++h & (bsize - 1)];
            ASSERT(b->tqbucket_taskq == tq);	/* Sanity check */
            loopcount--;

            if (b->tqbucket_nfree != 0) {
                tqe = taskq_bucket_dispatch(b, func, arg);
            } else {
                TQ_STAT(b, tqs_misses);
            }
        } while ((tqe == NULL) && (loopcount > 0));
    }

    /*
     * At this point we either scheduled a task (tqe != NULL) or failed
     * (tqe == NULL). Try to recover from failures.
     */

    /*
     * For KM_SLEEP dispatches, try to extend the bucket and retry the
     * dispatch.
     */
    if ((tqe == NULL) && !(flags & TQ_NOSLEEP)) {
        /*
         * taskq_bucket_extend() may fail to do anything, but this is
         * fine - we deal with it later. If the bucket was successfully
         * extended, there is a good chance that taskq_bucket_dispatch()
         * will get this new entry, unless someone is racing with us and
         * stealing the new entry from under our nose.
         * taskq_bucket_extend() may sleep.
1194 */ 1195 taskq_bucket_extend(bucket); 1196 TQ_STAT(bucket, tqs_disptcreates); 1197 if ((tqe = taskq_bucket_dispatch(bucket, func, arg)) != NULL) 1198 return ((taskqid_t)tqe); 1199 } 1200 1201 ASSERT(bucket != NULL); 1202 /* 1203 * Since there are not enough free entries in the bucket, extend it 1204 * in the background using backing queue. 1205 */ 1206 mutex_enter(&tq->tq_lock); 1207 if ((tqe1 = taskq_ent_alloc(tq, TQ_NOSLEEP)) != NULL) { 1208 TQ_ENQUEUE(tq, tqe1, taskq_bucket_extend, bucket); 1209 } else { 1210 TQ_STAT(bucket, tqs_nomem); 1211 } 1212 1213 /* 1214 * Dispatch failed and we can't find an entry to schedule a task. 1215 * Revert to the backing queue unless TQ_NOQUEUE was asked. 1216 */ 1217 if ((tqe == NULL) && !(flags & TQ_NOQUEUE)) { 1218 if ((tqe = taskq_ent_alloc(tq, flags)) != NULL) { 1219 TQ_ENQUEUE(tq, tqe, func, arg); 1220 } else { 1221 TQ_STAT(bucket, tqs_nomem); 1222 } 1223 } 1224 mutex_exit(&tq->tq_lock); 1225 1226 return ((taskqid_t)tqe); 1227 } 1228 1229 /* 1230 * Wait for all pending tasks to complete. 1231 * Calling taskq_wait from a task will cause deadlock. 1232 */ 1233 void 1234 taskq_wait(taskq_t *tq) 1235 { 1236 ASSERT(tq != curthread->t_taskq); 1237 1238 mutex_enter(&tq->tq_lock); 1239 while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0) 1240 cv_wait(&tq->tq_wait_cv, &tq->tq_lock); 1241 mutex_exit(&tq->tq_lock); 1242 1243 if (tq->tq_flags & TASKQ_DYNAMIC) { 1244 taskq_bucket_t *b = tq->tq_buckets; 1245 int bid = 0; 1246 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) { 1247 mutex_enter(&b->tqbucket_lock); 1248 while (b->tqbucket_nalloc > 0) 1249 cv_wait(&b->tqbucket_cv, &b->tqbucket_lock); 1250 mutex_exit(&b->tqbucket_lock); 1251 } 1252 } 1253 } 1254 1255 /* 1256 * Suspend execution of tasks. 1257 * 1258 * Tasks in the queue part will be suspended immediately upon return from this 1259 * function. Pending tasks in the dynamic part will continue to execute, but all 1260 * new tasks will be suspended. 1261 */ 1262 void 1263 taskq_suspend(taskq_t *tq) 1264 { 1265 rw_enter(&tq->tq_threadlock, RW_WRITER); 1266 1267 if (tq->tq_flags & TASKQ_DYNAMIC) { 1268 taskq_bucket_t *b = tq->tq_buckets; 1269 int bid = 0; 1270 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) { 1271 mutex_enter(&b->tqbucket_lock); 1272 b->tqbucket_flags |= TQBUCKET_SUSPEND; 1273 mutex_exit(&b->tqbucket_lock); 1274 } 1275 } 1276 /* 1277 * Mark task queue as being suspended. Needed for taskq_suspended(). 1278 */ 1279 mutex_enter(&tq->tq_lock); 1280 ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED)); 1281 tq->tq_flags |= TASKQ_SUSPENDED; 1282 mutex_exit(&tq->tq_lock); 1283 } 1284 1285 /* 1286 * returns: 1 if tq is suspended, 0 otherwise. 1287 */ 1288 int 1289 taskq_suspended(taskq_t *tq) 1290 { 1291 return ((tq->tq_flags & TASKQ_SUSPENDED) != 0); 1292 } 1293 1294 /* 1295 * Resume taskq execution. 
1296 */ 1297 void 1298 taskq_resume(taskq_t *tq) 1299 { 1300 ASSERT(RW_WRITE_HELD(&tq->tq_threadlock)); 1301 1302 if (tq->tq_flags & TASKQ_DYNAMIC) { 1303 taskq_bucket_t *b = tq->tq_buckets; 1304 int bid = 0; 1305 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) { 1306 mutex_enter(&b->tqbucket_lock); 1307 b->tqbucket_flags &= ~TQBUCKET_SUSPEND; 1308 mutex_exit(&b->tqbucket_lock); 1309 } 1310 } 1311 mutex_enter(&tq->tq_lock); 1312 ASSERT(tq->tq_flags & TASKQ_SUSPENDED); 1313 tq->tq_flags &= ~TASKQ_SUSPENDED; 1314 mutex_exit(&tq->tq_lock); 1315 1316 rw_exit(&tq->tq_threadlock); 1317 } 1318 1319 int 1320 taskq_member(taskq_t *tq, kthread_t *thread) 1321 { 1322 return (thread->t_taskq == tq); 1323 } 1324 1325 /* 1326 * Creates a thread in the taskq. We only allow one outstanding create at 1327 * a time. We drop and reacquire the tq_lock in order to avoid blocking other 1328 * taskq activity while thread_create() or lwp_kernel_create() run. 1329 * 1330 * The first time we're called, we do some additional setup, and do not 1331 * return until there are enough threads to start servicing requests. 1332 */ 1333 static void 1334 taskq_thread_create(taskq_t *tq) 1335 { 1336 kthread_t *t; 1337 const boolean_t first = (tq->tq_nthreads == 0); 1338 1339 ASSERT(MUTEX_HELD(&tq->tq_lock)); 1340 ASSERT(tq->tq_flags & TASKQ_CHANGING); 1341 ASSERT(tq->tq_nthreads < tq->tq_nthreads_target); 1342 ASSERT(!(tq->tq_flags & TASKQ_THREAD_CREATED)); 1343 1344 1345 tq->tq_flags |= TASKQ_THREAD_CREATED; 1346 tq->tq_active++; 1347 mutex_exit(&tq->tq_lock); 1348 1349 if (tq->tq_proc != &p0) { 1350 t = lwp_kernel_create(tq->tq_proc, taskq_thread, tq, TS_RUN, 1351 tq->tq_pri); 1352 } else { 1353 t = thread_create(NULL, 0, taskq_thread, tq, 0, &p0, TS_RUN, 1354 tq->tq_pri); 1355 } 1356 1357 if (!first) { 1358 mutex_enter(&tq->tq_lock); 1359 return; 1360 } 1361 1362 /* 1363 * We know the thread cannot go away, since tq cannot be 1364 * destroyed until creation has completed. We can therefore 1365 * safely dereference t. 1366 */ 1367 if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) { 1368 taskq_cpupct_install(tq, t->t_cpupart); 1369 } 1370 mutex_enter(&tq->tq_lock); 1371 1372 /* Wait until we can service requests. */ 1373 while (tq->tq_nthreads != tq->tq_nthreads_target && 1374 tq->tq_nthreads < TASKQ_CREATE_ACTIVE_THREADS) { 1375 cv_wait(&tq->tq_wait_cv, &tq->tq_lock); 1376 } 1377 } 1378 1379 /* 1380 * Common "sleep taskq thread" function, which handles CPR stuff, as well 1381 * as giving a nice common point for debuggers to find inactive threads. 1382 */ 1383 static clock_t 1384 taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv, 1385 callb_cpr_t *cprinfo, clock_t timeout) 1386 { 1387 clock_t ret = 0; 1388 1389 if (!(tq->tq_flags & TASKQ_CPR_SAFE)) { 1390 CALLB_CPR_SAFE_BEGIN(cprinfo); 1391 } 1392 if (timeout < 0) 1393 cv_wait(cv, mx); 1394 else 1395 ret = cv_reltimedwait(cv, mx, timeout, TR_CLOCK_TICK); 1396 1397 if (!(tq->tq_flags & TASKQ_CPR_SAFE)) { 1398 CALLB_CPR_SAFE_END(cprinfo, mx); 1399 } 1400 1401 return (ret); 1402 } 1403 1404 /* 1405 * Worker thread for processing task queue. 1406 */ 1407 static void 1408 taskq_thread(void *arg) 1409 { 1410 int thread_id; 1411 1412 taskq_t *tq = arg; 1413 taskq_ent_t *tqe; 1414 callb_cpr_t cprinfo; 1415 hrtime_t start, end; 1416 1417 curthread->t_taskq = tq; /* mark ourselves for taskq_member() */ 1418 1419 if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) { 1420 sysdc_thread_enter(curthread, tq->tq_DC, 1421 (tq->tq_flags & TASKQ_DC_BATCH) ? 
            SYSDC_THREAD_BATCH : 0);
    }

    if (tq->tq_flags & TASKQ_CPR_SAFE) {
        CALLB_CPR_INIT_SAFE(curthread, tq->tq_name);
    } else {
        CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr,
            tq->tq_name);
    }
    mutex_enter(&tq->tq_lock);
    thread_id = ++tq->tq_nthreads;
    ASSERT(tq->tq_flags & TASKQ_THREAD_CREATED);
    ASSERT(tq->tq_flags & TASKQ_CHANGING);
    tq->tq_flags &= ~TASKQ_THREAD_CREATED;

    VERIFY3S(thread_id, <=, tq->tq_nthreads_max);

    if (tq->tq_nthreads_max == 1)
        tq->tq_thread = curthread;
    else
        tq->tq_threadlist[thread_id - 1] = curthread;

    /* Allow taskq_create_common()'s taskq_thread_create() to return. */
    if (tq->tq_nthreads == TASKQ_CREATE_ACTIVE_THREADS)
        cv_broadcast(&tq->tq_wait_cv);

    for (;;) {
        if (tq->tq_flags & TASKQ_CHANGING) {
            /* See if we're no longer needed */
            if (thread_id > tq->tq_nthreads_target) {
                /*
                 * To preserve the one-to-one mapping between
                 * thread_id and thread, we must exit from
                 * highest thread ID to least.
                 *
                 * However, if everyone is exiting, the order
                 * doesn't matter, so just exit immediately.
                 * (this is safe, since you must wait for
                 * nthreads to reach 0 after setting
                 * tq_nthreads_target to 0)
                 */
                if (thread_id == tq->tq_nthreads ||
                    tq->tq_nthreads_target == 0)
                    break;

                /* Wait for higher thread_ids to exit */
                (void) taskq_thread_wait(tq, &tq->tq_lock,
                    &tq->tq_exit_cv, &cprinfo, -1);
                continue;
            }

            /*
             * If no thread is starting taskq_thread(), we can
             * do some bookkeeping.
             */
            if (!(tq->tq_flags & TASKQ_THREAD_CREATED)) {
                /* Check if we've reached our target */
                if (tq->tq_nthreads == tq->tq_nthreads_target) {
                    tq->tq_flags &= ~TASKQ_CHANGING;
                    cv_broadcast(&tq->tq_wait_cv);
                }
                /* Check if we need to create a thread */
                if (tq->tq_nthreads < tq->tq_nthreads_target) {
                    taskq_thread_create(tq);
                    continue;	/* tq_lock was dropped */
                }
            }
        }
        if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
            if (--tq->tq_active == 0)
                cv_broadcast(&tq->tq_wait_cv);
            (void) taskq_thread_wait(tq, &tq->tq_lock,
                &tq->tq_dispatch_cv, &cprinfo, -1);
            tq->tq_active++;
            continue;
        }

        tqe->tqent_prev->tqent_next = tqe->tqent_next;
        tqe->tqent_next->tqent_prev = tqe->tqent_prev;
        mutex_exit(&tq->tq_lock);

        rw_enter(&tq->tq_threadlock, RW_READER);
        start = gethrtime();
        DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
            taskq_ent_t *, tqe);
        tqe->tqent_func(tqe->tqent_arg);
        DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
            taskq_ent_t *, tqe);
        end = gethrtime();
        rw_exit(&tq->tq_threadlock);

        mutex_enter(&tq->tq_lock);
        tq->tq_totaltime += end - start;
        tq->tq_executed++;

        taskq_ent_free(tq, tqe);
    }

    if (tq->tq_nthreads_max == 1)
        tq->tq_thread = NULL;
    else
        tq->tq_threadlist[thread_id - 1] = NULL;

    /* We're exiting, and therefore no longer active */
    ASSERT(tq->tq_active > 0);
    tq->tq_active--;

    ASSERT(tq->tq_nthreads > 0);
    tq->tq_nthreads--;

    /* Wake up anyone waiting for us to exit */
    cv_broadcast(&tq->tq_exit_cv);
    if (tq->tq_nthreads == tq->tq_nthreads_target) {
        if (!(tq->tq_flags & TASKQ_THREAD_CREATED))
            tq->tq_flags &= ~TASKQ_CHANGING;

        cv_broadcast(&tq->tq_wait_cv);
    }

    ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
    CALLB_CPR_EXIT(&cprinfo);		/* drops tq->tq_lock */
    if (curthread->t_lwp != NULL) {
        mutex_enter(&curproc->p_lock);
        lwp_exit();
    } else {
        thread_exit();
    }
}

/*
 * Worker per-entry thread for dynamic dispatches.
 */
static void
taskq_d_thread(taskq_ent_t *tqe)
{
    taskq_bucket_t *bucket = tqe->tqent_bucket;
    taskq_t *tq = bucket->tqbucket_taskq;
    kmutex_t *lock = &bucket->tqbucket_lock;
    kcondvar_t *cv = &tqe->tqent_cv;
    callb_cpr_t cprinfo;
    clock_t w;

    CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, tq->tq_name);

    mutex_enter(lock);

    for (;;) {
        /*
         * If a task is scheduled (func != NULL), execute it, otherwise
         * sleep, waiting for a job.
         */
        if (tqe->tqent_func != NULL) {
            hrtime_t start;
            hrtime_t end;

            ASSERT(bucket->tqbucket_nalloc > 0);

            /*
             * It is possible to free the entry right away before
             * actually executing the task so that subsequent
             * dispatches may immediately reuse it. But this,
             * effectively, creates a queue of length two in the
             * entry and may lead to a deadlock if the execution of
             * the current task depends on the execution of the
             * next scheduled task. So, we keep the entry busy
             * until the task is processed.
             */

            mutex_exit(lock);
            start = gethrtime();
            DTRACE_PROBE3(taskq__d__exec__start, taskq_t *, tq,
                taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
            tqe->tqent_func(tqe->tqent_arg);
            DTRACE_PROBE3(taskq__d__exec__end, taskq_t *, tq,
                taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
            end = gethrtime();
            mutex_enter(lock);
            bucket->tqbucket_totaltime += end - start;

            /*
             * Return the entry to the bucket free list.
             */
            tqe->tqent_func = NULL;
            TQ_APPEND(bucket->tqbucket_freelist, tqe);
            bucket->tqbucket_nalloc--;
            bucket->tqbucket_nfree++;
            ASSERT(!IS_EMPTY(bucket->tqbucket_freelist));
            /*
             * taskq_wait() waits for nalloc to drop to zero on
             * tqbucket_cv.
             */
            cv_signal(&bucket->tqbucket_cv);
        }

        /*
         * At this point the entry must be in the bucket free list -
         * either because it was there initially or because it just
         * finished executing a task and put itself on the free list.
         */
        ASSERT(bucket->tqbucket_nfree > 0);
        /*
         * Go to sleep unless we are closing.
         * If a thread is sleeping too long, it dies.
         */
        if (!(bucket->tqbucket_flags & TQBUCKET_CLOSE)) {
            w = taskq_thread_wait(tq, lock, cv,
                &cprinfo, taskq_thread_timeout * hz);
        }

        /*
         * At this point we may be in two different states:
         *
         * (1) tqent_func is set, which means that a new task is
         *     dispatched and we need to execute it.
         *
         * (2) The thread is sleeping for too long, or we are closing.
         *     In both cases destroy the thread and the entry.
         */

        /* If func is NULL we should be on the freelist. */
        ASSERT((tqe->tqent_func != NULL) ||
            (bucket->tqbucket_nfree > 0));
        /* If func is non-NULL we should be allocated */
        ASSERT((tqe->tqent_func == NULL) ||
            (bucket->tqbucket_nalloc > 0));

        /* Check freelist consistency */
        ASSERT((bucket->tqbucket_nfree > 0) ||
            IS_EMPTY(bucket->tqbucket_freelist));
        ASSERT((bucket->tqbucket_nfree == 0) ||
            !IS_EMPTY(bucket->tqbucket_freelist));

        if ((tqe->tqent_func == NULL) &&
            ((w == -1) || (bucket->tqbucket_flags & TQBUCKET_CLOSE))) {
            /*
             * This thread is sleeping for too long or we are
             * closing - time to die.
             * Thread creation/destruction happens rarely,
             * so grabbing the lock is not a big performance issue.
             * The bucket lock is dropped by CALLB_CPR_EXIT().
             */

            /* Remove the entry from the free list. */
            tqe->tqent_prev->tqent_next = tqe->tqent_next;
            tqe->tqent_next->tqent_prev = tqe->tqent_prev;
            ASSERT(bucket->tqbucket_nfree > 0);
            bucket->tqbucket_nfree--;

            TQ_STAT(bucket, tqs_tdeaths);
            cv_signal(&bucket->tqbucket_cv);
            tqe->tqent_thread = NULL;
            mutex_enter(&tq->tq_lock);
            tq->tq_tdeaths++;
            mutex_exit(&tq->tq_lock);
            CALLB_CPR_EXIT(&cprinfo);
            kmem_cache_free(taskq_ent_cache, tqe);
            thread_exit();
        }
    }
}


/*
 * Taskq creation. May sleep for memory.
 * Always use automatically generated instances to avoid kstat name space
 * collisions.
 */

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, uint_t flags)
{
    ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);

    return (taskq_create_common(name, 0, nthreads, pri, minalloc,
        maxalloc, &p0, 0, flags | TASKQ_NOINSTANCE));
}

/*
 * Create an instance of a task queue. It is legal to create task queues with
 * the same name and different instances.
 *
 * taskq_create_instance is used by ddi_taskq_create() where it gets the
 * instance from ddi_get_instance(). In some cases the instance is not
 * initialized and is set to -1. This case is handled as if no instance was
 * passed at all.
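 *
 * For example (an illustrative sketch; 'dip' is a hypothetical dev_info_t
 * pointer in a driver's attach path):
 *
 *	tq = taskq_create_instance("mydrv_taskq", ddi_get_instance(dip),
 *	    1, minclsyspri, 4, 16, 0);
 *
 * creates a single-threaded taskq whose kstats are tagged with the given
 * driver instance.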

/*
 * Create an instance of a task queue. It is legal to create task queues with
 * the same name and different instances.
 *
 * taskq_create_instance() is used by ddi_taskq_create(), which gets the
 * instance from ddi_get_instance(). In some cases the instance is not
 * initialized and is set to -1. This case is handled as if no instance was
 * passed at all.
 */
taskq_t *
taskq_create_instance(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT((instance >= 0) || (instance == -1));

	if (instance < 0) {
		flags |= TASKQ_NOINSTANCE;
	}

	return (taskq_create_common(name, instance, nthreads,
	    pri, minalloc, maxalloc, &p0, 0, flags));
}

taskq_t *
taskq_create_proc(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, proc_t *proc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT(proc->p_flag & SSYS);

	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, proc, 0, flags | TASKQ_NOINSTANCE));
}

taskq_t *
taskq_create_sysdc(const char *name, int nthreads, int minalloc,
    int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT(proc->p_flag & SSYS);

	return (taskq_create_common(name, 0, nthreads, minclsyspri, minalloc,
	    maxalloc, proc, dc, flags | TASKQ_NOINSTANCE | TASKQ_DUTY_CYCLE));
}
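
/*
 * Example (illustrative; 'my_proc' stands for a system process created
 * elsewhere - both functions assert proc->p_flag & SSYS):
 *
 *	tq = taskq_create_proc("my_proc_tq", 2, minclsyspri, 2, 4,
 *	    my_proc, 0);
 *
 *	tq = taskq_create_sysdc("my_sdc_tq", 2, 2, 4, my_proc, 50, 0);
 *
 * The latter runs its threads in the SDC class with a 50% duty cycle (the
 * 'dc' argument); note that taskq_create_sysdc() takes no 'pri' argument.
 */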

static taskq_t *
taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
{
	taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
	uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
	uint_t bsize;	/* # of buckets - always power of 2 */
	int max_nthreads;

	/*
	 * TASKQ_DYNAMIC, TASKQ_CPR_SAFE and TASKQ_THREADS_CPU_PCT are all
	 * mutually incompatible.
	 */
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_CPR_SAFE));
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_THREADS_CPU_PCT));
	IMPLY((flags & TASKQ_CPR_SAFE), !(flags & TASKQ_THREADS_CPU_PCT));

	/* Cannot have DUTY_CYCLE without a non-p0 kernel process */
	IMPLY((flags & TASKQ_DUTY_CYCLE), proc != &p0);

	/* Cannot have DC_BATCH without DUTY_CYCLE */
	ASSERT((flags & (TASKQ_DUTY_CYCLE|TASKQ_DC_BATCH)) != TASKQ_DC_BATCH);

	ASSERT(proc != NULL);

	bsize = 1 << (highbit(ncpus) - 1);
	ASSERT(bsize >= 1);
	bsize = MIN(bsize, taskq_maxbuckets);

	if (flags & TASKQ_DYNAMIC) {
		ASSERT3S(nthreads, >=, 1);
		tq->tq_maxsize = nthreads;

		/* For dynamic task queues use just one backup thread */
		nthreads = max_nthreads = 1;

	} else if (flags & TASKQ_THREADS_CPU_PCT) {
		uint_t pct;
		ASSERT3S(nthreads, >=, 0);
		pct = nthreads;

		if (pct > taskq_cpupct_max_percent)
			pct = taskq_cpupct_max_percent;

		/*
		 * If you're using THREADS_CPU_PCT, the process for the
		 * taskq threads must be curproc. This allows any pset
		 * binding to be inherited correctly. If proc is &p0,
		 * we won't be creating LWPs, so new threads will be assigned
		 * to the default processor set.
		 */
		ASSERT(curproc == proc || proc == &p0);
		tq->tq_threads_ncpus_pct = pct;
		nthreads = 1;	/* corrected in taskq_thread_create() */
		max_nthreads = TASKQ_THREADS_PCT(max_ncpus, pct);

	} else {
		ASSERT3S(nthreads, >=, 1);
		max_nthreads = nthreads;
	}

	if (max_nthreads < taskq_minimum_nthreads_max)
		max_nthreads = taskq_minimum_nthreads_max;

	/*
	 * Make sure the name is 0-terminated, and conforms to the rules for
	 * C identifiers.
	 */
	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
	strident_canon(tq->tq_name, TASKQ_NAMELEN + 1);

	tq->tq_flags = flags | TASKQ_CHANGING;
	tq->tq_active = 0;
	tq->tq_instance = instance;
	tq->tq_nthreads_target = nthreads;
	tq->tq_nthreads_max = max_nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nbuckets = bsize;
	tq->tq_proc = proc;
	tq->tq_pri = pri;
	tq->tq_DC = dc;
	list_link_init(&tq->tq_cpupct_link);

	if (max_nthreads > 1)
		tq->tq_threadlist = kmem_alloc(
		    sizeof (kthread_t *) * max_nthreads, KM_SLEEP);

	mutex_enter(&tq->tq_lock);
	if (flags & TASKQ_PREPOPULATE) {
		while (minalloc-- > 0)
			taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
	}

	/*
	 * Create the first thread, which will create any other threads
	 * necessary. taskq_thread_create() will not return until we have
	 * enough threads to be able to process requests.
	 */
	taskq_thread_create(tq);
	mutex_exit(&tq->tq_lock);

	if (flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *bucket = kmem_zalloc(sizeof (taskq_bucket_t) *
		    bsize, KM_SLEEP);
		int b_id;

		tq->tq_buckets = bucket;

		/* Initialize each bucket */
		for (b_id = 0; b_id < bsize; b_id++, bucket++) {
			mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
			    NULL);
			cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
			bucket->tqbucket_taskq = tq;
			bucket->tqbucket_freelist.tqent_next =
			    bucket->tqbucket_freelist.tqent_prev =
			    &bucket->tqbucket_freelist;
			if (flags & TASKQ_PREPOPULATE)
				taskq_bucket_extend(bucket);
		}
	}

	/*
	 * Install kstats.
	 * We have two cases:
	 *   1) Instance is provided to taskq_create_instance(). In this case
	 *	it should be >= 0 and we use it.
	 *
	 *   2) Instance is not provided and is automatically generated.
	 */
	if (flags & TASKQ_NOINSTANCE) {
		instance = tq->tq_instance =
		    (int)(uintptr_t)vmem_alloc(taskq_id_arena, 1, VM_SLEEP);
	}

	if (flags & TASKQ_DYNAMIC) {
		if ((tq->tq_kstat = kstat_create("unix", instance,
		    tq->tq_name, "taskq_d", KSTAT_TYPE_NAMED,
		    sizeof (taskq_d_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			tq->tq_kstat->ks_lock = &taskq_d_kstat_lock;
			tq->tq_kstat->ks_data = &taskq_d_kstat;
			tq->tq_kstat->ks_update = taskq_d_kstat_update;
			tq->tq_kstat->ks_private = tq;
			kstat_install(tq->tq_kstat);
		}
	} else {
		if ((tq->tq_kstat = kstat_create("unix", instance, tq->tq_name,
		    "taskq", KSTAT_TYPE_NAMED,
		    sizeof (taskq_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			tq->tq_kstat->ks_lock = &taskq_kstat_lock;
			tq->tq_kstat->ks_data = &taskq_kstat;
			tq->tq_kstat->ks_update = taskq_kstat_update;
			tq->tq_kstat->ks_private = tq;
			kstat_install(tq->tq_kstat);
		}
	}

	return (tq);
}
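
/*
 * Worked example for the bucket sizing above (numbers are illustrative):
 * with ncpus == 6, highbit(6) == 3, so bsize == 1 << 2 == 4 - the largest
 * power of two not exceeding ncpus, further capped by taskq_maxbuckets.
 */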

/*
 * taskq_destroy().
 *
 * Assumes: by the time taskq_destroy() is called, no one will use this task
 * queue in any way and no one will try to dispatch entries into it.
 */
void
taskq_destroy(taskq_t *tq)
{
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	ASSERT(! (tq->tq_flags & TASKQ_CPR_SAFE));

	/*
	 * Destroy kstats.
	 */
	if (tq->tq_kstat != NULL) {
		kstat_delete(tq->tq_kstat);
		tq->tq_kstat = NULL;
	}

	/*
	 * Destroy instance if needed.
	 */
	if (tq->tq_flags & TASKQ_NOINSTANCE) {
		vmem_free(taskq_id_arena, (void *)(uintptr_t)(tq->tq_instance),
		    1);
		tq->tq_instance = 0;
	}

	/*
	 * Unregister from the cpupct list.
	 */
	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
		taskq_cpupct_remove(tq);
	}

	/*
	 * Wait for any pending entries to complete.
	 */
	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);
	ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
	    (tq->tq_active == 0));

	/* notify all the threads that they need to exit */
	tq->tq_nthreads_target = 0;

	tq->tq_flags |= TASKQ_CHANGING;
	cv_broadcast(&tq->tq_dispatch_cv);
	cv_broadcast(&tq->tq_exit_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	if (tq->tq_nthreads_max != 1)
		kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
		    tq->tq_nthreads_max);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0)
		taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));

	mutex_exit(&tq->tq_lock);

	/*
	 * Mark each bucket as closing and wakeup all sleeping threads.
	 */
	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		taskq_ent_t *tqe;

		mutex_enter(&b->tqbucket_lock);

		b->tqbucket_flags |= TQBUCKET_CLOSE;
		/* Wakeup all sleeping threads */

		for (tqe = b->tqbucket_freelist.tqent_next;
		    tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
			cv_signal(&tqe->tqent_cv);

		ASSERT(b->tqbucket_nalloc == 0);

		/*
		 * At this point we have waited for all pending jobs to
		 * complete (in both the task queue and the bucket) and no
		 * new jobs should arrive. Wait for all threads to die.
		 */
		while (b->tqbucket_nfree > 0)
			cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
		mutex_exit(&b->tqbucket_lock);
		mutex_destroy(&b->tqbucket_lock);
		cv_destroy(&b->tqbucket_cv);
	}

	if (tq->tq_buckets != NULL) {
		ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
		kmem_free(tq->tq_buckets,
		    sizeof (taskq_bucket_t) * tq->tq_nbuckets);

		/* Cleanup fields before returning tq to the cache */
		tq->tq_buckets = NULL;
		tq->tq_tcreates = 0;
		tq->tq_tdeaths = 0;
	} else {
		ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
	}

	tq->tq_threads_ncpus_pct = 0;
	tq->tq_totaltime = 0;
	tq->tq_tasks = 0;
	tq->tq_maxtasks = 0;
	tq->tq_executed = 0;
	kmem_cache_free(taskq_cache, tq);
}
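
/*
 * Teardown sketch (illustrative; 'sc' is a hypothetical driver softc): the
 * caller must cut off its own dispatch paths before destroying the taskq:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_draining = B_TRUE;	(dispatch paths now refuse work)
 *	mutex_exit(&sc->sc_lock);
 *	taskq_destroy(sc->sc_tq);	(drains, reaps threads, frees tq)
 */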

/*
 * Extend a bucket with a new entry on the free list and attach a worker
 * thread to it.
 *
 * Argument: pointer to the bucket.
 *
 * This function may quietly fail. It is only used by taskq_dispatch(),
 * which handles such failures properly.
 */
static void
taskq_bucket_extend(void *arg)
{
	taskq_ent_t *tqe;
	taskq_bucket_t *b = (taskq_bucket_t *)arg;
	taskq_t *tq = b->tqbucket_taskq;
	int nthreads;

	if (! ENOUGH_MEMORY()) {
		TQ_STAT(b, tqs_nomem);
		return;
	}

	mutex_enter(&tq->tq_lock);

	/*
	 * Observe global taskq limits on the number of threads.
	 */
	if (tq->tq_tcreates++ - tq->tq_tdeaths > tq->tq_maxsize) {
		tq->tq_tcreates--;
		mutex_exit(&tq->tq_lock);
		return;
	}
	mutex_exit(&tq->tq_lock);

	tqe = kmem_cache_alloc(taskq_ent_cache, KM_NOSLEEP);

	if (tqe == NULL) {
		mutex_enter(&tq->tq_lock);
		TQ_STAT(b, tqs_nomem);
		tq->tq_tcreates--;
		mutex_exit(&tq->tq_lock);
		return;
	}

	ASSERT(tqe->tqent_thread == NULL);

	tqe->tqent_bucket = b;

	/*
	 * Create a thread in a TS_STOPPED state first. If it is successfully
	 * created, place the entry on the free list and start the thread.
	 */
	tqe->tqent_thread = thread_create(NULL, 0, taskq_d_thread, tqe,
	    0, &p0, TS_STOPPED, tq->tq_pri);

	/*
	 * Once the entry is ready, link it to the bucket free list.
	 */
	mutex_enter(&b->tqbucket_lock);
	tqe->tqent_func = NULL;
	TQ_APPEND(b->tqbucket_freelist, tqe);
	b->tqbucket_nfree++;
	TQ_STAT(b, tqs_tcreates);

#if TASKQ_STATISTIC
	nthreads = b->tqbucket_stat.tqs_tcreates -
	    b->tqbucket_stat.tqs_tdeaths;
	b->tqbucket_stat.tqs_maxthreads = MAX(nthreads,
	    b->tqbucket_stat.tqs_maxthreads);
#endif

	mutex_exit(&b->tqbucket_lock);
	/*
	 * Start the stopped thread.
	 */
	thread_lock(tqe->tqent_thread);
	tqe->tqent_thread->t_taskq = tq;
	tqe->tqent_thread->t_schedflag |= TS_ALLSTART;
	setrun_locked(tqe->tqent_thread);
	thread_unlock(tqe->tqent_thread);
}
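
/*
 * Note on the limit check above: tq_tcreates - tq_tdeaths counts live
 * per-entry threads, bounded by tq_maxsize (the 'nthreads' passed to
 * taskq_create() for a TASKQ_DYNAMIC taskq). When the bound is hit, or
 * memory is low, the extension quietly fails and taskq_dispatch() copes
 * with the failure, as noted in the block comment above.
 */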

static int
taskq_kstat_update(kstat_t *ksp, int rw)
{
	struct taskq_kstat *tqsp = &taskq_kstat;
	taskq_t *tq = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	tqsp->tq_pid.value.ui64 = tq->tq_proc->p_pid;
	tqsp->tq_tasks.value.ui64 = tq->tq_tasks;
	tqsp->tq_executed.value.ui64 = tq->tq_executed;
	tqsp->tq_maxtasks.value.ui64 = tq->tq_maxtasks;
	tqsp->tq_totaltime.value.ui64 = tq->tq_totaltime;
	tqsp->tq_nactive.value.ui64 = tq->tq_active;
	tqsp->tq_nalloc.value.ui64 = tq->tq_nalloc;
	tqsp->tq_pri.value.ui64 = tq->tq_pri;
	tqsp->tq_nthreads.value.ui64 = tq->tq_nthreads;
	return (0);
}

static int
taskq_d_kstat_update(kstat_t *ksp, int rw)
{
	struct taskq_d_kstat *tqsp = &taskq_d_kstat;
	taskq_t *tq = ksp->ks_private;
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ASSERT(tq->tq_flags & TASKQ_DYNAMIC);

	tqsp->tqd_btasks.value.ui64 = tq->tq_tasks;
	tqsp->tqd_bexecuted.value.ui64 = tq->tq_executed;
	tqsp->tqd_bmaxtasks.value.ui64 = tq->tq_maxtasks;
	tqsp->tqd_bnalloc.value.ui64 = tq->tq_nalloc;
	tqsp->tqd_bnactive.value.ui64 = tq->tq_active;
	tqsp->tqd_btotaltime.value.ui64 = tq->tq_totaltime;
	tqsp->tqd_pri.value.ui64 = tq->tq_pri;

	tqsp->tqd_hits.value.ui64 = 0;
	tqsp->tqd_misses.value.ui64 = 0;
	tqsp->tqd_overflows.value.ui64 = 0;
	tqsp->tqd_tcreates.value.ui64 = 0;
	tqsp->tqd_tdeaths.value.ui64 = 0;
	tqsp->tqd_maxthreads.value.ui64 = 0;
	tqsp->tqd_nomem.value.ui64 = 0;
	tqsp->tqd_disptcreates.value.ui64 = 0;
	tqsp->tqd_totaltime.value.ui64 = 0;
	tqsp->tqd_nalloc.value.ui64 = 0;
	tqsp->tqd_nfree.value.ui64 = 0;

	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		tqsp->tqd_hits.value.ui64 += b->tqbucket_stat.tqs_hits;
		tqsp->tqd_misses.value.ui64 += b->tqbucket_stat.tqs_misses;
		tqsp->tqd_overflows.value.ui64 += b->tqbucket_stat.tqs_overflow;
		tqsp->tqd_tcreates.value.ui64 += b->tqbucket_stat.tqs_tcreates;
		tqsp->tqd_tdeaths.value.ui64 += b->tqbucket_stat.tqs_tdeaths;
		tqsp->tqd_maxthreads.value.ui64 +=
		    b->tqbucket_stat.tqs_maxthreads;
		tqsp->tqd_nomem.value.ui64 += b->tqbucket_stat.tqs_nomem;
		tqsp->tqd_disptcreates.value.ui64 +=
		    b->tqbucket_stat.tqs_disptcreates;
		tqsp->tqd_totaltime.value.ui64 += b->tqbucket_totaltime;
		tqsp->tqd_nalloc.value.ui64 += b->tqbucket_nalloc;
		tqsp->tqd_nfree.value.ui64 += b->tqbucket_nfree;
	}
	return (0);
}
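
/*
 * Observability note: the counters filled in above surface through
 * kstat(1M), e.g. "kstat -m unix -c taskq" for regular taskqs and
 * "kstat -m unix -c taskq_d" for dynamic ones (module, class and name as
 * passed to kstat_create() in taskq_create_common()).
 */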