1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
28 * Copyright (c) 2017 by Delphix. All rights reserved.
29 * Copyright 2018, Joyent, Inc.
30 * Copyright 2023 RackTop Systems, Inc.
31 */
32
33 /*
34 * Kernel task queues: general-purpose asynchronous task scheduling.
35 *
36 * A common problem in kernel programming is the need to schedule tasks
37 * to be performed later, by another thread. There are several reasons
38 * you may want or need to do this:
39 *
40 * (1) The task isn't time-critical, but your current code path is.
41 *
42 * (2) The task may require grabbing locks that you already hold.
43 *
44 * (3) The task may need to block (e.g. to wait for memory), but you
45 * cannot block in your current context.
46 *
47 * (4) Your code path can't complete because of some condition, but you can't
48 * sleep or fail, so you queue the task for later execution when the
49 * condition disappears.
50 *
51 * (5) You just want a simple way to launch multiple tasks in parallel.
52 *
53 * Task queues provide such a facility. In its simplest form (used when
54 * performance is not a critical consideration) a task queue consists of a
55 * single list of tasks, together with one or more threads to service the
56 * list. There are some cases when this simple queue is not sufficient:
57 *
58 * (1) The task queues are very hot and there is a need to avoid data and lock
59 * contention over global resources.
60 *
61 * (2) Some tasks may depend on other tasks to complete, so they can't be put in
62 * the same list managed by the same thread.
63 *
64 * (3) Some tasks may block for a long time, and this should not block other
65 * tasks in the queue.
66 *
67 * To provide useful service in such cases we define a "dynamic task queue"
68 * which has an individual thread for each of the tasks. These threads are
69 * dynamically created as they are needed and destroyed when they are not in
70 * use. The API for managing task pools is the same as for managing task queues
71 * with the exception of the taskq creation flag TASKQ_DYNAMIC, which
72 * indicates that dynamic task pool behavior is desired.
73 *
74 * Dynamic task queues may also place tasks in the normal queue (called "backing
75 * queue") when the task pool runs out of resources. Users of task queues may
76 * disallow such queued scheduling by specifying TQ_NOQUEUE in the dispatch
77 * flags.
78 *
79 * The backing task queue is also used for scheduling internal tasks needed for
80 * dynamic task queue maintenance.
81 *
82 * INTERFACES ==================================================================
83 *
84 * taskq_t *taskq_create(name, nthreads, pri, minalloc, maxalloc, flags);
85 *
86 * Create a taskq with specified properties.
87 * Possible 'flags':
88 *
89 * TASKQ_DYNAMIC: Create task pool for task management. If this flag is
90 * specified, 'nthreads' specifies the maximum number of threads in
91 * the task queue. Task execution order for dynamic task queues is
92 * not predictable.
93 *
94 * If this flag is not specified (default case) a
95 * single-list task queue is created with 'nthreads' threads
96 * servicing it. Entries in this queue are managed by
97 * taskq_ent_alloc() and taskq_ent_free() which try to keep the
98 * task population between 'minalloc' and 'maxalloc', but the
99 * latter limit is only advisory for TQ_SLEEP dispatches and the
100 * former limit is only advisory for TQ_NOALLOC dispatches. If
101 * TASKQ_PREPOPULATE is set in 'flags', the taskq will be
102 * prepopulated with 'minalloc' task structures.
103 *
104 * Since non-DYNAMIC taskqs are queues, tasks are guaranteed to be
105 * executed in the order they are scheduled if nthreads == 1.
106 * If nthreads > 1, task execution order is not predictable.
107 *
108 * TASKQ_PREPOPULATE: Prepopulate task queue with threads.
109 * Also prepopulate the task queue with 'minalloc' task structures.
110 *
111 * TASKQ_THREADS_CPU_PCT: This flag specifies that 'nthreads' should be
112 * interpreted as a percentage of the # of online CPUs on the
113 * system. The taskq subsystem will automatically adjust the
114 * number of threads in the taskq in response to CPU online
115 * and offline events, to keep the ratio. nthreads must be in
116 * the range [0,100].
117 *
118 * The calculation used is:
119 *
120 * MAX((ncpus_online * percentage)/100, 1)
121 *
122 * This flag is not supported for DYNAMIC task queues.
123 * This flag is not compatible with TASKQ_CPR_SAFE.
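*
*		For example (illustrative numbers only): with 8 CPUs online
*		and nthreads = 50, the taskq runs MAX((8 * 50)/100, 1) = 4
*		threads, dropping to MAX((4 * 50)/100, 1) = 2 threads if
*		4 of those CPUs go offline.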
124 *
125 * TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
126 * use their own protocol for handling CPR issues. This flag is not
127 * supported for DYNAMIC task queues. This flag is not compatible
128 * with TASKQ_THREADS_CPU_PCT.
129 *
130 * The 'pri' field specifies the default priority for the threads that
131 * service all scheduled tasks.
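*
* For illustration, a minimal creation sketch (the name and sizes here are
* hypothetical, not taken from this file):
*
*	taskq_t *tq = taskq_create("my_taskq", 2, minclsyspri,
*	    4, 64, TASKQ_PREPOPULATE);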
132 *
133 * taskq_t *taskq_create_instance(name, instance, nthreads, pri, minalloc,
134 * maxalloc, flags);
135 *
136 * Like taskq_create(), but takes an instance number (or -1 to indicate
137 * no instance).
138 *
139 * taskq_t *taskq_create_proc(name, nthreads, pri, minalloc, maxalloc, proc,
140 * flags);
141 *
142 * Like taskq_create(), but creates the taskq threads in the specified
143 * system process. If proc != &p0, this must be called from a thread
144 * in that process.
145 *
146 * taskq_t *taskq_create_sysdc(name, nthreads, minalloc, maxalloc, proc,
147 * dc, flags);
148 *
149 * Like taskq_create_proc(), but the taskq threads will use the
150 * System Duty Cycle (SDC) scheduling class with a duty cycle of dc.
151 *
152 * void taskq_destroy(tq):
153 *
154 * Waits for any scheduled tasks to complete, then destroys the taskq.
155 * Caller should guarantee that no new tasks are scheduled in the closing
156 * taskq.
157 *
158 * taskqid_t taskq_dispatch(tq, func, arg, flags):
159 *
160 * Dispatches the task "func(arg)" to taskq. The 'flags' indicates whether
161 * the caller is willing to block for memory. The function returns an
162 * opaque value which is zero iff dispatch fails. If flags is TQ_NOSLEEP
163 * or TQ_NOALLOC and the task can't be dispatched, taskq_dispatch() fails
164 * and returns TASKQID_INVALID.
165 *
166 * ASSUMES: func != NULL.
167 *
168 * Possible flags:
169 * TQ_NOSLEEP: Do not wait for resources; may fail.
170 *
171 * TQ_NOALLOC: Do not allocate memory; may fail. May only be used with
172 * non-dynamic task queues.
173 *
174 * TQ_NOQUEUE: Do not enqueue a task if it can't be dispatched due to
175 * lack of available resources; fail instead. If this flag is not
176 * set, and the task pool is exhausted, the task may be scheduled
177 * in the backing queue. This flag may ONLY be used with dynamic
178 * task queues.
179 *
180 * NOTE: This flag should always be used when a task queue is used
181 * for tasks that may depend on each other for completion.
182 * Enqueueing dependent tasks may create deadlocks.
183 *
184 * TQ_SLEEP: May block waiting for resources. May still fail for
185 * dynamic task queues if TQ_NOQUEUE is also specified; otherwise it
186 * always succeeds.
187 *
188 * TQ_FRONT: Puts the new task at the front of the queue. Be careful.
189 *
190 * NOTE: Dynamic task queues are much more likely to fail in
191 * taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
192 * is important to have backup strategies handling such failures.
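*
* For illustration, a hedged dispatch sketch (my_func and my_arg are
* hypothetical):
*
*	if (taskq_dispatch(tq, my_func, my_arg, TQ_NOSLEEP) ==
*	    TASKQID_INVALID)
*		my_func(my_arg);	(backup strategy: run inline)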
193 *
194 * void taskq_dispatch_ent(tq, func, arg, flags, tqent)
195 *
196 * This is a light-weight form of taskq_dispatch() that uses a
197 * preallocated taskq_ent_t structure for scheduling. As a
198 * result, it does not perform allocations and cannot ever fail.
199 * Note especially that it cannot be used with TASKQ_DYNAMIC
200 * taskqs. The memory for the tqent must not be modified or used
201 * until the function (func) is called. (However, func itself
202 * may safely modify or free this memory, once it is called.)
203 * Note that the taskq framework will NOT free this memory.
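*
* A sketch of the intended pattern (struct my_job is hypothetical): the
* caller embeds the taskq_ent_t in a structure that stays allocated at
* least until the task function runs:
*
*	struct my_job { taskq_ent_t mj_tqent; ... } *job;
*	taskq_dispatch_ent(tq, my_job_func, job, 0, &job->mj_tqent);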
204 *
205 * boolean_t taskq_empty(tq)
206 *
207 * Returns B_TRUE if there are no tasks pending on the queue.
208 *
209 * void taskq_wait(tq):
210 *
211 * Waits for all previously scheduled tasks to complete.
212 *
213 * NOTE: It does not stop any new task dispatches.
214 * Do NOT call taskq_wait() from a task: it will cause deadlock.
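*
* A hedged sketch of safe use from outside the taskq:
*
*	ASSERT(!taskq_member(tq, curthread));
*	taskq_wait(tq);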
215 *
216 * void taskq_suspend(tq)
217 *
218 * Suspend all task execution. Tasks already scheduled for a dynamic task
219 * queue will still be executed, but all newly scheduled tasks will be
220 * suspended until taskq_resume() is called.
221 *
222 * int taskq_suspended(tq)
223 *
224 * Returns 1 if taskq is suspended and 0 otherwise. It is intended to
225 * ASSERT that the task queue is suspended.
226 *
227 * void taskq_resume(tq)
228 *
229 * Resume task queue execution.
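*
* A sketch of the suspend/resume pairing (taskq_resume() expects the
* matching taskq_suspend() to have been called first):
*
*	taskq_suspend(tq);
*	ASSERT(taskq_suspended(tq));
*	... reconfigure whatever state the tasks depend on ...
*	taskq_resume(tq);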
230 *
231 * int taskq_member(tq, thread)
232 *
233 * Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
234 * intended use is to ASSERT that a given function is called in taskq
235 * context only.
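*
* Typical usage is an assertion at the top of a task function ("my_tq"
* is hypothetical):
*
*	ASSERT(taskq_member(my_tq, curthread));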
236 *
237 * system_taskq
238 *
239 * Global system-wide dynamic task queue for common uses. It may be used by
240 * any subsystem that needs to schedule tasks and does not need to manage
241 * its own task queues. It is initialized quite early during system boot.
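*
* A hedged sketch (my_func and my_arg are hypothetical):
*
*	(void) taskq_dispatch(system_taskq, my_func, my_arg, TQ_SLEEP);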
242 *
243 * IMPLEMENTATION ==============================================================
244 *
245 * This is a schematic representation of the task queue structures.
246 *
247 * taskq:
248 * +-------------+
249 * | tq_lock | +---< taskq_ent_free()
250 * +-------------+ |
251 * |... | | tqent: tqent:
252 * +-------------+ | +------------+ +------------+
253 * | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
254 * +-------------+ +------------+ +------------+
255 * |... | | ... | | ... |
256 * +-------------+ +------------+ +------------+
257 * | tq_task | |
258 * | | +-------------->taskq_ent_alloc()
259 * +--------------------------------------------------------------------------+
260 * | | | tqent tqent |
261 * | +---------------------+ +--> +------------+ +--> +------------+ |
262 * | | ... | | | func, arg | | | func, arg | |
263 * +>+---------------------+ <---|-+ +------------+ <---|-+ +------------+ |
264 * | tq_taskq.tqent_next | ----+ | | tqent_next | --->+ | | tqent_next |--+
265 * +---------------------+ | +------------+ ^ | +------------+
266 * +-| tq_task.tqent_prev | +--| tqent_prev | | +--| tqent_prev | ^
267 * | +---------------------+ +------------+ | +------------+ |
268 * | |... | | ... | | | ... | |
269 * | +---------------------+ +------------+ | +------------+ |
270 * | ^ | |
271 * | | | |
272 * +--------------------------------------+--------------+ TQ_APPEND() -+
273 * | | |
274 * |... | taskq_thread()-----+
275 * +-------------+
276 * | tq_buckets |--+-------> [ NULL ] (for regular task queues)
277 * +-------------+ |
278 * | DYNAMIC TASK QUEUES:
279 * |
280 * +-> taskq_bucket[nCPU] taskq_bucket_dispatch()
281 * +-------------------+ ^
282 * +--->| tqbucket_lock | |
283 * | +-------------------+ +--------+ +--------+
284 * | | tqbucket_freelist |-->| tqent |-->...| tqent | ^
285 * | +-------------------+<--+--------+<--...+--------+ |
286 * | | ... | | thread | | thread | |
287 * | +-------------------+ +--------+ +--------+ |
288 * | +-------------------+ |
289 * taskq_dispatch()--+--->| tqbucket_lock | TQ_APPEND()------+
290 * TQ_HASH() | +-------------------+ +--------+ +--------+
291 * | | tqbucket_freelist |-->| tqent |-->...| tqent |
292 * | +-------------------+<--+--------+<--...+--------+
293 * | | ... | | thread | | thread |
294 * | +-------------------+ +--------+ +--------+
295 * +---> ...
296 *
297 *
298 * Task queues use the tq_task field to link new entries into the queue. The
299 * queue is a circular doubly-linked list. Entries are put at the end of the
300 * list with TQ_APPEND() and processed from the front of the list by
301 * taskq_thread() in FIFO order. Task queue entries are cached in the free list
302 * managed by the taskq_ent_alloc() and taskq_ent_free() functions.
303 *
304 * All threads used by task queues set the thread's t_taskq field to
305 * point to the task queue.
306 *
307 * Taskq Thread Management -----------------------------------------------------
308 *
309 * Taskq's non-dynamic threads are managed with several variables and flags:
310 *
311 * * tq_nthreads - The number of threads in taskq_thread() for the
312 * taskq.
313 *
314 * * tq_active - The number of threads not waiting on a CV in
315 * taskq_thread(); includes newly created threads
316 * not yet counted in tq_nthreads.
317 *
318 * * tq_nthreads_target
319 * - The number of threads desired for the taskq.
320 *
321 * * tq_flags & TASKQ_CHANGING
322 * - Indicates that tq_nthreads != tq_nthreads_target.
323 *
324 * * tq_flags & TASKQ_THREAD_CREATED
325 * - Indicates that a thread is being created in the taskq.
326 *
327 * During creation, tq_nthreads and tq_active are set to 0, and
328 * tq_nthreads_target is set to the number of threads desired. The
329 * TASKQ_CHANGING flag is set, and taskq_thread_create() is called to
330 * create the first thread. taskq_thread_create() increments tq_active,
331 * sets TASKQ_THREAD_CREATED, and creates the new thread.
332 *
333 * Each thread starts in taskq_thread(), clears the TASKQ_THREAD_CREATED
334 * flag, and increments tq_nthreads. It stores the new value of
335 * tq_nthreads as its "thread_id", and stores its thread pointer in the
336 * tq_threadlist at index (thread_id - 1). We keep the thread_id space
337 * densely packed by requiring that only the largest thread_id can exit during
338 * normal adjustment. The exception is during the destruction of the
339 * taskq; once tq_nthreads_target is set to zero, no new threads will be created
340 * for the taskq queue, so every thread can exit without any ordering being
341 * necessary.
342 *
343 * Threads will only process work if their thread id is <= tq_nthreads_target.
344 *
345 * When TASKQ_CHANGING is set, threads will check the current thread target
346 * whenever they wake up, and do whatever they can to apply its effects.
347 *
348 * TASKQ_THREADS_CPU_PCT -------------------------------------------------------
349 *
350 * When a taskq is created with TASKQ_THREADS_CPU_PCT, we store the requested
351 * percentage in tq_threads_ncpus_pct, start the taskq off with the correct
352 * thread target, and add it to the taskq_cpupct_list for later adjustment.
353 *
354 * We register taskq_cpu_setup() to be called whenever a CPU changes state. It
355 * walks the list of TASKQ_THREAD_CPU_PCT taskqs, adjusts their nthread_target
356 * if need be, and wakes up all of the threads to process the change.
357 *
358 * Dynamic Task Queues Implementation ------------------------------------------
359 *
360 * For dynamic task queues there is a 1-to-1 mapping between a thread and a
361 * taskq_ent_t structure. Each entry is serviced by its own thread and each
362 * thread is controlled by a single entry.
363 *
364 * Entries are distributed over a set of buckets. To avoid using modulo
365 * arithmetic the number of buckets is 2^n and is determined as the nearest
366 * power-of-two round-down of the number of CPUs in the system. The tunable
367 * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each entry
368 * is attached to a bucket for its lifetime and can't migrate to other buckets.
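*
* For example (illustrative): on a 6-CPU system the nearest power-of-two
* round-down gives 4 buckets, subject to the 'taskq_maxbuckets' cap.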
369 *
370 * Entries that have scheduled tasks are not placed in any list. The dispatch
371 * function sets their "func" and "arg" fields and signals the corresponding
372 * thread to execute the task. Once the thread executes the task it clears the
373 * "func" field and places the entry on the bucket's cache of free entries,
374 * pointed to by the "tqbucket_freelist" field. ALL entries on the free list
375 * should have the "func" field equal to NULL. The free list is a circular
376 * doubly-linked list identical in structure to the tq_task list above, but
377 * entries are taken from it in LIFO order - the last freed entry is the first
378 * to be allocated. The taskq_bucket_dispatch() function gets the most
379 * recently used entry from the free list, sets its "func" and "arg" fields, and signals a worker thread.
380 *
381 * After executing each task a per-entry thread taskq_d_thread() places its
382 * entry on the bucket free list and goes to a timed sleep. If it wakes up
383 * without getting a new task it removes the entry from the free list and
384 * destroys itself. The thread sleep time is controlled by the tunable variable
385 * `taskq_thread_timeout'.
386 *
387 * There are various statistics kept in the bucket which allow for later
388 * analysis of taskq usage patterns. Also, a global copy of taskq creation and
389 * death statistics is kept in the global taskq data structure. Since thread
390 * creation and death happen rarely, updating such global data does not present
391 * a performance problem.
392 *
393 * NOTE: Threads are not bound to any CPU and there is absolutely no association
394 * between the bucket and actual thread CPU, so buckets are used only to
395 * split resources and reduce resource contention. Having threads attached
396 * to the CPU denoted by a bucket may reduce the number of times the job
397 * switches between CPUs.
398 *
399 * The current algorithm creates a thread whenever a bucket has no free
400 * entries. It would be nice to know how many threads are in the running
401 * state and avoid creating threads if all CPUs are busy with existing
402 * tasks, but it is unclear how such a strategy can be implemented.
403 *
404 * Currently buckets are created statically as an array attached to the task
405 * queue. On systems with nCPUs < max_ncpus this may waste system
406 * memory. One solution would be to allocate buckets when they are first
407 * touched, but it is not clear how useful that is.
408 *
409 * SUSPEND/RESUME implementation -----------------------------------------------
410 *
411 * Before executing a task taskq_thread() (executing non-dynamic task
412 * queues) obtains taskq's thread lock as a reader. The taskq_suspend()
413 * function gets the same lock as a writer, blocking all non-dynamic task
414 * execution. The taskq_resume() function releases the lock allowing
415 * taskq_thread to continue execution.
416 *
417 * For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
418 * the taskq_suspend() function. After that, taskq_bucket_dispatch() always
419 * fails, so that taskq_dispatch() will either enqueue tasks for the
420 * suspended backing queue or fail if TQ_NOQUEUE is specified in the
421 * dispatch flags.
422 *
423 * NOTE: taskq_suspend() does not immediately block any tasks already
424 * scheduled for dynamic task queues. It only suspends new tasks
425 * scheduled after taskq_suspend() was called.
426 *
427 * The taskq_member() function works by comparing the thread's t_taskq pointer
428 * with the passed taskq pointer.
429 *
430 * LOCKS and LOCK Hierarchy ----------------------------------------------------
431 *
432 * There are three locks used in task queues:
433 *
434 * 1) The taskq_t's tq_lock, protecting global task queue state.
435 *
436 * 2) Each per-CPU bucket has a lock for bucket management.
437 *
438 * 3) The global taskq_cpupct_lock, which protects the list of
439 * TASKQ_THREADS_CPU_PCT taskqs.
440 *
441 * If both (1) and (2) are needed, tq_lock should be taken *after* the bucket
442 * lock.
443 *
444 * If both (1) and (3) are needed, tq_lock should be taken *after*
445 * taskq_cpupct_lock.
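*
* A sketch of the ordering when both (1) and (2) are needed:
*
*	mutex_enter(&b->tqbucket_lock);
*	mutex_enter(&tq->tq_lock);
*	...
*	mutex_exit(&tq->tq_lock);
*	mutex_exit(&b->tqbucket_lock);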
446 *
447 * DEBUG FACILITIES ------------------------------------------------------------
448 *
449 * For DEBUG kernels it is possible to induce random failures in the
450 * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The
451 * taskq_dmtbf and taskq_smtbf tunables control the mean time between induced
452 * failures for dynamic and static task queues respectively.
453 *
454 * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
455 *
456 * TUNABLES --------------------------------------------------------------------
457 *
458 * system_taskq_size - Size of the global system_taskq.
459 * This value is multiplied by nCPUs to determine
460 * actual size.
461 * Default value: 64
462 *
463 * taskq_minimum_nthreads_max
464 * - Minimum size of the thread list for a taskq.
465 * Useful for testing different thread pool
466 * sizes by overwriting tq_nthreads_target.
467 *
468 * taskq_thread_timeout - Maximum idle time for taskq_d_thread()
469 * Default value: 5 minutes
470 *
471 * taskq_maxbuckets - Maximum number of buckets in any task queue
472 * Default value: 128
473 *
474 * taskq_search_depth - Maximum # of buckets searched for a free entry
475 * Default value: 4
476 *
477 * taskq_dmtbf - Mean time between induced dispatch failures
478 * for dynamic task queues.
479 * Default value: UINT_MAX (no induced failures)
480 *
481 * taskq_smtbf - Mean time between induced dispatch failures
482 * for static task queues.
483 * Default value: UINT_MAX (no induced failures)
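*
* These are ordinary kernel variables, so (on systems that support it)
* they may be set at boot via /etc/system, e.g. with an illustrative
* value:
*
*	set taskq_thread_timeout = 600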
484 *
485 * CONDITIONAL compilation -----------------------------------------------------
486 *
487 * TASKQ_STATISTIC - If set, enables per-bucket statistics (the default).
488 *
489 */
490
491 #include <sys/taskq_impl.h>
492 #include <sys/thread.h>
493 #include <sys/proc.h>
494 #include <sys/kmem.h>
495 #include <sys/vmem.h>
496 #include <sys/callb.h>
497 #include <sys/class.h>
498 #include <sys/systm.h>
499 #include <sys/cmn_err.h>
500 #include <sys/debug.h>
501 #include <sys/vmsystm.h> /* For throttlefree */
502 #include <sys/sysmacros.h>
503 #include <sys/cpuvar.h>
504 #include <sys/cpupart.h>
505 #include <sys/sdt.h>
506 #include <sys/sysdc.h>
507 #include <sys/note.h>
508
509 static kmem_cache_t *taskq_ent_cache, *taskq_cache;
510
511 /*
512 * Pseudo instance numbers for taskqs without an explicitly provided instance.
513 */
514 static vmem_t *taskq_id_arena;
515
516 /* Global system task queue for common use */
517 taskq_t *system_taskq;
518
519 /*
520 * Maximum number of entries in the global system taskq is
521 * system_taskq_size * max_ncpus
522 */
523 #define SYSTEM_TASKQ_SIZE 64
524 int system_taskq_size = SYSTEM_TASKQ_SIZE;
525
526 /*
527 * Minimum size for tq_nthreads_max; useful for those who want to play around
528 * with increasing a taskq's tq_nthreads_target.
529 */
530 int taskq_minimum_nthreads_max = 1;
531
532 /*
533 * We want to ensure that when taskq_create() returns, there is at least
534 * one thread ready to handle requests. To guarantee this, we have to wait
535 * for the second thread, since the first one cannot process requests until
536 * the second thread has been created.
537 */
538 #define TASKQ_CREATE_ACTIVE_THREADS 2
539
540 /* Maximum percentage allowed for TASKQ_THREADS_CPU_PCT */
541 #define TASKQ_CPUPCT_MAX_PERCENT 1000
542 int taskq_cpupct_max_percent = TASKQ_CPUPCT_MAX_PERCENT;
543
544 /*
545 * Dynamic task queue threads that don't get any work within
546 * taskq_thread_timeout destroy themselves
547 */
548 #define TASKQ_THREAD_TIMEOUT (60 * 5)
549 int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;
550
551 #define TASKQ_MAXBUCKETS 128
552 int taskq_maxbuckets = TASKQ_MAXBUCKETS;
553
554 /*
555 * When a bucket has no available entries another buckets are tried.
556 * taskq_search_depth parameter limits the amount of buckets that we search
557 * before failing. This is mostly useful in systems with many CPUs where we may
558 * spend too much time scanning busy buckets.
559 */
560 #define TASKQ_SEARCH_DEPTH 4
561 int taskq_search_depth = TASKQ_SEARCH_DEPTH;
562
563 /*
564 * Hashing function: mix various bits of x. May be pretty much anything.
565 */
566 #define TQ_HASH(x) ((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))
567
568 /*
569 * We do not create any new threads when the system is low on memory and starts
570 * throttling memory allocations. The following macro tries to estimate that
571 * condition.
572 */
573 #define ENOUGH_MEMORY() (freemem > throttlefree)
574
575 /*
576 * Static functions.
577 */
578 static taskq_t *taskq_create_common(const char *, int, int, pri_t, int,
579 int, proc_t *, uint_t, uint_t);
580 static void taskq_thread(void *);
581 static void taskq_d_thread(taskq_ent_t *);
582 static void taskq_bucket_extend(void *);
583 static int taskq_constructor(void *, void *, int);
584 static void taskq_destructor(void *, void *);
585 static int taskq_ent_constructor(void *, void *, int);
586 static void taskq_ent_destructor(void *, void *);
587 static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
588 static void taskq_ent_free(taskq_t *, taskq_ent_t *);
589 static int taskq_ent_exists(taskq_t *, task_func_t, void *);
590 static taskq_ent_t *taskq_bucket_dispatch(taskq_bucket_t *, task_func_t,
591 void *);
592
593 /*
594 * Task queues kstats.
595 */
596 struct taskq_kstat {
597 kstat_named_t tq_pid;
598 kstat_named_t tq_tasks;
599 kstat_named_t tq_executed;
600 kstat_named_t tq_maxtasks;
601 kstat_named_t tq_totaltime;
602 kstat_named_t tq_nalloc;
603 kstat_named_t tq_nactive;
604 kstat_named_t tq_pri;
605 kstat_named_t tq_nthreads;
606 kstat_named_t tq_nomem;
607 } taskq_kstat = {
608 { "pid", KSTAT_DATA_UINT64 },
609 { "tasks", KSTAT_DATA_UINT64 },
610 { "executed", KSTAT_DATA_UINT64 },
611 { "maxtasks", KSTAT_DATA_UINT64 },
612 { "totaltime", KSTAT_DATA_UINT64 },
613 { "nalloc", KSTAT_DATA_UINT64 },
614 { "nactive", KSTAT_DATA_UINT64 },
615 { "priority", KSTAT_DATA_UINT64 },
616 { "threads", KSTAT_DATA_UINT64 },
617 { "nomem", KSTAT_DATA_UINT64 },
618 };
619
620 struct taskq_d_kstat {
621 kstat_named_t tqd_pri;
622 kstat_named_t tqd_btasks;
623 kstat_named_t tqd_bexecuted;
624 kstat_named_t tqd_bmaxtasks;
625 kstat_named_t tqd_bnalloc;
626 kstat_named_t tqd_bnactive;
627 kstat_named_t tqd_btotaltime;
628 kstat_named_t tqd_hits;
629 kstat_named_t tqd_misses;
630 kstat_named_t tqd_overflows;
631 kstat_named_t tqd_tcreates;
632 kstat_named_t tqd_tdeaths;
633 kstat_named_t tqd_maxthreads;
634 kstat_named_t tqd_nomem;
635 kstat_named_t tqd_disptcreates;
636 kstat_named_t tqd_totaltime;
637 kstat_named_t tqd_nalloc;
638 kstat_named_t tqd_nfree;
639 } taskq_d_kstat = {
640 { "priority", KSTAT_DATA_UINT64 },
641 { "btasks", KSTAT_DATA_UINT64 },
642 { "bexecuted", KSTAT_DATA_UINT64 },
643 { "bmaxtasks", KSTAT_DATA_UINT64 },
644 { "bnalloc", KSTAT_DATA_UINT64 },
645 { "bnactive", KSTAT_DATA_UINT64 },
646 { "btotaltime", KSTAT_DATA_UINT64 },
647 { "hits", KSTAT_DATA_UINT64 },
648 { "misses", KSTAT_DATA_UINT64 },
649 { "overflows", KSTAT_DATA_UINT64 },
650 { "tcreates", KSTAT_DATA_UINT64 },
651 { "tdeaths", KSTAT_DATA_UINT64 },
652 { "maxthreads", KSTAT_DATA_UINT64 },
653 { "nomem", KSTAT_DATA_UINT64 },
654 { "disptcreates", KSTAT_DATA_UINT64 },
655 { "totaltime", KSTAT_DATA_UINT64 },
656 { "nalloc", KSTAT_DATA_UINT64 },
657 { "nfree", KSTAT_DATA_UINT64 },
658 };
659
660 static kmutex_t taskq_kstat_lock;
661 static kmutex_t taskq_d_kstat_lock;
662 static int taskq_kstat_update(kstat_t *, int);
663 static int taskq_d_kstat_update(kstat_t *, int);
664
665 /*
666 * List of all TASKQ_THREADS_CPU_PCT taskqs.
667 */
668 static list_t taskq_cpupct_list; /* protected by cpu_lock */
669
670 /*
671 * Collect per-bucket statistic when TASKQ_STATISTIC is defined.
672 */
673 #define TASKQ_STATISTIC 1
674
675 #if TASKQ_STATISTIC
676 #define TQ_STAT(b, x) b->tqbucket_stat.x++
677 #else
678 #define TQ_STAT(b, x)
679 #endif
680
681 /*
682 * Random fault injection.
683 */
684 uint_t taskq_random;
685 uint_t taskq_dmtbf = UINT_MAX; /* mean time between injected failures */
686 uint_t taskq_smtbf = UINT_MAX; /* mean time between injected failures */
687
688 /*
689 * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
690 *
691 * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
692 * they could prepopulate the cache and make sure that they do not use more
693 * than minalloc entries. So, fault injection in this case ensures that
694 * either TASKQ_PREPOPULATE is not set or there are more entries allocated
695 * than is specified by minalloc. TQ_NOALLOC dispatches are always allowed
696 * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
697 * dispatches.
698 */
699 #ifdef DEBUG
700 #define TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag) \
701 taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
702 if ((flag & TQ_NOSLEEP) && \
703 taskq_random < 1771875 / taskq_dmtbf) { \
704 return (TASKQID_INVALID); \
705 }
706
707 #define TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag) \
708 taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
709 if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) && \
710 (!(tq->tq_flags & TASKQ_PREPOPULATE) || \
711 (tq->tq_nalloc > tq->tq_minalloc)) && \
712 (taskq_random < (1771875 / taskq_smtbf))) { \
713 mutex_exit(&tq->tq_lock); \
714 return (TASKQID_INVALID); \
715 }
716 #else
717 #define TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
718 #define TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
719 #endif
720
721 #define IS_EMPTY(l) (((l).tqent_prev == (l).tqent_next) && \
722 ((l).tqent_prev == &(l)))
723
724 /*
725 * Append `tqe' at the end of the doubly-linked list denoted by l.
726 */
727 #define TQ_APPEND(l, tqe) { \
728 tqe->tqent_next = &l; \
729 tqe->tqent_prev = l.tqent_prev; \
730 tqe->tqent_next->tqent_prev = tqe; \
731 tqe->tqent_prev->tqent_next = tqe; \
732 }
733 /*
734 * Prepend 'tqe' to the beginning of l
735 */
736 #define TQ_PREPEND(l, tqe) { \
737 tqe->tqent_next = l.tqent_next; \
738 tqe->tqent_prev = &l; \
739 tqe->tqent_next->tqent_prev = tqe; \
740 tqe->tqent_prev->tqent_next = tqe; \
741 }
742
743 /*
744 * Schedule a task specified by func and arg into the task queue entry tqe.
745 */
746 #define TQ_DO_ENQUEUE(tq, tqe, func, arg, front) { \
747 ASSERT(MUTEX_HELD(&tq->tq_lock)); \
748 _NOTE(CONSTCOND) \
749 if (front) { \
750 TQ_PREPEND(tq->tq_task, tqe); \
751 } else { \
752 TQ_APPEND(tq->tq_task, tqe); \
753 } \
754 tqe->tqent_func = (func); \
755 tqe->tqent_arg = (arg); \
756 tq->tq_tasks++; \
757 if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks) \
758 tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed; \
759 cv_signal(&tq->tq_dispatch_cv); \
760 DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
761 }
762
763 #define TQ_ENQUEUE(tq, tqe, func, arg) \
764 TQ_DO_ENQUEUE(tq, tqe, func, arg, 0)
765
766 #define TQ_ENQUEUE_FRONT(tq, tqe, func, arg) \
767 TQ_DO_ENQUEUE(tq, tqe, func, arg, 1)
768
769 /*
770 * Do-nothing task which may be used to prepopulate thread caches.
771 */
772 /*ARGSUSED*/
773 void
774 nulltask(void *unused)
775 {
776 }
777
778 /*ARGSUSED*/
779 static int
780 taskq_constructor(void *buf, void *cdrarg, int kmflags)
781 {
782 taskq_t *tq = buf;
783
784 bzero(tq, sizeof (taskq_t));
785
786 mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
787 rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
788 cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
789 cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
790 cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
791 cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
792
793 tq->tq_task.tqent_next = &tq->tq_task;
794 tq->tq_task.tqent_prev = &tq->tq_task;
795
796 return (0);
797 }
798
799 /*ARGSUSED*/
800 static void
801 taskq_destructor(void *buf, void *cdrarg)
802 {
803 taskq_t *tq = buf;
804
805 ASSERT(tq->tq_nthreads == 0);
806 ASSERT(tq->tq_buckets == NULL);
807 ASSERT(tq->tq_tcreates == 0);
808 ASSERT(tq->tq_tdeaths == 0);
809
810 mutex_destroy(&tq->tq_lock);
811 rw_destroy(&tq->tq_threadlock);
812 cv_destroy(&tq->tq_dispatch_cv);
813 cv_destroy(&tq->tq_exit_cv);
814 cv_destroy(&tq->tq_wait_cv);
815 cv_destroy(&tq->tq_maxalloc_cv);
816 }
817
818 /*ARGSUSED*/
819 static int
820 taskq_ent_constructor(void *buf, void *cdrarg, int kmflags)
821 {
822 taskq_ent_t *tqe = buf;
823
824 tqe->tqent_thread = NULL;
825 cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);
826
827 return (0);
828 }
829
830 /*ARGSUSED*/
831 static void
832 taskq_ent_destructor(void *buf, void *cdrarg)
833 {
834 taskq_ent_t *tqe = buf;
835
836 ASSERT(tqe->tqent_thread == NULL);
837 cv_destroy(&tqe->tqent_cv);
838 }
839
840 void
841 taskq_init(void)
842 {
843 taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
844 sizeof (taskq_ent_t), 0, taskq_ent_constructor,
845 taskq_ent_destructor, NULL, NULL, NULL, 0);
846 taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
847 0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
848 taskq_id_arena = vmem_create("taskq_id_arena",
849 (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0,
850 VM_SLEEP | VMC_IDENTIFIER);
851
852 list_create(&taskq_cpupct_list, sizeof (taskq_t),
853 offsetof(taskq_t, tq_cpupct_link));
854 }
855
856 static void
857 taskq_update_nthreads(taskq_t *tq, uint_t ncpus)
858 {
859 uint_t newtarget = TASKQ_THREADS_PCT(ncpus, tq->tq_threads_ncpus_pct);
860
861 ASSERT(MUTEX_HELD(&cpu_lock));
862 ASSERT(MUTEX_HELD(&tq->tq_lock));
863
864 /* We must be going from non-zero to non-zero; no exiting. */
865 ASSERT3U(tq->tq_nthreads_target, !=, 0);
866 ASSERT3U(newtarget, !=, 0);
867
868 ASSERT3U(newtarget, <=, tq->tq_nthreads_max);
869 if (newtarget != tq->tq_nthreads_target) {
870 tq->tq_flags |= TASKQ_CHANGING;
871 tq->tq_nthreads_target = newtarget;
872 cv_broadcast(&tq->tq_dispatch_cv);
873 cv_broadcast(&tq->tq_exit_cv);
874 }
875 }
876
877 /* called during task queue creation */
878 static void
879 taskq_cpupct_install(taskq_t *tq, cpupart_t *cpup)
880 {
881 ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
882
883 mutex_enter(&cpu_lock);
884 mutex_enter(&tq->tq_lock);
885 tq->tq_cpupart = cpup->cp_id;
886 taskq_update_nthreads(tq, cpup->cp_ncpus);
887 mutex_exit(&tq->tq_lock);
888
889 list_insert_tail(&taskq_cpupct_list, tq);
890 mutex_exit(&cpu_lock);
891 }
892
893 static void
894 taskq_cpupct_remove(taskq_t *tq)
895 {
896 ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
897
898 mutex_enter(&cpu_lock);
899 list_remove(&taskq_cpupct_list, tq);
900 mutex_exit(&cpu_lock);
901 }
902
903 /*ARGSUSED*/
904 static int
905 taskq_cpu_setup(cpu_setup_t what, int id, void *arg)
906 {
907 taskq_t *tq;
908 cpupart_t *cp = cpu[id]->cpu_part;
909 uint_t ncpus = cp->cp_ncpus;
910
911 ASSERT(MUTEX_HELD(&cpu_lock));
912 ASSERT(ncpus > 0);
913
914 switch (what) {
915 case CPU_OFF:
916 case CPU_CPUPART_OUT:
917 /* offlines are called *before* the cpu is offlined. */
918 if (ncpus > 1)
919 ncpus--;
920 break;
921
922 case CPU_ON:
923 case CPU_CPUPART_IN:
924 break;
925
926 default:
927 return (0); /* doesn't affect cpu count */
928 }
929
930 for (tq = list_head(&taskq_cpupct_list); tq != NULL;
931 tq = list_next(&taskq_cpupct_list, tq)) {
932
933 mutex_enter(&tq->tq_lock);
934 /*
935 * If the taskq is part of the cpuset which is changing,
936 * update its nthreads_target.
937 */
938 if (tq->tq_cpupart == cp->cp_id) {
939 taskq_update_nthreads(tq, ncpus);
940 }
941 mutex_exit(&tq->tq_lock);
942 }
943 return (0);
944 }
945
946 void
947 taskq_mp_init(void)
948 {
949 mutex_enter(&cpu_lock);
950 register_cpu_setup_func(taskq_cpu_setup, NULL);
951 /*
952 * Make sure we're up to date. At this point in boot, there is only
953 * one processor set, so we only have to update the current CPU.
954 */
955 (void) taskq_cpu_setup(CPU_ON, CPU->cpu_id, NULL);
956 mutex_exit(&cpu_lock);
957 }
958
959 /*
960 * Create global system dynamic task queue.
961 */
962 void
963 system_taskq_init(void)
964 {
965 system_taskq = taskq_create_common("system_taskq", 0,
966 system_taskq_size * max_ncpus, minclsyspri, 4, 512, &p0, 0,
967 TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
968 }
969
970 /*
971 * taskq_ent_alloc()
972 *
973 * Allocates a new taskq_ent_t structure either from the free list or from the
974 * cache. Returns NULL if it can't be allocated.
975 *
976 * Assumes: tq->tq_lock is held.
977 */
978 static taskq_ent_t *
979 taskq_ent_alloc(taskq_t *tq, int flags)
980 {
981 int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
982 taskq_ent_t *tqe;
983 clock_t wait_time;
984 clock_t wait_rv;
985
986 ASSERT(MUTEX_HELD(&tq->tq_lock));
987
988 /*
989 * TQ_NOALLOC allocations are allowed to use the freelist, even if
990 * we are below tq_minalloc.
991 */
992 again: if ((tqe = tq->tq_freelist) != NULL &&
993 ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
994 tq->tq_freelist = tqe->tqent_next;
995 } else {
996 if (flags & TQ_NOALLOC)
997 return (NULL);
998
999 if (tq->tq_nalloc >= tq->tq_maxalloc) {
1000 if (kmflags & KM_NOSLEEP)
1001 return (NULL);
1002
1003 /*
1004 * We don't want to exceed tq_maxalloc, but we can't
1005 * wait for other tasks to complete (and thus free up
1006 * task structures) without risking deadlock with
1007 * the caller. So, we just delay for one second
1008 * to throttle the allocation rate. If we have tasks
1009 * complete before the one-second timeout expires then
1010 * taskq_ent_free will signal us and we will
1011 * immediately retry the allocation (reap free).
1012 */
1013 wait_time = ddi_get_lbolt() + hz;
1014 while (tq->tq_freelist == NULL) {
1015 tq->tq_maxalloc_wait++;
1016 wait_rv = cv_timedwait(&tq->tq_maxalloc_cv,
1017 &tq->tq_lock, wait_time);
1018 tq->tq_maxalloc_wait--;
1019 if (wait_rv == -1)
1020 break;
1021 }
1022 if (tq->tq_freelist)
1023 goto again; /* reap freelist */
1024
1025 }
1026 mutex_exit(&tq->tq_lock);
1027
1028 tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);
1029
1030 mutex_enter(&tq->tq_lock);
1031 if (tqe != NULL)
1032 tq->tq_nalloc++;
1033 }
1034 return (tqe);
1035 }
1036
1037 /*
1038 * taskq_ent_free()
1039 *
1040 * Free taskq_ent_t structure by either putting it on the free list or freeing
1041 * it to the cache.
1042 *
1043 * Assumes: tq->tq_lock is held.
1044 */
1045 static void
1046 taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
1047 {
1048 ASSERT(MUTEX_HELD(&tq->tq_lock));
1049
1050 if (tq->tq_nalloc <= tq->tq_minalloc) {
1051 tqe->tqent_next = tq->tq_freelist;
1052 tq->tq_freelist = tqe;
1053 } else {
1054 tq->tq_nalloc--;
1055 mutex_exit(&tq->tq_lock);
1056 kmem_cache_free(taskq_ent_cache, tqe);
1057 mutex_enter(&tq->tq_lock);
1058 }
1059
1060 if (tq->tq_maxalloc_wait)
1061 cv_signal(&tq->tq_maxalloc_cv);
1062 }
1063
1064 /*
1065 * taskq_ent_exists()
1066 *
1067 * Return 1 if the taskq already has an entry for calling 'func(arg)'.
1068 *
1069 * Assumes: tq->tq_lock is held.
1070 */
1071 static int
1072 taskq_ent_exists(taskq_t *tq, task_func_t func, void *arg)
1073 {
1074 taskq_ent_t *tqe;
1075
1076 ASSERT(MUTEX_HELD(&tq->tq_lock));
1077
1078 for (tqe = tq->tq_task.tqent_next; tqe != &tq->tq_task;
1079 tqe = tqe->tqent_next)
1080 if ((tqe->tqent_func == func) && (tqe->tqent_arg == arg))
1081 return (1);
1082 return (0);
1083 }
1084
1085 /*
1086 * Dispatch a task "func(arg)" to a free entry of bucket b.
1087 *
1088 * Assumes: no bucket locks are held.
1089 *
1090 * Returns: a pointer to an entry if dispatch was successful.
1091 * NULL if there are no free entries or if the bucket is suspended.
1092 */
1093 static taskq_ent_t *
1094 taskq_bucket_dispatch(taskq_bucket_t *b, task_func_t func, void *arg)
1095 {
1096 taskq_ent_t *tqe;
1097
1098 ASSERT(MUTEX_NOT_HELD(&b->tqbucket_lock));
1099 ASSERT(func != NULL);
1100
1101 mutex_enter(&b->tqbucket_lock);
1102
1103 ASSERT(b->tqbucket_nfree != 0 || IS_EMPTY(b->tqbucket_freelist));
1104 ASSERT(b->tqbucket_nfree == 0 || !IS_EMPTY(b->tqbucket_freelist));
1105
1106 /*
1107 * Get an entry from the freelist if there is one.
1108 * Schedule task into the entry.
1109 */
1110 if ((b->tqbucket_nfree != 0) &&
1111 !(b->tqbucket_flags & TQBUCKET_SUSPEND)) {
1112 tqe = b->tqbucket_freelist.tqent_prev;
1113
1114 ASSERT(tqe != &b->tqbucket_freelist);
1115 ASSERT(tqe->tqent_thread != NULL);
1116
1117 tqe->tqent_prev->tqent_next = tqe->tqent_next;
1118 tqe->tqent_next->tqent_prev = tqe->tqent_prev;
1119 b->tqbucket_nalloc++;
1120 b->tqbucket_nfree--;
1121 tqe->tqent_func = func;
1122 tqe->tqent_arg = arg;
1123 TQ_STAT(b, tqs_hits);
1124 cv_signal(&tqe->tqent_cv);
1125 DTRACE_PROBE2(taskq__d__enqueue, taskq_bucket_t *, b,
1126 taskq_ent_t *, tqe);
1127 } else {
1128 tqe = NULL;
1129 TQ_STAT(b, tqs_misses);
1130 }
1131 mutex_exit(&b->tqbucket_lock);
1132 return (tqe);
1133 }
1134
1135 /*
1136 * Dispatch a task.
1137 *
1138 * Assumes: func != NULL
1139 *
1140 * Returns: NULL if dispatch failed.
1141 * non-NULL if task dispatched successfully.
1142 * Actual return value is the pointer to taskq entry that was used to
1143 * dispatch a task. This is useful for debugging.
1144 */
1145 taskqid_t
1146 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
1147 {
1148 taskq_bucket_t *bucket = NULL; /* Which bucket needs extension */
1149 taskq_ent_t *tqe = NULL;
1150 taskq_ent_t *tqe1;
1151 uint_t bsize;
1152
1153 ASSERT(tq != NULL);
1154 ASSERT(func != NULL);
1155
1156 if (!(tq->tq_flags & TASKQ_DYNAMIC)) {
1157 /*
1158 * TQ_NOQUEUE flag can't be used with non-dynamic task queues.
1159 */
1160 ASSERT(!(flags & TQ_NOQUEUE));
1161 /*
1162 * Enqueue the task to the underlying queue.
1163 */
1164 mutex_enter(&tq->tq_lock);
1165
1166 TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);
1167
1168 if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
1169 tq->tq_nomem++;
1170 mutex_exit(&tq->tq_lock);
1171 return ((taskqid_t)tqe);
1172 }
1173 /* Make sure we start without any flags */
1174 tqe->tqent_un.tqent_flags = 0;
1175
1176 if (flags & TQ_FRONT) {
1177 TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1178 } else {
1179 TQ_ENQUEUE(tq, tqe, func, arg);
1180 }
1181 mutex_exit(&tq->tq_lock);
1182 return ((taskqid_t)tqe);
1183 }
1184
1185 /*
1186 * Dynamic taskq dispatching.
1187 */
1188 ASSERT(!(flags & (TQ_NOALLOC | TQ_FRONT)));
1189 TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flags);
1190
1191 bsize = tq->tq_nbuckets;
1192
1193 if (bsize == 1) {
1194 /*
1195 * In a single-CPU case there is only one bucket, so get
1196 * entry directly from there.
1197 */
1198 if ((tqe = taskq_bucket_dispatch(tq->tq_buckets, func, arg))
1199 != NULL)
1200 return ((taskqid_t)tqe); /* Fastpath */
1201 bucket = tq->tq_buckets;
1202 } else {
1203 int loopcount;
1204 taskq_bucket_t *b;
1205 uintptr_t h = ((uintptr_t)CPU + (uintptr_t)arg) >> 3;
1206
1207 h = TQ_HASH(h);
1208
1209 /*
1210 * The 'bucket' points to the original bucket that we hit. If we
1211 * can't allocate from it, we search other buckets, but only
1212 * extend this one.
1213 */
1214 b = &tq->tq_buckets[h & (bsize - 1)];
1215 ASSERT(b->tqbucket_taskq == tq); /* Sanity check */
1216
1217 /*
1218 * Do a quick check before grabbing the lock. If the bucket does
1219 * not have free entries now, chances are very small that it
1220 * will after we take the lock, so we just skip it.
1221 */
1222 if (b->tqbucket_nfree != 0) {
1223 if ((tqe = taskq_bucket_dispatch(b, func, arg)) != NULL)
1224 return ((taskqid_t)tqe); /* Fastpath */
1225 } else {
1226 TQ_STAT(b, tqs_misses);
1227 }
1228
1229 bucket = b;
1230 loopcount = MIN(taskq_search_depth, bsize);
1231 /*
1232 * If bucket dispatch failed, search loopcount number of buckets
1233 * before we give up and fail.
1234 */
1235 do {
1236 b = &tq->tq_buckets[++h & (bsize - 1)];
1237 ASSERT(b->tqbucket_taskq == tq); /* Sanity check */
1238 loopcount--;
1239
1240 if (b->tqbucket_nfree != 0) {
1241 tqe = taskq_bucket_dispatch(b, func, arg);
1242 } else {
1243 TQ_STAT(b, tqs_misses);
1244 }
1245 } while ((tqe == NULL) && (loopcount > 0));
1246 }
1247
1248 /*
1249 * At this point we either scheduled a task and (tqe != NULL) or failed
1250 * (tqe == NULL). Try to recover from fails.
1251 */
1252
1253 /*
1254 * For KM_SLEEP dispatches, try to extend the bucket and retry dispatch.
1255 */
1256 if ((tqe == NULL) && !(flags & TQ_NOSLEEP)) {
1257 /*
1258 * taskq_bucket_extend() may fail to do anything, but this is
1259 * fine - we deal with it later. If the bucket was successfully
1260 * extended, there is a good chance that taskq_bucket_dispatch()
1261 * will get this new entry, unless someone is racing with us and
1262 * stealing the new entry from under our nose.
1263 * taskq_bucket_extend() may sleep.
1264 */
1265 taskq_bucket_extend(bucket);
1266 TQ_STAT(bucket, tqs_disptcreates);
1267 if ((tqe = taskq_bucket_dispatch(bucket, func, arg)) != NULL)
1268 return ((taskqid_t)tqe);
1269 }
1270
1271 ASSERT(bucket != NULL);
1272
1273 /*
1274 * Since there are not enough free entries in the bucket, add a
1275 * taskq entry to extend it in the background using backing queue
1276 * (unless we already have a taskq entry to perform that extension).
1277 */
1278 mutex_enter(&tq->tq_lock);
1279 if (!taskq_ent_exists(tq, taskq_bucket_extend, bucket)) {
1280 if ((tqe1 = taskq_ent_alloc(tq, TQ_NOSLEEP)) != NULL) {
1281 TQ_ENQUEUE_FRONT(tq, tqe1, taskq_bucket_extend, bucket);
1282 } else {
1283 tq->tq_nomem++;
1284 }
1285 }
1286
1287 /*
1288 * Dispatch failed and we can't find an entry to schedule a task.
1289 * Revert to the backing queue unless TQ_NOQUEUE was asked.
1290 */
1291 if ((tqe == NULL) && !(flags & TQ_NOQUEUE)) {
1292 if ((tqe = taskq_ent_alloc(tq, flags)) != NULL) {
1293 TQ_ENQUEUE(tq, tqe, func, arg);
1294 } else {
1295 tq->tq_nomem++;
1296 }
1297 }
1298 mutex_exit(&tq->tq_lock);
1299
1300 return ((taskqid_t)tqe);
1301 }
1302
1303 void
1304 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
1305 taskq_ent_t *tqe)
1306 {
1307 ASSERT(func != NULL);
1308 ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
1309
1310 /*
1311 * Mark it as a prealloc'd task. This is important
1312 * to ensure that we don't free it later.
1313 */
1314 tqe->tqent_un.tqent_flags |= TQENT_FLAG_PREALLOC;
1315 /*
1316 * Enqueue the task to the underlying queue.
1317 */
1318 mutex_enter(&tq->tq_lock);
1319
1320 if (flags & TQ_FRONT) {
1321 TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1322 } else {
1323 TQ_ENQUEUE(tq, tqe, func, arg);
1324 }
1325 mutex_exit(&tq->tq_lock);
1326 }
1327
1328 /*
1329 * Allow our caller to ask if there are tasks pending on the queue.
1330 */
1331 boolean_t
1332 taskq_empty(taskq_t *tq)
1333 {
1334 boolean_t rv;
1335
1336 ASSERT3P(tq, !=, curthread->t_taskq);
1337 mutex_enter(&tq->tq_lock);
1338 rv = (tq->tq_task.tqent_next == &tq->tq_task) && (tq->tq_active == 0);
1339 mutex_exit(&tq->tq_lock);
1340
1341 return (rv);
1342 }
1343
1344 /*
1345 * Wait for all pending tasks to complete.
1346 * Calling taskq_wait from a task will cause deadlock.
1347 */
1348 void
1349 taskq_wait(taskq_t *tq)
1350 {
1351 ASSERT(tq != curthread->t_taskq);
1352
1353 mutex_enter(&tq->tq_lock);
1354 while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
1355 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1356 mutex_exit(&tq->tq_lock);
1357
1358 if (tq->tq_flags & TASKQ_DYNAMIC) {
1359 taskq_bucket_t *b = tq->tq_buckets;
1360 int bid = 0;
1361 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1362 mutex_enter(&b->tqbucket_lock);
1363 while (b->tqbucket_nalloc > 0)
1364 cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
1365 mutex_exit(&b->tqbucket_lock);
1366 }
1367 }
1368 }
1369
1370 void
1371 taskq_wait_id(taskq_t *tq, taskqid_t id __unused)
1372 {
1373 taskq_wait(tq);
1374 }
1375
1376 /*
1377 * Suspend execution of tasks.
1378 *
1379 * Tasks in the queue part will be suspended immediately upon return from this
1380 * function. Pending tasks in the dynamic part will continue to execute, but all
1381 * new tasks will be suspended.
1382 */
1383 void
1384 taskq_suspend(taskq_t *tq)
1385 {
1386 rw_enter(&tq->tq_threadlock, RW_WRITER);
1387
1388 if (tq->tq_flags & TASKQ_DYNAMIC) {
1389 taskq_bucket_t *b = tq->tq_buckets;
1390 int bid = 0;
1391 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1392 mutex_enter(&b->tqbucket_lock);
1393 b->tqbucket_flags |= TQBUCKET_SUSPEND;
1394 mutex_exit(&b->tqbucket_lock);
1395 }
1396 }
1397 /*
1398 * Mark task queue as being suspended. Needed for taskq_suspended().
1399 */
1400 mutex_enter(&tq->tq_lock);
1401 ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
1402 tq->tq_flags |= TASKQ_SUSPENDED;
1403 mutex_exit(&tq->tq_lock);
1404 }
1405
1406 /*
1407 * returns: 1 if tq is suspended, 0 otherwise.
1408 */
1409 int
1410 taskq_suspended(taskq_t *tq)
1411 {
1412 return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
1413 }
1414
1415 /*
1416 * Resume taskq execution.
1417 */
1418 void
1419 taskq_resume(taskq_t *tq)
1420 {
1421 ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));
1422
1423 if (tq->tq_flags & TASKQ_DYNAMIC) {
1424 taskq_bucket_t *b = tq->tq_buckets;
1425 int bid = 0;
1426 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1427 mutex_enter(&b->tqbucket_lock);
1428 b->tqbucket_flags &= ~TQBUCKET_SUSPEND;
1429 mutex_exit(&b->tqbucket_lock);
1430 }
1431 }
1432 mutex_enter(&tq->tq_lock);
1433 ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
1434 tq->tq_flags &= ~TASKQ_SUSPENDED;
1435 mutex_exit(&tq->tq_lock);
1436
1437 rw_exit(&tq->tq_threadlock);
1438 }
1439
1440 int
1441 taskq_member(taskq_t *tq, kthread_t *thread)
1442 {
1443 return (thread->t_taskq == tq);
1444 }
1445
1446 /*
1447 * Creates a thread in the taskq. We only allow one outstanding create at
1448 * a time. We drop and reacquire the tq_lock in order to avoid blocking other
1449 * taskq activity while thread_create() or lwp_kernel_create() run.
1450 *
1451 * The first time we're called, we do some additional setup, and do not
1452 * return until there are enough threads to start servicing requests.
1453 */
1454 static void
1455 taskq_thread_create(taskq_t *tq)
1456 {
1457 kthread_t *t;
1458 const boolean_t first = (tq->tq_nthreads == 0);
1459
1460 ASSERT(MUTEX_HELD(&tq->tq_lock));
1461 ASSERT(tq->tq_flags & TASKQ_CHANGING);
1462 ASSERT(tq->tq_nthreads < tq->tq_nthreads_target);
1463 ASSERT(!(tq->tq_flags & TASKQ_THREAD_CREATED));
1464
1465
1466 tq->tq_flags |= TASKQ_THREAD_CREATED;
1467 tq->tq_active++;
1468 mutex_exit(&tq->tq_lock);
1469
1470 /*
1471 * With TASKQ_DUTY_CYCLE the new thread must have an LWP
1472 * as explained in ../disp/sysdc.c (for the msacct data).
1473 * Normally simple kthreads are preferred, unless the
1474 * caller has asked for LWPs for other reasons.
1475 */
1476 if ((tq->tq_flags & (TASKQ_DUTY_CYCLE | TASKQ_THREADS_LWP)) != 0) {
1477 /* Enforced in taskq_create_common */
1478 ASSERT3P(tq->tq_proc, !=, &p0);
1479 t = lwp_kernel_create(tq->tq_proc, taskq_thread, tq, TS_RUN,
1480 tq->tq_pri);
1481 } else {
1482 t = thread_create(NULL, 0, taskq_thread, tq, 0, tq->tq_proc,
1483 TS_RUN, tq->tq_pri);
1484 }
1485
1486 if (!first) {
1487 mutex_enter(&tq->tq_lock);
1488 return;
1489 }
1490
1491 /*
1492 * We know the thread cannot go away, since tq cannot be
1493 * destroyed until creation has completed. We can therefore
1494 * safely dereference t.
1495 */
1496 if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
1497 taskq_cpupct_install(tq, t->t_cpupart);
1498 }
1499 mutex_enter(&tq->tq_lock);
1500
1501 /* Wait until we can service requests. */
1502 while (tq->tq_nthreads != tq->tq_nthreads_target &&
1503 tq->tq_nthreads < TASKQ_CREATE_ACTIVE_THREADS) {
1504 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1505 }
1506 }
1507
1508 /*
1509 * Common "sleep taskq thread" function, which handles CPR stuff, as well
1510 * as giving a nice common point for debuggers to find inactive threads.
1511 */
1512 static clock_t
1513 taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv,
1514 callb_cpr_t *cprinfo, clock_t timeout)
1515 {
1516 clock_t ret = 0;
1517
1518 if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1519 CALLB_CPR_SAFE_BEGIN(cprinfo);
1520 }
1521 if (timeout < 0)
1522 cv_wait(cv, mx);
1523 else
1524 ret = cv_reltimedwait(cv, mx, timeout, TR_CLOCK_TICK);
1525
1526 if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1527 CALLB_CPR_SAFE_END(cprinfo, mx);
1528 }
1529
1530 return (ret);
1531 }
1532
1533 /*
1534 * Worker thread for processing task queue.
1535 */
1536 static void
1537 taskq_thread(void *arg)
1538 {
1539 int thread_id;
1540
1541 taskq_t *tq = arg;
1542 taskq_ent_t *tqe;
1543 callb_cpr_t cprinfo;
1544 hrtime_t start, end;
1545 boolean_t freeit;
1546
1547 curthread->t_taskq = tq; /* mark ourselves for taskq_member() */
1548
1549 if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) {
1550 sysdc_thread_enter(curthread, tq->tq_DC,
1551 (tq->tq_flags & TASKQ_DC_BATCH) ? SYSDC_THREAD_BATCH : 0);
1552 }
1553
1554 if (tq->tq_flags & TASKQ_CPR_SAFE) {
1555 CALLB_CPR_INIT_SAFE(curthread, tq->tq_name);
1556 } else {
1557 CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr,
1558 tq->tq_name);
1559 }
1560 mutex_enter(&tq->tq_lock);
1561 thread_id = ++tq->tq_nthreads;
1562 ASSERT(tq->tq_flags & TASKQ_THREAD_CREATED);
1563 ASSERT(tq->tq_flags & TASKQ_CHANGING);
1564 tq->tq_flags &= ~TASKQ_THREAD_CREATED;
1565
1566 VERIFY3S(thread_id, <=, tq->tq_nthreads_max);
1567
1568 if (tq->tq_nthreads_max == 1)
1569 tq->tq_thread = curthread;
1570 else
1571 tq->tq_threadlist[thread_id - 1] = curthread;
1572
1573 /* Allow taskq_create_common()'s taskq_thread_create() to return. */
1574 if (tq->tq_nthreads == TASKQ_CREATE_ACTIVE_THREADS)
1575 cv_broadcast(&tq->tq_wait_cv);
1576
1577 for (;;) {
1578 if (tq->tq_flags & TASKQ_CHANGING) {
1579 /* See if we're no longer needed */
1580 if (thread_id > tq->tq_nthreads_target) {
1581 /*
1582 * To preserve the one-to-one mapping between
1583 * thread_id and thread, we must exit from
1584 * highest thread ID to least.
1585 *
1586 * However, if everyone is exiting, the order
1587 * doesn't matter, so just exit immediately.
1588 * (this is safe, since you must wait for
1589 * nthreads to reach 0 after setting
1590 * tq_nthreads_target to 0)
1591 */
1592 if (thread_id == tq->tq_nthreads ||
1593 tq->tq_nthreads_target == 0)
1594 break;
1595
1596 /* Wait for higher thread_ids to exit */
1597 (void) taskq_thread_wait(tq, &tq->tq_lock,
1598 &tq->tq_exit_cv, &cprinfo, -1);
1599 continue;
1600 }
1601
1602 /*
1603 * If no thread is starting taskq_thread(), we can
1604 * do some bookkeeping.
1605 */
1606 if (!(tq->tq_flags & TASKQ_THREAD_CREATED)) {
1607 /* Check if we've reached our target */
1608 if (tq->tq_nthreads == tq->tq_nthreads_target) {
1609 tq->tq_flags &= ~TASKQ_CHANGING;
1610 cv_broadcast(&tq->tq_wait_cv);
1611 }
1612 /* Check if we need to create a thread */
1613 if (tq->tq_nthreads < tq->tq_nthreads_target) {
1614 taskq_thread_create(tq);
1615 continue; /* tq_lock was dropped */
1616 }
1617 }
1618 }
1619 if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
1620 if (--tq->tq_active == 0)
1621 cv_broadcast(&tq->tq_wait_cv);
1622 (void) taskq_thread_wait(tq, &tq->tq_lock,
1623 &tq->tq_dispatch_cv, &cprinfo, -1);
1624 tq->tq_active++;
1625 continue;
1626 }
1627
1628 tqe->tqent_prev->tqent_next = tqe->tqent_next;
1629 tqe->tqent_next->tqent_prev = tqe->tqent_prev;
1630 mutex_exit(&tq->tq_lock);
1631
1632 /*
1633 * For prealloc'd tasks, we don't free anything. We
1634 * have to check this now, because once we call the
1635 * function for a prealloc'd taskq, we can't touch the
1636 * tqent any longer (calling the function returns the
1637 * ownership of the tqent back to the caller of
1638 * taskq_dispatch.)
1639 */
		if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
		    (tqe->tqent_un.tqent_flags & TQENT_FLAG_PREALLOC)) {
			/* clear pointers to assist assertion checks */
			tqe->tqent_next = tqe->tqent_prev = NULL;
			freeit = B_FALSE;
		} else {
			freeit = B_TRUE;
		}

		rw_enter(&tq->tq_threadlock, RW_READER);
		start = gethrtime();
		DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		tqe->tqent_func(tqe->tqent_arg);
		DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		end = gethrtime();
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		tq->tq_totaltime += end - start;
		tq->tq_executed++;

		if (freeit)
			taskq_ent_free(tq, tqe);
	}

	if (tq->tq_nthreads_max == 1)
		tq->tq_thread = NULL;
	else
		tq->tq_threadlist[thread_id - 1] = NULL;

	/* We're exiting, and therefore no longer active */
	ASSERT(tq->tq_active > 0);
	tq->tq_active--;

	ASSERT(tq->tq_nthreads > 0);
	tq->tq_nthreads--;

	/* Wake up anyone waiting for us to exit */
	cv_broadcast(&tq->tq_exit_cv);
	if (tq->tq_nthreads == tq->tq_nthreads_target) {
		if (!(tq->tq_flags & TASKQ_THREAD_CREATED))
			tq->tq_flags &= ~TASKQ_CHANGING;

		cv_broadcast(&tq->tq_wait_cv);
	}

	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
	CALLB_CPR_EXIT(&cprinfo);	/* drops tq->tq_lock */
	if (curthread->t_lwp != NULL) {
		mutex_enter(&curproc->p_lock);
		lwp_exit();
	} else {
		thread_exit();
	}
}
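
/*
 * Illustrative note (not in the original source): the
 * TQENT_FLAG_PREALLOC case above corresponds to entries dispatched via
 * taskq_dispatch_ent(), where the caller provides a taskq_ent_t it
 * owns (typically embedded in a larger structure that outlives the
 * task), e.g.:
 *
 *	taskq_dispatch_ent(tq, my_func, my_arg, 0, &my_obj->obj_tqent);
 *
 * ("my_func", "my_arg" and "my_obj" are hypothetical.)  Because calling
 * the function hands the entry back to the dispatcher's caller, the
 * worker above must not touch the tqent after tqent_func() returns.
 */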

/*
 * Worker per-entry thread for dynamic dispatches.
 */
static void
taskq_d_thread(taskq_ent_t *tqe)
{
	taskq_bucket_t *bucket = tqe->tqent_un.tqent_bucket;
	taskq_t *tq = bucket->tqbucket_taskq;
	kmutex_t *lock = &bucket->tqbucket_lock;
	kcondvar_t *cv = &tqe->tqent_cv;
	callb_cpr_t cprinfo;
	clock_t w = 0;

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, tq->tq_name);

	mutex_enter(lock);

	for (;;) {
		/*
		 * If a task is scheduled (func != NULL), execute it, otherwise
		 * sleep, waiting for a job.
		 */
		if (tqe->tqent_func != NULL) {
			hrtime_t start;
			hrtime_t end;

			ASSERT(bucket->tqbucket_nalloc > 0);

			/*
			 * It is possible to free the entry right away
			 * before actually executing the task, so that
			 * subsequent dispatches may immediately reuse it.
			 * But this effectively creates a queue of depth
			 * two within the entry and may lead to a deadlock
			 * if the execution of the current task depends on
			 * the execution of the next scheduled task. So we
			 * keep the entry busy until the task is processed.
			 */

			mutex_exit(lock);
			start = gethrtime();
			DTRACE_PROBE3(taskq__d__exec__start, taskq_t *, tq,
			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
			tqe->tqent_func(tqe->tqent_arg);
			DTRACE_PROBE3(taskq__d__exec__end, taskq_t *, tq,
			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
			end = gethrtime();
			mutex_enter(lock);
			bucket->tqbucket_totaltime += end - start;

			/*
			 * Return the entry to the bucket free list.
			 */
			tqe->tqent_func = NULL;
			TQ_APPEND(bucket->tqbucket_freelist, tqe);
			bucket->tqbucket_nalloc--;
			bucket->tqbucket_nfree++;
			ASSERT(!IS_EMPTY(bucket->tqbucket_freelist));
			/*
			 * taskq_wait() waits for nalloc to drop to zero on
			 * tqbucket_cv.
			 */
			cv_signal(&bucket->tqbucket_cv);
		}

		/*
		 * At this point the entry must be in the bucket free list -
		 * either because it was there initially or because it just
		 * finished executing a task and put itself on the free list.
		 */
		ASSERT(bucket->tqbucket_nfree > 0);
		/*
		 * Go to sleep unless we are closing.
		 * If a thread is sleeping too long, it dies.
		 */
		if (!(bucket->tqbucket_flags & TQBUCKET_CLOSE)) {
			w = taskq_thread_wait(tq, lock, cv,
			    &cprinfo, taskq_thread_timeout * hz);
		}


		/*
		 * At this point we may be in two different states:
		 *
		 * (1) tqent_func is set, which means that a new task is
		 * dispatched and we need to execute it.
		 *
		 * (2) The thread has been sleeping for too long, or we
		 * are closing. In both cases destroy the thread and the
		 * entry.
		 */

		/* If func is NULL we should be on the freelist. */
		ASSERT((tqe->tqent_func != NULL) ||
		    (bucket->tqbucket_nfree > 0));
		/* If func is non-NULL we should be allocated */
		ASSERT((tqe->tqent_func == NULL) ||
		    (bucket->tqbucket_nalloc > 0));

		/* Check freelist consistency */
		ASSERT((bucket->tqbucket_nfree > 0) ||
		    IS_EMPTY(bucket->tqbucket_freelist));
		ASSERT((bucket->tqbucket_nfree == 0) ||
		    !IS_EMPTY(bucket->tqbucket_freelist));

		if ((tqe->tqent_func == NULL) &&
		    ((w == -1) || (bucket->tqbucket_flags & TQBUCKET_CLOSE))) {
			/*
			 * This thread is sleeping for too long or we are
			 * closing - time to die.
			 */
			break;
		}
	}

	/*
	 * Thread creation/destruction happens rarely,
	 * so grabbing the lock is not a big performance issue.
	 * The bucket lock is dropped by CALLB_CPR_EXIT().
	 */

	/* Remove the entry from the free list. */
	tqe->tqent_prev->tqent_next = tqe->tqent_next;
	tqe->tqent_next->tqent_prev = tqe->tqent_prev;
	ASSERT(bucket->tqbucket_nfree > 0);
	bucket->tqbucket_nfree--;

	TQ_STAT(bucket, tqs_tdeaths);
	cv_signal(&bucket->tqbucket_cv);
	tqe->tqent_thread = NULL;
	mutex_enter(&tq->tq_lock);
	tq->tq_tdeaths++;
	mutex_exit(&tq->tq_lock);
	CALLB_CPR_EXIT(&cprinfo);	/* mutex_exit(lock) */

	kmem_cache_free(taskq_ent_cache, tqe);

	if (curthread->t_lwp != NULL) {
		mutex_enter(&curproc->p_lock);
		lwp_exit();	/* noreturn. drops p_lock */
	} else {
		thread_exit();
	}
}


/*
 * Taskq creation. May sleep for memory.
 * Always use automatically generated instances to avoid kstat name space
 * collisions.
 */

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);

	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, &p0, 0, flags | TASKQ_NOINSTANCE));
}
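
/*
 * Illustrative note (not in the original source): a typical consumer
 * of this interface ("my_func" and "my_arg" are hypothetical):
 *
 *	taskq_t *tq = taskq_create("my_tq", 4, minclsyspri, 1, 8,
 *	    TASKQ_PREPOPULATE);
 *	if (taskq_dispatch(tq, my_func, my_arg, TQ_NOSLEEP) ==
 *	    TASKQID_INVALID)
 *		my_func(my_arg);	dispatch failed; run synchronously
 *	taskq_wait(tq);			wait for queued tasks to finish
 *	taskq_destroy(tq);
 */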

/*
 * Create an instance of a task queue. It is legal to create task queues with
 * the same name and different instances.
 *
 * taskq_create_instance() is used by ddi_taskq_create(), which gets the
 * instance from ddi_get_instance(). In some cases the instance is not
 * initialized and is set to -1. This case is handled as if no instance was
 * passed at all.
 */
taskq_t *
taskq_create_instance(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT((instance >= 0) || (instance == -1));

	if (instance < 0) {
		flags |= TASKQ_NOINSTANCE;
	}

	return (taskq_create_common(name, instance, nthreads,
	    pri, minalloc, maxalloc, &p0, 0, flags));
}
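
/*
 * Illustrative note (not in the original source): as described above,
 * ddi_taskq_create() supplies the instance itself, in effect:
 *
 *	tq = taskq_create_instance(name, ddi_get_instance(dip),
 *	    nthreads, pri, minalloc, maxalloc, flags);
 *
 * so two instances of the same driver get distinct kstats even though
 * they share a taskq name.
 */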

taskq_t *
taskq_create_proc(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, proc_t *proc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT(proc->p_flag & SSYS);

	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, proc, 0, flags | TASKQ_NOINSTANCE));
}

taskq_t *
taskq_create_sysdc(const char *name, int nthreads, int minalloc,
    int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT(proc->p_flag & SSYS);

	return (taskq_create_common(name, 0, nthreads, minclsyspri, minalloc,
	    maxalloc, proc, dc, flags | TASKQ_NOINSTANCE | TASKQ_DUTY_CYCLE));
}
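
/*
 * Illustrative note (not in the original source): callers of
 * taskq_create_sysdc() pass a duty cycle percentage where the other
 * creation interfaces take a priority (hypothetical values):
 *
 *	tq = taskq_create_sysdc("my_sdc_tq", 8, 8, INT_MAX, proc, 75,
 *	    TASKQ_DC_BATCH);
 *
 * This asks the SDC scheduling class to hold each worker near a 75%
 * on-CPU duty cycle; see the sysdc_thread_enter() call in
 * taskq_thread() above.
 */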

static taskq_t *
taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
{
	taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
	uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
	uint_t bsize;	/* # of buckets - always power of 2 */
	int max_nthreads;

	/*
	 * TASKQ_DYNAMIC, TASKQ_CPR_SAFE and TASKQ_THREADS_CPU_PCT are all
	 * mutually incompatible.
	 */
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_CPR_SAFE));
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_THREADS_CPU_PCT));
	IMPLY((flags & TASKQ_CPR_SAFE), !(flags & TASKQ_THREADS_CPU_PCT));

	/* Cannot have DYNAMIC with DUTY_CYCLE */
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_DUTY_CYCLE));

	/* Cannot have DUTY_CYCLE with a p0 kernel process */
	IMPLY((flags & TASKQ_DUTY_CYCLE), proc != &p0);

	/* Cannot have THREADS_LWP with a p0 kernel process */
	IMPLY((flags & TASKQ_THREADS_LWP), proc != &p0);

	/* Cannot have DC_BATCH without DUTY_CYCLE */
	ASSERT((flags & (TASKQ_DUTY_CYCLE|TASKQ_DC_BATCH)) != TASKQ_DC_BATCH);

	ASSERT(proc != NULL);

	bsize = 1 << (highbit(ncpus) - 1);
	ASSERT(bsize >= 1);
	bsize = MIN(bsize, taskq_maxbuckets);

	if (flags & TASKQ_DYNAMIC) {
		ASSERT3S(nthreads, >=, 1);
		tq->tq_maxsize = nthreads;

		/* For dynamic task queues use just one backup thread */
		nthreads = max_nthreads = 1;

	} else if (flags & TASKQ_THREADS_CPU_PCT) {
		uint_t pct;
		ASSERT3S(nthreads, >=, 0);
		pct = nthreads;

		if (pct > taskq_cpupct_max_percent)
			pct = taskq_cpupct_max_percent;

		/*
		 * If you're using THREADS_CPU_PCT, the process for the
		 * taskq threads must be curproc. This allows any pset
		 * binding to be inherited correctly. If proc is &p0,
		 * we won't be creating LWPs, so new threads will be assigned
		 * to the default processor set.
		 */
		ASSERT(curproc == proc || proc == &p0);
		tq->tq_threads_ncpus_pct = pct;
		nthreads = 1;		/* corrected in taskq_thread_create() */
		max_nthreads = TASKQ_THREADS_PCT(max_ncpus, pct);

	} else {
		ASSERT3S(nthreads, >=, 1);
		max_nthreads = nthreads;
	}
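	/*
	 * Illustrative note (not in the original source): with
	 * TASKQ_THREADS_CPU_PCT, nthreads is a percentage, so e.g.
	 * nthreads == 50 on a system where max_ncpus == 8 yields
	 * max_nthreads = TASKQ_THREADS_PCT(8, 50) = 4, assuming the
	 * macro computes ncpus * pct / 100 with a floor of one thread.
	 */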

	if (max_nthreads < taskq_minimum_nthreads_max)
		max_nthreads = taskq_minimum_nthreads_max;

	/*
	 * Make sure the name is 0-terminated, and conforms to the rules for
	 * C identifiers.
	 */
	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
	strident_canon(tq->tq_name, TASKQ_NAMELEN + 1);

	tq->tq_flags = flags | TASKQ_CHANGING;
	tq->tq_active = 0;
	tq->tq_instance = instance;
	tq->tq_nthreads_target = nthreads;
	tq->tq_nthreads_max = max_nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nbuckets = bsize;
	tq->tq_proc = proc;
	tq->tq_pri = pri;
	tq->tq_DC = dc;
	list_link_init(&tq->tq_cpupct_link);

	if (max_nthreads > 1)
		tq->tq_threadlist = kmem_alloc(
		    sizeof (kthread_t *) * max_nthreads, KM_SLEEP);

	mutex_enter(&tq->tq_lock);
	if (flags & TASKQ_PREPOPULATE) {
		while (minalloc-- > 0)
			taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
	}

	/*
	 * Before we start creating threads for this taskq, take a
	 * zone hold so the zone can't go away before taskq_destroy
	 * makes sure all the taskq threads are gone. This hold is
	 * similar in purpose to those taken by zthread_create().
	 */
	zone_hold(tq->tq_proc->p_zone);

	/*
	 * Create the first thread, which will create any other threads
	 * necessary. taskq_thread_create will not return until we have
	 * enough threads to be able to process requests.
	 */
	taskq_thread_create(tq);
	mutex_exit(&tq->tq_lock);

	if (flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *bucket = kmem_zalloc(sizeof (taskq_bucket_t) *
		    bsize, KM_SLEEP);
		int b_id;

		tq->tq_buckets = bucket;

		/* Initialize each bucket */
		for (b_id = 0; b_id < bsize; b_id++, bucket++) {
			mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
			    NULL);
			cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
			bucket->tqbucket_taskq = tq;
			bucket->tqbucket_freelist.tqent_next =
			    bucket->tqbucket_freelist.tqent_prev =
			    &bucket->tqbucket_freelist;
			if (flags & TASKQ_PREPOPULATE)
				taskq_bucket_extend(bucket);
		}
	}

	/*
	 * Install kstats.
	 * We have two cases:
	 *   1) Instance is provided to taskq_create_instance(). In this case
	 *	it should be >= 0 and we use it.
	 *
	 *   2) Instance is not provided and is automatically generated
	 */
	if (flags & TASKQ_NOINSTANCE) {
		instance = tq->tq_instance =
		    (int)(uintptr_t)vmem_alloc(taskq_id_arena, 1, VM_SLEEP);
	}

	if (flags & TASKQ_DYNAMIC) {
		if ((tq->tq_kstat = kstat_create("unix", instance,
		    tq->tq_name, "taskq_d", KSTAT_TYPE_NAMED,
		    sizeof (taskq_d_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			tq->tq_kstat->ks_lock = &taskq_d_kstat_lock;
			tq->tq_kstat->ks_data = &taskq_d_kstat;
			tq->tq_kstat->ks_update = taskq_d_kstat_update;
			tq->tq_kstat->ks_private = tq;
			kstat_install(tq->tq_kstat);
		}
	} else {
		if ((tq->tq_kstat = kstat_create("unix", instance, tq->tq_name,
		    "taskq", KSTAT_TYPE_NAMED,
		    sizeof (taskq_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			tq->tq_kstat->ks_lock = &taskq_kstat_lock;
			tq->tq_kstat->ks_data = &taskq_kstat;
			tq->tq_kstat->ks_update = taskq_kstat_update;
			tq->tq_kstat->ks_private = tq;
			kstat_install(tq->tq_kstat);
		}
	}

	return (tq);
}

/*
 * taskq_destroy().
 *
 * Assumes: by the time taskq_destroy is called no one will use this task queue
 * in any way and no one will try to dispatch entries in it.
 */
void
taskq_destroy(taskq_t *tq)
{
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));

	/*
	 * Destroy kstats.
	 */
	if (tq->tq_kstat != NULL) {
		kstat_delete(tq->tq_kstat);
		tq->tq_kstat = NULL;
	}

	/*
	 * Destroy instance if needed.
	 */
	if (tq->tq_flags & TASKQ_NOINSTANCE) {
		vmem_free(taskq_id_arena, (void *)(uintptr_t)(tq->tq_instance),
		    1);
		tq->tq_instance = 0;
	}

	/*
	 * Unregister from the cpupct list.
	 */
	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
		taskq_cpupct_remove(tq);
	}

	/*
	 * Wait for any pending entries to complete.
	 */
	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);
	ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
	    (tq->tq_active == 0));

	/* notify all the threads that they need to exit */
	tq->tq_nthreads_target = 0;

	tq->tq_flags |= TASKQ_CHANGING;
	cv_broadcast(&tq->tq_dispatch_cv);
	cv_broadcast(&tq->tq_exit_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	if (tq->tq_nthreads_max != 1)
		kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
		    tq->tq_nthreads_max);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0)
		taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));

	mutex_exit(&tq->tq_lock);

	/*
	 * Mark each bucket as closing and wakeup all sleeping threads.
	 */
	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		taskq_ent_t *tqe;

		mutex_enter(&b->tqbucket_lock);

		b->tqbucket_flags |= TQBUCKET_CLOSE;
		/* Wakeup all sleeping threads */

		for (tqe = b->tqbucket_freelist.tqent_next;
		    tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
			cv_signal(&tqe->tqent_cv);

		ASSERT(b->tqbucket_nalloc == 0);

		/*
		 * At this point we have waited for all pending jobs to
		 * complete (in both the task queue and the bucket) and no
		 * new jobs should arrive. Wait for all threads to die.
		 */
		while (b->tqbucket_nfree > 0)
			cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
		mutex_exit(&b->tqbucket_lock);
		mutex_destroy(&b->tqbucket_lock);
		cv_destroy(&b->tqbucket_cv);
	}

	if (tq->tq_buckets != NULL) {
		ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
		kmem_free(tq->tq_buckets,
		    sizeof (taskq_bucket_t) * tq->tq_nbuckets);

		/* Cleanup fields before returning tq to the cache */
		tq->tq_buckets = NULL;
		tq->tq_tcreates = 0;
		tq->tq_tdeaths = 0;
	} else {
		ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
	}

	/*
	 * Now that all the taskq threads are gone, we can
	 * drop the zone hold taken in taskq_create_common
	 */
	zone_rele(tq->tq_proc->p_zone);

	tq->tq_threads_ncpus_pct = 0;
	tq->tq_totaltime = 0;
	tq->tq_tasks = 0;
	tq->tq_maxtasks = 0;
	tq->tq_executed = 0;
	kmem_cache_free(taskq_cache, tq);
}
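
/*
 * Illustrative note (not in the original source): per the assumption
 * above, a caller must cut off all dispatch sources before destroying
 * the queue ("sc" and its fields are hypothetical):
 *
 *	sc->sc_shutdown = B_TRUE;	no new taskq_dispatch() calls
 *	taskq_destroy(sc->sc_tq);	waits for pending tasks itself
 *	sc->sc_tq = NULL;
 */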

/*
 * Extend a bucket with a new entry on the free list and attach a worker thread
 * to it.
 *
 * Argument: pointer to the bucket.
 *
 * This function may quietly fail. It is only used by taskq_dispatch() which
 * handles such failures properly.
 */
static void
taskq_bucket_extend(void *arg)
{
	taskq_ent_t *tqe;
	taskq_bucket_t *b = (taskq_bucket_t *)arg;
	taskq_t *tq = b->tqbucket_taskq;
	kthread_t *t;
	int nthreads;

	mutex_enter(&tq->tq_lock);

	if (!ENOUGH_MEMORY()) {
		tq->tq_nomem++;
		mutex_exit(&tq->tq_lock);
		return;
	}

	/*
	 * Observe global taskq limits on the number of threads.
	 */
	if (tq->tq_tcreates++ - tq->tq_tdeaths > tq->tq_maxsize) {
		tq->tq_tcreates--;
		mutex_exit(&tq->tq_lock);
		return;
	}
	mutex_exit(&tq->tq_lock);

	tqe = kmem_cache_alloc(taskq_ent_cache, KM_NOSLEEP);

	if (tqe == NULL) {
		mutex_enter(&tq->tq_lock);
		tq->tq_nomem++;
		tq->tq_tcreates--;
		mutex_exit(&tq->tq_lock);
		return;
	}

	ASSERT(tqe->tqent_thread == NULL);

	tqe->tqent_un.tqent_bucket = b;

	/*
	 * Create a thread in a TS_STOPPED state first. If it is successfully
	 * created, place the entry on the free list and start the thread.
	 */
	if ((tq->tq_flags & TASKQ_THREADS_LWP) != 0) {
		/* Enforced in taskq_create_common */
		ASSERT3P(tq->tq_proc, !=, &p0);
		t = lwp_kernel_create(tq->tq_proc, taskq_d_thread,
		    tqe, TS_STOPPED, tq->tq_pri);
	} else {
		t = thread_create(NULL, 0, taskq_d_thread, tqe,
		    0, tq->tq_proc, TS_STOPPED, tq->tq_pri);
	}
	tqe->tqent_thread = t;
	t->t_taskq = tq;	/* mark thread as a taskq_member() */

	/*
	 * Once the entry is ready, link it to the bucket free list.
	 */
	mutex_enter(&b->tqbucket_lock);
	tqe->tqent_func = NULL;
	TQ_APPEND(b->tqbucket_freelist, tqe);
	b->tqbucket_nfree++;
	TQ_STAT(b, tqs_tcreates);

#if TASKQ_STATISTIC
	nthreads = b->tqbucket_stat.tqs_tcreates -
	    b->tqbucket_stat.tqs_tdeaths;
	b->tqbucket_stat.tqs_maxthreads = MAX(nthreads,
	    b->tqbucket_stat.tqs_maxthreads);
#endif

	mutex_exit(&b->tqbucket_lock);

	/*
	 * Start the stopped thread.
	 */
	if (t->t_lwp != NULL) {
		proc_t *p = tq->tq_proc;
		mutex_enter(&p->p_lock);
		t->t_proc_flag &= ~TP_HOLDLWP;
		lwp_create_done(t);	/* Sets TS_ALLSTART etc. */
		mutex_exit(&p->p_lock);
	} else {
		thread_lock(t);
		t->t_schedflag |= TS_ALLSTART;
		setrun_locked(t);
		thread_unlock(t);
	}
}

static int
taskq_kstat_update(kstat_t *ksp, int rw)
{
	struct taskq_kstat *tqsp = &taskq_kstat;
	taskq_t *tq = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	tqsp->tq_pid.value.ui64 = tq->tq_proc->p_pid;
	tqsp->tq_tasks.value.ui64 = tq->tq_tasks;
	tqsp->tq_executed.value.ui64 = tq->tq_executed;
	tqsp->tq_maxtasks.value.ui64 = tq->tq_maxtasks;
	tqsp->tq_totaltime.value.ui64 = tq->tq_totaltime;
	tqsp->tq_nactive.value.ui64 = tq->tq_active;
	tqsp->tq_nalloc.value.ui64 = tq->tq_nalloc;
	tqsp->tq_pri.value.ui64 = tq->tq_pri;
	tqsp->tq_nthreads.value.ui64 = tq->tq_nthreads;
	tqsp->tq_nomem.value.ui64 = tq->tq_nomem;
	return (0);
}

static int
taskq_d_kstat_update(kstat_t *ksp, int rw)
{
	struct taskq_d_kstat *tqsp = &taskq_d_kstat;
	taskq_t *tq = ksp->ks_private;
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ASSERT(tq->tq_flags & TASKQ_DYNAMIC);

	tqsp->tqd_btasks.value.ui64 = tq->tq_tasks;
	tqsp->tqd_bexecuted.value.ui64 = tq->tq_executed;
	tqsp->tqd_bmaxtasks.value.ui64 = tq->tq_maxtasks;
	tqsp->tqd_bnalloc.value.ui64 = tq->tq_nalloc;
	tqsp->tqd_bnactive.value.ui64 = tq->tq_active;
	tqsp->tqd_btotaltime.value.ui64 = tq->tq_totaltime;
	tqsp->tqd_pri.value.ui64 = tq->tq_pri;
	tqsp->tqd_nomem.value.ui64 = tq->tq_nomem;

	/*
	 * Zero only the per-bucket aggregates; tqd_nomem was set above
	 * and is not accumulated in the bucket loop below.
	 */
	tqsp->tqd_hits.value.ui64 = 0;
	tqsp->tqd_misses.value.ui64 = 0;
	tqsp->tqd_overflows.value.ui64 = 0;
	tqsp->tqd_tcreates.value.ui64 = 0;
	tqsp->tqd_tdeaths.value.ui64 = 0;
	tqsp->tqd_maxthreads.value.ui64 = 0;
	tqsp->tqd_disptcreates.value.ui64 = 0;
	tqsp->tqd_totaltime.value.ui64 = 0;
	tqsp->tqd_nalloc.value.ui64 = 0;
	tqsp->tqd_nfree.value.ui64 = 0;

	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		tqsp->tqd_hits.value.ui64 += b->tqbucket_stat.tqs_hits;
		tqsp->tqd_misses.value.ui64 += b->tqbucket_stat.tqs_misses;
		tqsp->tqd_overflows.value.ui64 += b->tqbucket_stat.tqs_overflow;
		tqsp->tqd_tcreates.value.ui64 += b->tqbucket_stat.tqs_tcreates;
		tqsp->tqd_tdeaths.value.ui64 += b->tqbucket_stat.tqs_tdeaths;
		tqsp->tqd_maxthreads.value.ui64 +=
		    b->tqbucket_stat.tqs_maxthreads;
		tqsp->tqd_disptcreates.value.ui64 +=
		    b->tqbucket_stat.tqs_disptcreates;
		tqsp->tqd_totaltime.value.ui64 += b->tqbucket_totaltime;
		tqsp->tqd_nalloc.value.ui64 += b->tqbucket_nalloc;
		tqsp->tqd_nfree.value.ui64 += b->tqbucket_nfree;
	}
	return (0);
}

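/*
 * Illustrative note (not in the original source): the kstats installed
 * by taskq_create_common() can be read from userland with libkstat; a
 * minimal sketch, with error handling omitted and the "executed" stat
 * name assumed from the taskq_kstat definition earlier in this file:
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "unix", instance, "my_tq");
 *	(void) kstat_read(kc, ksp, NULL);
 *	kstat_named_t *kn = kstat_data_lookup(ksp, "executed");
 *	(void) printf("%llu tasks executed\n", kn->value.ui64);
 *	(void) kstat_close(kc);
 */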