xref: /illumos-gate/usr/src/uts/common/os/taskq.c (revision 8d0c3d29bb99f6521f2dc5058a7e4debebad7899)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Kernel task queues: general-purpose asynchronous task scheduling.
28  *
29  * A common problem in kernel programming is the need to schedule tasks
30  * to be performed later, by another thread. There are several reasons
31  * you may want or need to do this:
32  *
33  * (1) The task isn't time-critical, but your current code path is.
34  *
35  * (2) The task may require grabbing locks that you already hold.
36  *
37  * (3) The task may need to block (e.g. to wait for memory), but you
38  *     cannot block in your current context.
39  *
40  * (4) Your code path can't complete because of some condition, but you can't
41  *     sleep or fail, so you queue the task for later execution when the
42  *     condition disappears.
43  *
44  * (5) You just want a simple way to launch multiple tasks in parallel.
45  *
46  * Task queues provide such a facility. In its simplest form (used when
47  * performance is not a critical consideration) a task queue consists of a
48  * single list of tasks, together with one or more threads to service the
49  * list. There are some cases when this simple queue is not sufficient:
50  *
51  * (1) The task queues are very hot and there is a need to avoid data and lock
52  *	contention over global resources.
53  *
54  * (2) Some tasks may depend on other tasks to complete, so they can't be put in
55  *	the same list managed by the same thread.
56  *
57  * (3) Some tasks may block for a long time, and this should not block other
58  *	tasks in the queue.
59  *
60  * To provide useful service in such cases we define a "dynamic task queue"
61  * which has an individual thread for each of the tasks. These threads are
62  * dynamically created as they are needed and destroyed when they are not in
63  * use. The API for managing task pools is the same as for managing task
64  * queues, with the exception of the taskq creation flag TASKQ_DYNAMIC,
65  * which indicates that dynamic task pool behavior is desired.
66  *
67  * Dynamic task queues may also place tasks in the normal queue (called "backing
68  * queue") when the task pool runs out of resources. Users of task queues may
69  * disallow such queued scheduling by specifying TQ_NOQUEUE in the dispatch
70  * flags.
71  *
72  * The backing task queue is also used for scheduling internal tasks needed for
73  * dynamic task queue maintenance.
74  *
75  * INTERFACES ==================================================================
76  *
77  * taskq_t *taskq_create(name, nthreads, pri, minalloc, maxall, flags);
78  *
79  *	Create a taskq with specified properties.
80  *	Possible 'flags':
81  *
82  *	  TASKQ_DYNAMIC: Create a task pool for task management. If this flag is
83  *		specified, 'nthreads' specifies the maximum number of threads in
84  *		the task queue. Task execution order for dynamic task queues is
85  *		not predictable.
86  *
87  *		If this flag is not specified (default case) a
88  *		single-list task queue is created with 'nthreads' threads
89  *		servicing it. Entries in this queue are managed by
90  *		taskq_ent_alloc() and taskq_ent_free() which try to keep the
91  *		task population between 'minalloc' and 'maxalloc', but the
92  *		latter limit is only advisory for TQ_SLEEP dispatches and the
93  *		former limit is only advisory for TQ_NOALLOC dispatches. If
94  *		TASKQ_PREPOPULATE is set in 'flags', the taskq will be
95  *		prepopulated with 'minalloc' task structures.
96  *
97  *		Since non-DYNAMIC taskqs are queues, tasks are guaranteed to be
98  *		executed in the order they are scheduled if nthreads == 1.
99  *		If nthreads > 1, task execution order is not predictable.
100  *
101  *	  TASKQ_PREPOPULATE: Prepopulate task queue with threads.
102  *		Also prepopulate the task queue with 'minalloc' task structures.
103  *
104  *	  TASKQ_THREADS_CPU_PCT: This flag specifies that 'nthreads' should be
105  *		interpreted as a percentage of the # of online CPUs on the
106  *		system.  The taskq subsystem will automatically adjust the
107  *		number of threads in the taskq in response to CPU online
108  *		and offline events, to keep the ratio.  nthreads must be in
109  *		the range [0,100].
110  *
111  *		The calculation used is:
112  *
113  *			MAX((ncpus_online * percentage)/100, 1)
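 *
 *		For example, with 16 CPUs online and nthreads == 25, the
 *		taskq is kept at MAX((16 * 25)/100, 1) == 4 threads.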
114  *
115  *		This flag is not supported for DYNAMIC task queues.
116  *		This flag is not compatible with TASKQ_CPR_SAFE.
117  *
118  *	  TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
119  *		use their own protocol for handling CPR issues. This flag is not
120  *		supported for DYNAMIC task queues.  This flag is not compatible
121  *		with TASKQ_THREADS_CPU_PCT.
122  *
123  *	The 'pri' field specifies the default priority for the threads that
124  *	service all scheduled tasks.
125  *
126  * taskq_t *taskq_create_instance(name, instance, nthreads, pri, minalloc,
127  *    maxall, flags);
128  *
129  *	Like taskq_create(), but takes an instance number (or -1 to indicate
130  *	no instance).
131  *
132  * taskq_t *taskq_create_proc(name, nthreads, pri, minalloc, maxall, proc,
133  *    flags);
134  *
135  *	Like taskq_create(), but creates the taskq threads in the specified
136  *	system process.  If proc != &p0, this must be called from a thread
137  *	in that process.
138  *
139  * taskq_t *taskq_create_sysdc(name, nthreads, minalloc, maxall, proc,
140  *    dc, flags);
141  *
142  *	Like taskq_create_proc(), but the taskq threads will use the
143  *	System Duty Cycle (SDC) scheduling class with a duty cycle of dc.
144  *
145  * void taskq_destroy(tq):
146  *
147  *	Waits for any scheduled tasks to complete, then destroys the taskq.
148  *	The caller should guarantee that no new tasks are scheduled in the
149  *	closing taskq.
150  *
151  * taskqid_t taskq_dispatch(tq, func, arg, flags):
152  *
153  *	Dispatches the task "func(arg)" to the taskq. 'flags' indicates whether
154  *	the caller is willing to block for memory.  The function returns an
155  *	opaque value which is zero iff dispatch fails.  If flags is TQ_NOSLEEP
156  *	or TQ_NOALLOC and the task can't be dispatched, taskq_dispatch() fails
157  *	and returns (taskqid_t)0.
158  *
159  *	ASSUMES: func != NULL.
160  *
161  *	Possible flags:
162  *	  TQ_NOSLEEP: Do not wait for resources; may fail.
163  *
164  *	  TQ_NOALLOC: Do not allocate memory; may fail.  May only be used with
165  *		non-dynamic task queues.
166  *
167  *	  TQ_NOQUEUE: Fail rather than enqueue the task if it can't be
168  *		dispatched due to lack of available resources. If this flag is not
169  *		set, and the task pool is exhausted, the task may be scheduled
170  *		in the backing queue. This flag may ONLY be used with dynamic
171  *		task queues.
172  *
173  *		NOTE: This flag should always be used when a task queue is used
174  *		for tasks that may depend on each other for completion.
175  *		Enqueueing dependent tasks may create deadlocks.
176  *
177  *	  TQ_SLEEP:   May block waiting for resources. May still fail for
178  *		dynamic task queues if TQ_NOQUEUE is also specified; otherwise
179  *		it always succeeds.
180  *
181  *	  TQ_FRONT:   Puts the new task at the front of the queue.  Be careful.
182  *
183  *	NOTE: Dynamic task queues are much more likely to fail in
184  *		taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
185  *		is important to have backup strategies handling such failures.
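 *
 *		As a minimal sketch of one such backup strategy (the names
 *		'my_func' and 'my_arg' here are hypothetical), a caller might
 *		fall back to executing the task inline:
 *
 *			if (taskq_dispatch(tq, my_func, my_arg, TQ_NOSLEEP) ==
 *			    (taskqid_t)0)
 *				my_func(my_arg);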
186  *
187  * void taskq_wait(tq):
188  *
189  *	Waits for all previously scheduled tasks to complete.
190  *
191  *	NOTE: It does not stop any new task dispatches.
192  *	      Do NOT call taskq_wait() from a task: it will cause deadlock.
193  *
194  * void taskq_suspend(tq)
195  *
196  *	Suspend all task execution. Tasks already scheduled for a dynamic task
197  *	queue will still be executed, but all newly scheduled tasks will be
198  *	suspended until taskq_resume() is called.
199  *
200  * int  taskq_suspended(tq)
201  *
202  *	Returns 1 if taskq is suspended and 0 otherwise. It is intended to
203  *	ASSERT that the task queue is suspended.
204  *
205  * void taskq_resume(tq)
206  *
207  *	Resume task queue execution.
208  *
209  * int  taskq_member(tq, thread)
210  *
211  *	Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
212  *	intended use is to ASSERT that a given function is called in taskq
213  *	context only.
214  *
215  * system_taskq
216  *
217  *	Global system-wide dynamic task queue for common uses. It may be used by
218  *	any subsystem that needs to schedule tasks and does not need to manage
219  *	its own task queues. It is initialized quite early during system boot.
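 *
 *	For example (a sketch; 'my_func' and 'my_arg' are hypothetical):
 *
 *		(void) taskq_dispatch(system_taskq, my_func, my_arg, TQ_SLEEP);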
220  *
221  * IMPLEMENTATION ==============================================================
222  *
223  * This is a schematic representation of the task queue structures.
224  *
225  *   taskq:
226  *   +-------------+
227  *   | tq_lock     | +---< taskq_ent_free()
228  *   +-------------+ |
229  *   |...          | | tqent:                  tqent:
230  *   +-------------+ | +------------+          +------------+
231  *   | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
232  *   +-------------+   +------------+          +------------+
233  *   |...          |   | ...        |          | ...        |
234  *   +-------------+   +------------+          +------------+
235  *   | tq_task     |    |
236  *   |             |    +-------------->taskq_ent_alloc()
237  * +--------------------------------------------------------------------------+
238  * | |                     |            tqent                   tqent         |
239  * | +---------------------+     +--> +------------+     +--> +------------+  |
240  * | | ...		   |     |    | func, arg  |     |    | func, arg  |  |
241  * +>+---------------------+ <---|-+  +------------+ <---|-+  +------------+  |
242  *   | tq_taskq.tqent_next | ----+ |  | tqent_next | --->+ |  | tqent_next |--+
243  *   +---------------------+	   |  +------------+     ^ |  +------------+
244  * +-| tq_task.tqent_prev  |	   +--| tqent_prev |     | +--| tqent_prev |  ^
245  * | +---------------------+	      +------------+     |    +------------+  |
246  * | |...		   |	      | ...        |     |    | ...        |  |
247  * | +---------------------+	      +------------+     |    +------------+  |
248  * |                                      ^              |                    |
249  * |                                      |              |                    |
250  * +--------------------------------------+--------------+       TQ_APPEND() -+
251  *   |             |                      |
252  *   |...          |   taskq_thread()-----+
253  *   +-------------+
254  *   | tq_buckets  |--+-------> [ NULL ] (for regular task queues)
255  *   +-------------+  |
256  *                    |   DYNAMIC TASK QUEUES:
257  *                    |
258  *                    +-> taskq_bucket[nCPU]		taskq_bucket_dispatch()
259  *                        +-------------------+                    ^
260  *                   +--->| tqbucket_lock     |                    |
261  *                   |    +-------------------+   +--------+      +--------+
262  *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  | ^
263  *                   |    +-------------------+<--+--------+<--...+--------+ |
264  *                   |    | ...               |   | thread |      | thread | |
265  *                   |    +-------------------+   +--------+      +--------+ |
266  *                   |    +-------------------+                              |
267  * taskq_dispatch()--+--->| tqbucket_lock     |             TQ_APPEND()------+
268  *      TQ_HASH()    |    +-------------------+   +--------+      +--------+
269  *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  |
270  *                   |    +-------------------+<--+--------+<--...+--------+
271  *                   |    | ...               |   | thread |      | thread |
272  *                   |    +-------------------+   +--------+      +--------+
273  *		     +--->	...
274  *
275  *
276  * Task queues use the tq_task field to link new entries into the queue. The
277  * queue is a circular doubly-linked list. Entries are put at the end of the
278  * list with TQ_APPEND() and processed from the front of the list by
279  * taskq_thread() in FIFO order. Task queue entries are cached in the free
280  * list managed by the taskq_ent_alloc() and taskq_ent_free() functions.
281  *
282  *	All threads used by task queues set the thread's t_taskq field to
283  *	point to the task queue.
284  *
285  * Taskq Thread Management -----------------------------------------------------
286  *
287  * Taskq's non-dynamic threads are managed with several variables and flags:
288  *
289  *	* tq_nthreads	- The number of threads in taskq_thread() for the
290  *			  taskq.
291  *
292  *	* tq_active	- The number of threads not waiting on a CV in
293  *			  taskq_thread(); includes newly created threads
294  *			  not yet counted in tq_nthreads.
295  *
296  *	* tq_nthreads_target
297  *			- The number of threads desired for the taskq.
298  *
299  *	* tq_flags & TASKQ_CHANGING
300  *			- Indicates that tq_nthreads != tq_nthreads_target.
301  *
302  *	* tq_flags & TASKQ_THREAD_CREATED
303  *			- Indicates that a thread is being created in the taskq.
304  *
305  * During creation, tq_nthreads and tq_active are set to 0, and
306  * tq_nthreads_target is set to the number of threads desired.  The
307  * TASKQ_CHANGING flag is set, and taskq_thread_create() is called to
308  * create the first thread. taskq_thread_create() increments tq_active,
309  * sets TASKQ_THREAD_CREATED, and creates the new thread.
310  *
311  * Each thread starts in taskq_thread(), clears the TASKQ_THREAD_CREATED
312  * flag, and increments tq_nthreads.  It stores the new value of
313  * tq_nthreads as its "thread_id", and stores its thread pointer in the
314  * tq_threadlist at index (thread_id - 1).  We keep the thread_id space
315  * densely packed by requiring that only the largest thread_id can exit during
316  * normal adjustment.  The exception is during the destruction of the
317  * taskq; once tq_nthreads_target is set to zero, no new threads will be created
318  * for the taskq queue, so every thread can exit without any ordering being
319  * necessary.
320  *
321  * Threads will only process work if their thread id is <= tq_nthreads_target.
322  *
323  * When TASKQ_CHANGING is set, threads will check the current thread target
324  * whenever they wake up, and do whatever they can to apply its effects.
325  *
326  * TASKQ_THREADS_CPU_PCT -------------------------------------------------------
327  *
328  * When a taskq is created with TASKQ_THREADS_CPU_PCT, we store the requested
329  * percentage in tq_threads_ncpus_pct, start it off with the correct thread
330  * target, and add it to the taskq_cpupct_list for later adjustment.
331  *
332  * We register taskq_cpu_setup() to be called whenever a CPU changes state.  It
333  * walks the list of TASKQ_THREADS_CPU_PCT taskqs, adjusts their nthreads_target
334  * if need be, and wakes up all of the threads to process the change.
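 *
 * A hypothetical creation sketch: a taskq sized to 25% of the online CPUs,
 * automatically re-sized as CPUs come and go:
 *
 *	tq = taskq_create("my_tq", 25, minclsyspri, 2, INT_MAX,
 *	    TASKQ_THREADS_CPU_PCT);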
335  *
336  * Dynamic Task Queues Implementation ------------------------------------------
337  *
338  * For dynamic task queues there is a 1-to-1 mapping between a thread and a
339  * taskq_ent_t structure. Each entry is serviced by its own thread and each
340  * thread is controlled by a single entry.
341  *
342  * Entries are distributed over a set of buckets. To avoid using modulo
343  * arithmetic the number of buckets is 2^n, determined as the number of CPUs
344  * in the system rounded down to the nearest power of two. The tunable
345  * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each entry
346  * is attached to a bucket for its lifetime and can't migrate to other buckets.
347  *
348  * Entries that have scheduled tasks are not placed in any list. The dispatch
349  * function sets their "func" and "arg" fields and signals the corresponding
350  * thread to execute the task. Once the thread executes the task it clears the
351  * "func" field and places the entry on the bucket cache of free entries pointed
352  * to by the "tqbucket_freelist" field. ALL entries on the free list should have "func"
353  * field equal to NULL. The free list is a circular doubly-linked list identical
354  * in structure to the tq_task list above, but entries are taken from it in LIFO
355  * order - the last freed entry is the first to be allocated. The
356  * taskq_bucket_dispatch() function gets the most recently used entry from the
357  * free list, sets its "func" and "arg" fields and signals a worker thread.
358  *
359  * After executing each task a per-entry thread taskq_d_thread() places its
360  * entry on the bucket free list and goes to a timed sleep. If it wakes up
361  * without getting new task it removes the entry from the free list and destroys
362  * itself. The thread sleep time is controlled by a tunable variable
363  * `taskq_thread_timeout'.
364  *
365  * There are various statistics kept in the bucket which allow for later
366  * analysis of taskq usage patterns. Also, a global copy of taskq creation and
367  * death statistics is kept in the global taskq data structure. Since thread
368  * creation and death happen rarely, updating such global data does not present
369  * a performance problem.
370  *
371  * NOTE: Threads are not bound to any CPU and there is absolutely no association
372  *       between the bucket and actual thread CPU, so buckets are used only to
373  *	 split resources and reduce resource contention. Having threads attached
374  *	 to the CPU denoted by a bucket may reduce the number of times the job
375  *	 switches between CPUs.
376  *
377  *	 The current algorithm creates a thread whenever a bucket has no free
378  *	 entries. It would be nice to know how many threads are in the running
379  *	 state and avoid creating threads if all CPUs are busy with existing
380  *	 tasks, but it is unclear how such a strategy can be implemented.
381  *
382  *	 Currently buckets are created statically as an array attached to the
383  *	 task queue. On systems with nCPUs < max_ncpus this may waste system
384  *	 memory. One solution may be allocation of buckets when they are first
385  *	 touched, but it is not clear how useful it is.
386  *
387  * SUSPEND/RESUME implementation -----------------------------------------------
388  *
389  *	Before executing a task taskq_thread() (executing non-dynamic task
390  *	queues) obtains taskq's thread lock as a reader. The taskq_suspend()
391  *	function gets the same lock as a writer blocking all non-dynamic task
392  *	execution. The taskq_resume() function releases the lock allowing
393  *	taskq_thread to continue execution.
394  *
395  *	For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
396  *	the taskq_suspend() function. After that taskq_bucket_dispatch() always
397  *	fails, so that taskq_dispatch() will either enqueue tasks for a
398  *	suspended backing queue or fail if TQ_NOQUEUE is specified in dispatch
399  *	flags.
400  *
401  *	NOTE: taskq_suspend() does not immediately block any tasks already
402  *	      scheduled for dynamic task queues. It only suspends new tasks
403  *	      scheduled after taskq_suspend() was called.
404  *
405  *	The taskq_member() function works by comparing a thread's t_taskq pointer
406  *	with the passed taskq pointer.
407  *
408  * LOCKS and LOCK Hierarchy ----------------------------------------------------
409  *
410  *   There are three locks used in task queues:
411  *
412  *   1) The taskq_t's tq_lock, protecting global task queue state.
413  *
414  *   2) Each per-CPU bucket has a lock for bucket management.
415  *
416  *   3) The global taskq_cpupct_lock, which protects the list of
417  *      TASKQ_THREADS_CPU_PCT taskqs.
418  *
419  *   If both (1) and (2) are needed, tq_lock should be taken *after* the bucket
420  *   lock.
421  *
422  *   If both (1) and (3) are needed, tq_lock should be taken *after*
423  *   taskq_cpupct_lock.
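 *
 *   For instance, code that needs both a bucket lock and tq_lock should
 *   acquire them in this order (a sketch):
 *
 *	mutex_enter(&b->tqbucket_lock);
 *	mutex_enter(&tq->tq_lock);
 *	...
 *	mutex_exit(&tq->tq_lock);
 *	mutex_exit(&b->tqbucket_lock);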
424  *
425  * DEBUG FACILITIES ------------------------------------------------------------
426  *
427  * For DEBUG kernels it is possible to induce random failures in the
428  * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The values
429  * of the taskq_dmtbf and taskq_smtbf tunables control the mean time between induced
430  * failures for dynamic and static task queues respectively.
431  *
432  * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
433  *
434  * TUNABLES --------------------------------------------------------------------
435  *
436  *	system_taskq_size	- Size of the global system_taskq.
437  *				  This value is multiplied by nCPUs to determine
438  *				  actual size.
439  *				  Default value: 64
440  *
441  *	taskq_minimum_nthreads_max
442  *				- Minimum size of the thread list for a taskq.
443  *				  Useful for testing different thread pool
444  *				  sizes by overwriting tq_nthreads_target.
445  *
446  *	taskq_thread_timeout	- Maximum idle time for taskq_d_thread()
447  *				  Default value: 5 minutes
448  *
449  *	taskq_maxbuckets	- Maximum number of buckets in any task queue
450  *				  Default value: 128
451  *
452  *	taskq_search_depth	- Maximum # of buckets searched for a free entry
453  *				  Default value: 4
454  *
455  *	taskq_dmtbf		- Mean time between induced dispatch failures
456  *				  for dynamic task queues.
457  *				  Default value: UINT_MAX (no induced failures)
458  *
459  *	taskq_smtbf		- Mean time between induced dispatch failures
460  *				  for static task queues.
461  *				  Default value: UINT_MAX (no induced failures)
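 *
 *	These are ordinary kernel variables, so (as a sketch, assuming the
 *	usual /etc/system mechanism) a static override could look like:
 *
 *		set taskq_thread_timeout = 600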
462  *
463  * CONDITIONAL compilation -----------------------------------------------------
464  *
465  *    TASKQ_STATISTIC	- If set, enables bucket statistics (default).
466  *
467  */
468 
469 #include <sys/taskq_impl.h>
470 #include <sys/thread.h>
471 #include <sys/proc.h>
472 #include <sys/kmem.h>
473 #include <sys/vmem.h>
474 #include <sys/callb.h>
475 #include <sys/class.h>
476 #include <sys/systm.h>
477 #include <sys/cmn_err.h>
478 #include <sys/debug.h>
479 #include <sys/vmsystm.h>	/* For throttlefree */
480 #include <sys/sysmacros.h>
481 #include <sys/cpuvar.h>
482 #include <sys/cpupart.h>
483 #include <sys/sdt.h>
484 #include <sys/sysdc.h>
485 #include <sys/note.h>
486 
487 static kmem_cache_t *taskq_ent_cache, *taskq_cache;
488 
489 /*
490  * Pseudo instance numbers for taskqs without explicitly provided instance.
491  */
492 static vmem_t *taskq_id_arena;
493 
494 /* Global system task queue for common use */
495 taskq_t	*system_taskq;
496 
497 /*
498  * Maximum number of entries in global system taskq is
499  *	system_taskq_size * max_ncpus
500  */
501 #define	SYSTEM_TASKQ_SIZE 64
502 int system_taskq_size = SYSTEM_TASKQ_SIZE;
503 
504 /*
505  * Minimum size for tq_nthreads_max; useful for those who want to play around
506  * with increasing a taskq's tq_nthreads_target.
507  */
508 int taskq_minimum_nthreads_max = 1;
509 
510 /*
511  * We want to ensure that when taskq_create() returns, there is at least
512  * one thread ready to handle requests.  To guarantee this, we have to wait
513  * for the second thread, since the first one cannot process requests until
514  * the second thread has been created.
515  */
516 #define	TASKQ_CREATE_ACTIVE_THREADS	2
517 
518 /* Maximum percentage allowed for TASKQ_THREADS_CPU_PCT */
519 #define	TASKQ_CPUPCT_MAX_PERCENT	1000
520 int taskq_cpupct_max_percent = TASKQ_CPUPCT_MAX_PERCENT;
521 
522 /*
523  * Dynamic task queue threads that don't get any work within
524  * taskq_thread_timeout destroy themselves.
525  */
526 #define	TASKQ_THREAD_TIMEOUT (60 * 5)
527 int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;
528 
529 #define	TASKQ_MAXBUCKETS 128
530 int taskq_maxbuckets = TASKQ_MAXBUCKETS;
531 
532 /*
533  * When a bucket has no available entries, other buckets are tried. The
534  * taskq_search_depth parameter limits the number of buckets that we search
535  * before failing. This is mostly useful in systems with many CPUs where we may
536  * spend too much time scanning busy buckets.
537  */
538 #define	TASKQ_SEARCH_DEPTH 4
539 int taskq_search_depth = TASKQ_SEARCH_DEPTH;
540 
541 /*
542  * Hashing function: mix various bits of x. May be pretty much anything.
543  */
544 #define	TQ_HASH(x) ((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))
545 
546 /*
547  * We do not create any new threads when the system is low on memory and has
548  * started throttling memory allocations. The following macro tries to detect
549  * such a condition.
550  */
551 #define	ENOUGH_MEMORY() (freemem > throttlefree)
552 
553 /*
554  * Static functions.
555  */
556 static taskq_t	*taskq_create_common(const char *, int, int, pri_t, int,
557     int, proc_t *, uint_t, uint_t);
558 static void taskq_thread(void *);
559 static void taskq_d_thread(taskq_ent_t *);
560 static void taskq_bucket_extend(void *);
561 static int  taskq_constructor(void *, void *, int);
562 static void taskq_destructor(void *, void *);
563 static int  taskq_ent_constructor(void *, void *, int);
564 static void taskq_ent_destructor(void *, void *);
565 static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
566 static void taskq_ent_free(taskq_t *, taskq_ent_t *);
567 static int taskq_ent_exists(taskq_t *, task_func_t, void *);
568 static taskq_ent_t *taskq_bucket_dispatch(taskq_bucket_t *, task_func_t,
569     void *);
570 
571 /*
572  * Task queues kstats.
573  */
574 struct taskq_kstat {
575 	kstat_named_t	tq_pid;
576 	kstat_named_t	tq_tasks;
577 	kstat_named_t	tq_executed;
578 	kstat_named_t	tq_maxtasks;
579 	kstat_named_t	tq_totaltime;
580 	kstat_named_t	tq_nalloc;
581 	kstat_named_t	tq_nactive;
582 	kstat_named_t	tq_pri;
583 	kstat_named_t	tq_nthreads;
584 } taskq_kstat = {
585 	{ "pid",		KSTAT_DATA_UINT64 },
586 	{ "tasks",		KSTAT_DATA_UINT64 },
587 	{ "executed",		KSTAT_DATA_UINT64 },
588 	{ "maxtasks",		KSTAT_DATA_UINT64 },
589 	{ "totaltime",		KSTAT_DATA_UINT64 },
590 	{ "nalloc",		KSTAT_DATA_UINT64 },
591 	{ "nactive",		KSTAT_DATA_UINT64 },
592 	{ "priority",		KSTAT_DATA_UINT64 },
593 	{ "threads",		KSTAT_DATA_UINT64 },
594 };
595 
596 struct taskq_d_kstat {
597 	kstat_named_t	tqd_pri;
598 	kstat_named_t	tqd_btasks;
599 	kstat_named_t	tqd_bexecuted;
600 	kstat_named_t	tqd_bmaxtasks;
601 	kstat_named_t	tqd_bnalloc;
602 	kstat_named_t	tqd_bnactive;
603 	kstat_named_t	tqd_btotaltime;
604 	kstat_named_t	tqd_hits;
605 	kstat_named_t	tqd_misses;
606 	kstat_named_t	tqd_overflows;
607 	kstat_named_t	tqd_tcreates;
608 	kstat_named_t	tqd_tdeaths;
609 	kstat_named_t	tqd_maxthreads;
610 	kstat_named_t	tqd_nomem;
611 	kstat_named_t	tqd_disptcreates;
612 	kstat_named_t	tqd_totaltime;
613 	kstat_named_t	tqd_nalloc;
614 	kstat_named_t	tqd_nfree;
615 } taskq_d_kstat = {
616 	{ "priority",		KSTAT_DATA_UINT64 },
617 	{ "btasks",		KSTAT_DATA_UINT64 },
618 	{ "bexecuted",		KSTAT_DATA_UINT64 },
619 	{ "bmaxtasks",		KSTAT_DATA_UINT64 },
620 	{ "bnalloc",		KSTAT_DATA_UINT64 },
621 	{ "bnactive",		KSTAT_DATA_UINT64 },
622 	{ "btotaltime",		KSTAT_DATA_UINT64 },
623 	{ "hits",		KSTAT_DATA_UINT64 },
624 	{ "misses",		KSTAT_DATA_UINT64 },
625 	{ "overflows",		KSTAT_DATA_UINT64 },
626 	{ "tcreates",		KSTAT_DATA_UINT64 },
627 	{ "tdeaths",		KSTAT_DATA_UINT64 },
628 	{ "maxthreads",		KSTAT_DATA_UINT64 },
629 	{ "nomem",		KSTAT_DATA_UINT64 },
630 	{ "disptcreates",	KSTAT_DATA_UINT64 },
631 	{ "totaltime",		KSTAT_DATA_UINT64 },
632 	{ "nalloc",		KSTAT_DATA_UINT64 },
633 	{ "nfree",		KSTAT_DATA_UINT64 },
634 };
635 
636 static kmutex_t taskq_kstat_lock;
637 static kmutex_t taskq_d_kstat_lock;
638 static int taskq_kstat_update(kstat_t *, int);
639 static int taskq_d_kstat_update(kstat_t *, int);
640 
641 /*
642  * List of all TASKQ_THREADS_CPU_PCT taskqs.
643  */
644 static list_t taskq_cpupct_list;	/* protected by cpu_lock */
645 
646 /*
647  * Collect per-bucket statistics when TASKQ_STATISTIC is defined.
648  */
649 #define	TASKQ_STATISTIC 1
650 
651 #if TASKQ_STATISTIC
652 #define	TQ_STAT(b, x)	b->tqbucket_stat.x++
653 #else
654 #define	TQ_STAT(b, x)
655 #endif
656 
657 /*
658  * Random fault injection.
659  */
660 uint_t taskq_random;
661 uint_t taskq_dmtbf = UINT_MAX;    /* mean time between injected failures */
662 uint_t taskq_smtbf = UINT_MAX;    /* mean time between injected failures */
663 
664 /*
665  * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
666  *
667  * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
668  * they could prepopulate the cache and make sure that they do not use more
669  * than minalloc entries.  So, fault injection in this case ensures that
670  * either TASKQ_PREPOPULATE is not set or there are more entries allocated
671  * than is specified by minalloc.  TQ_NOALLOC dispatches are always allowed
672  * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
673  * dispatches.
674  */
675 #ifdef DEBUG
676 #define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)		\
677 	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
678 	if ((flag & TQ_NOSLEEP) &&				\
679 	    taskq_random < 1771875 / taskq_dmtbf) {		\
680 		return (NULL);					\
681 	}
682 
683 #define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)		\
684 	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
685 	if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) &&		\
686 	    (!(tq->tq_flags & TASKQ_PREPOPULATE) ||		\
687 	    (tq->tq_nalloc > tq->tq_minalloc)) &&		\
688 	    (taskq_random < (1771875 / taskq_smtbf))) {		\
689 		mutex_exit(&tq->tq_lock);			\
690 		return (NULL);					\
691 	}
692 #else
693 #define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
694 #define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
695 #endif
696 
697 #define	IS_EMPTY(l) (((l).tqent_prev == (l).tqent_next) &&	\
698 	((l).tqent_prev == &(l)))
699 
700 /*
701  * Append `tqe' at the end of the doubly-linked list denoted by l.
702  */
703 #define	TQ_APPEND(l, tqe) {					\
704 	tqe->tqent_next = &l;					\
705 	tqe->tqent_prev = l.tqent_prev;				\
706 	tqe->tqent_next->tqent_prev = tqe;			\
707 	tqe->tqent_prev->tqent_next = tqe;			\
708 }
709 /*
710  * Prepend 'tqe' to the beginning of l
711  */
712 #define	TQ_PREPEND(l, tqe) {					\
713 	tqe->tqent_next = l.tqent_next;				\
714 	tqe->tqent_prev = &l;					\
715 	tqe->tqent_next->tqent_prev = tqe;			\
716 	tqe->tqent_prev->tqent_next = tqe;			\
717 }
718 
719 /*
720  * Schedule a task specified by func and arg into the task queue entry tqe.
721  */
722 #define	TQ_DO_ENQUEUE(tq, tqe, func, arg, front) {			\
723 	ASSERT(MUTEX_HELD(&tq->tq_lock));				\
724 	_NOTE(CONSTCOND)						\
725 	if (front) {							\
726 		TQ_PREPEND(tq->tq_task, tqe);				\
727 	} else {							\
728 		TQ_APPEND(tq->tq_task, tqe);				\
729 	}								\
730 	tqe->tqent_func = (func);					\
731 	tqe->tqent_arg = (arg);						\
732 	tq->tq_tasks++;							\
733 	if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks)		\
734 		tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed;	\
735 	cv_signal(&tq->tq_dispatch_cv);					\
736 	DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
737 }
738 
739 #define	TQ_ENQUEUE(tq, tqe, func, arg)					\
740 	TQ_DO_ENQUEUE(tq, tqe, func, arg, 0)
741 
742 #define	TQ_ENQUEUE_FRONT(tq, tqe, func, arg)				\
743 	TQ_DO_ENQUEUE(tq, tqe, func, arg, 1)
744 
745 /*
746  * Do-nothing task which may be used to prepopulate thread caches.
747  */
748 /*ARGSUSED*/
749 void
750 nulltask(void *unused)
751 {
752 }
753 
754 /*ARGSUSED*/
755 static int
756 taskq_constructor(void *buf, void *cdrarg, int kmflags)
757 {
758 	taskq_t *tq = buf;
759 
760 	bzero(tq, sizeof (taskq_t));
761 
762 	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
763 	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
764 	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
765 	cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
766 	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
767 	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
768 
769 	tq->tq_task.tqent_next = &tq->tq_task;
770 	tq->tq_task.tqent_prev = &tq->tq_task;
771 
772 	return (0);
773 }
774 
775 /*ARGSUSED*/
776 static void
777 taskq_destructor(void *buf, void *cdrarg)
778 {
779 	taskq_t *tq = buf;
780 
781 	ASSERT(tq->tq_nthreads == 0);
782 	ASSERT(tq->tq_buckets == NULL);
783 	ASSERT(tq->tq_tcreates == 0);
784 	ASSERT(tq->tq_tdeaths == 0);
785 
786 	mutex_destroy(&tq->tq_lock);
787 	rw_destroy(&tq->tq_threadlock);
788 	cv_destroy(&tq->tq_dispatch_cv);
789 	cv_destroy(&tq->tq_exit_cv);
790 	cv_destroy(&tq->tq_wait_cv);
791 	cv_destroy(&tq->tq_maxalloc_cv);
792 }
793 
794 /*ARGSUSED*/
795 static int
796 taskq_ent_constructor(void *buf, void *cdrarg, int kmflags)
797 {
798 	taskq_ent_t *tqe = buf;
799 
800 	tqe->tqent_thread = NULL;
801 	cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);
802 
803 	return (0);
804 }
805 
806 /*ARGSUSED*/
807 static void
808 taskq_ent_destructor(void *buf, void *cdrarg)
809 {
810 	taskq_ent_t *tqe = buf;
811 
812 	ASSERT(tqe->tqent_thread == NULL);
813 	cv_destroy(&tqe->tqent_cv);
814 }
815 
816 void
817 taskq_init(void)
818 {
819 	taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
820 	    sizeof (taskq_ent_t), 0, taskq_ent_constructor,
821 	    taskq_ent_destructor, NULL, NULL, NULL, 0);
822 	taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
823 	    0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
824 	taskq_id_arena = vmem_create("taskq_id_arena",
825 	    (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0,
826 	    VM_SLEEP | VMC_IDENTIFIER);
827 
828 	list_create(&taskq_cpupct_list, sizeof (taskq_t),
829 	    offsetof(taskq_t, tq_cpupct_link));
830 }
831 
832 static void
833 taskq_update_nthreads(taskq_t *tq, uint_t ncpus)
834 {
835 	uint_t newtarget = TASKQ_THREADS_PCT(ncpus, tq->tq_threads_ncpus_pct);
836 
837 	ASSERT(MUTEX_HELD(&cpu_lock));
838 	ASSERT(MUTEX_HELD(&tq->tq_lock));
839 
840 	/* We must be going from non-zero to non-zero; no exiting. */
841 	ASSERT3U(tq->tq_nthreads_target, !=, 0);
842 	ASSERT3U(newtarget, !=, 0);
843 
844 	ASSERT3U(newtarget, <=, tq->tq_nthreads_max);
845 	if (newtarget != tq->tq_nthreads_target) {
846 		tq->tq_flags |= TASKQ_CHANGING;
847 		tq->tq_nthreads_target = newtarget;
848 		cv_broadcast(&tq->tq_dispatch_cv);
849 		cv_broadcast(&tq->tq_exit_cv);
850 	}
851 }
852 
853 /* called during task queue creation */
854 static void
855 taskq_cpupct_install(taskq_t *tq, cpupart_t *cpup)
856 {
857 	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
858 
859 	mutex_enter(&cpu_lock);
860 	mutex_enter(&tq->tq_lock);
861 	tq->tq_cpupart = cpup->cp_id;
862 	taskq_update_nthreads(tq, cpup->cp_ncpus);
863 	mutex_exit(&tq->tq_lock);
864 
865 	list_insert_tail(&taskq_cpupct_list, tq);
866 	mutex_exit(&cpu_lock);
867 }
868 
869 static void
870 taskq_cpupct_remove(taskq_t *tq)
871 {
872 	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
873 
874 	mutex_enter(&cpu_lock);
875 	list_remove(&taskq_cpupct_list, tq);
876 	mutex_exit(&cpu_lock);
877 }
878 
879 /*ARGSUSED*/
880 static int
881 taskq_cpu_setup(cpu_setup_t what, int id, void *arg)
882 {
883 	taskq_t *tq;
884 	cpupart_t *cp = cpu[id]->cpu_part;
885 	uint_t ncpus = cp->cp_ncpus;
886 
887 	ASSERT(MUTEX_HELD(&cpu_lock));
888 	ASSERT(ncpus > 0);
889 
890 	switch (what) {
891 	case CPU_OFF:
892 	case CPU_CPUPART_OUT:
893 		/* offlines are called *before* the cpu is offlined. */
894 		if (ncpus > 1)
895 			ncpus--;
896 		break;
897 
898 	case CPU_ON:
899 	case CPU_CPUPART_IN:
900 		break;
901 
902 	default:
903 		return (0);		/* doesn't affect cpu count */
904 	}
905 
906 	for (tq = list_head(&taskq_cpupct_list); tq != NULL;
907 	    tq = list_next(&taskq_cpupct_list, tq)) {
908 
909 		mutex_enter(&tq->tq_lock);
910 		/*
911 		 * If the taskq is part of the cpu partition which is changing,
912 		 * update its nthreads_target.
913 		 */
914 		if (tq->tq_cpupart == cp->cp_id) {
915 			taskq_update_nthreads(tq, ncpus);
916 		}
917 		mutex_exit(&tq->tq_lock);
918 	}
919 	return (0);
920 }
921 
922 void
923 taskq_mp_init(void)
924 {
925 	mutex_enter(&cpu_lock);
926 	register_cpu_setup_func(taskq_cpu_setup, NULL);
927 	/*
928 	 * Make sure we're up to date.  At this point in boot, there is only
929 	 * one processor set, so we only have to update the current CPU.
930 	 */
931 	(void) taskq_cpu_setup(CPU_ON, CPU->cpu_id, NULL);
932 	mutex_exit(&cpu_lock);
933 }
934 
935 /*
936  * Create global system dynamic task queue.
937  */
938 void
939 system_taskq_init(void)
940 {
941 	system_taskq = taskq_create_common("system_taskq", 0,
942 	    system_taskq_size * max_ncpus, minclsyspri, 4, 512, &p0, 0,
943 	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
944 }
945 
946 /*
947  * taskq_ent_alloc()
948  *
949  * Allocates a new taskq_ent_t structure either from the free list or from the
950  * cache. Returns NULL if it can't be allocated.
951  *
952  * Assumes: tq->tq_lock is held.
953  */
954 static taskq_ent_t *
955 taskq_ent_alloc(taskq_t *tq, int flags)
956 {
957 	int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
958 	taskq_ent_t *tqe;
959 	clock_t wait_time;
960 	clock_t	wait_rv;
961 
962 	ASSERT(MUTEX_HELD(&tq->tq_lock));
963 
964 	/*
965 	 * TQ_NOALLOC allocations are allowed to use the freelist, even if
966 	 * we are below tq_minalloc.
967 	 */
968 again:	if ((tqe = tq->tq_freelist) != NULL &&
969 	    ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
970 		tq->tq_freelist = tqe->tqent_next;
971 	} else {
972 		if (flags & TQ_NOALLOC)
973 			return (NULL);
974 
975 		if (tq->tq_nalloc >= tq->tq_maxalloc) {
976 			if (kmflags & KM_NOSLEEP)
977 				return (NULL);
978 
979 			/*
980 			 * We don't want to exceed tq_maxalloc, but we can't
981 			 * wait for other tasks to complete (and thus free up
982 			 * task structures) without risking deadlock with
983 			 * the caller.  So, we just delay for one second
984 			 * to throttle the allocation rate. If tasks
985 			 * complete before the one-second timeout expires,
986 			 * taskq_ent_free() will signal us and we will
987 			 * immediately retry the allocation (reap free).
988 			 */
989 			wait_time = ddi_get_lbolt() + hz;
990 			while (tq->tq_freelist == NULL) {
991 				tq->tq_maxalloc_wait++;
992 				wait_rv = cv_timedwait(&tq->tq_maxalloc_cv,
993 				    &tq->tq_lock, wait_time);
994 				tq->tq_maxalloc_wait--;
995 				if (wait_rv == -1)
996 					break;
997 			}
998 			if (tq->tq_freelist)
999 				goto again;		/* reap freelist */
1000 
1001 		}
1002 		mutex_exit(&tq->tq_lock);
1003 
1004 		tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);
1005 
1006 		mutex_enter(&tq->tq_lock);
1007 		if (tqe != NULL)
1008 			tq->tq_nalloc++;
1009 	}
1010 	return (tqe);
1011 }
1012 
1013 /*
1014  * taskq_ent_free()
1015  *
1016  * Free a taskq_ent_t structure by either putting it on the free list or freeing
1017  * it to the cache.
1018  *
1019  * Assumes: tq->tq_lock is held.
1020  */
1021 static void
1022 taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
1023 {
1024 	ASSERT(MUTEX_HELD(&tq->tq_lock));
1025 
1026 	if (tq->tq_nalloc <= tq->tq_minalloc) {
1027 		tqe->tqent_next = tq->tq_freelist;
1028 		tq->tq_freelist = tqe;
1029 	} else {
1030 		tq->tq_nalloc--;
1031 		mutex_exit(&tq->tq_lock);
1032 		kmem_cache_free(taskq_ent_cache, tqe);
1033 		mutex_enter(&tq->tq_lock);
1034 	}
1035 
1036 	if (tq->tq_maxalloc_wait)
1037 		cv_signal(&tq->tq_maxalloc_cv);
1038 }
1039 
1040 /*
1041  * taskq_ent_exists()
1042  *
1043  * Return 1 if the taskq already has an entry for calling 'func(arg)'.
1044  *
1045  * Assumes: tq->tq_lock is held.
1046  */
1047 static int
1048 taskq_ent_exists(taskq_t *tq, task_func_t func, void *arg)
1049 {
1050 	taskq_ent_t	*tqe;
1051 
1052 	ASSERT(MUTEX_HELD(&tq->tq_lock));
1053 
1054 	for (tqe = tq->tq_task.tqent_next; tqe != &tq->tq_task;
1055 	    tqe = tqe->tqent_next)
1056 		if ((tqe->tqent_func == func) && (tqe->tqent_arg == arg))
1057 			return (1);
1058 	return (0);
1059 }
1060 
1061 /*
1062  * Dispatch a task "func(arg)" to a free entry of bucket b.
1063  *
1064  * Assumes: no bucket locks are held.
1065  *
1066  * Returns: a pointer to an entry if dispatch was successful.
1067  *	    NULL if there are no free entries or if the bucket is suspended.
1068  */
1069 static taskq_ent_t *
1070 taskq_bucket_dispatch(taskq_bucket_t *b, task_func_t func, void *arg)
1071 {
1072 	taskq_ent_t *tqe;
1073 
1074 	ASSERT(MUTEX_NOT_HELD(&b->tqbucket_lock));
1075 	ASSERT(func != NULL);
1076 
1077 	mutex_enter(&b->tqbucket_lock);
1078 
1079 	ASSERT(b->tqbucket_nfree != 0 || IS_EMPTY(b->tqbucket_freelist));
1080 	ASSERT(b->tqbucket_nfree == 0 || !IS_EMPTY(b->tqbucket_freelist));
1081 
1082 	/*
1083 	 * Get an entry from the freelist if there is one.
1084 	 * Schedule the task into the entry.
1085 	 */
1086 	if ((b->tqbucket_nfree != 0) &&
1087 	    !(b->tqbucket_flags & TQBUCKET_SUSPEND)) {
1088 		tqe = b->tqbucket_freelist.tqent_prev;
1089 
1090 		ASSERT(tqe != &b->tqbucket_freelist);
1091 		ASSERT(tqe->tqent_thread != NULL);
1092 
1093 		tqe->tqent_prev->tqent_next = tqe->tqent_next;
1094 		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
1095 		b->tqbucket_nalloc++;
1096 		b->tqbucket_nfree--;
1097 		tqe->tqent_func = func;
1098 		tqe->tqent_arg = arg;
1099 		TQ_STAT(b, tqs_hits);
1100 		cv_signal(&tqe->tqent_cv);
1101 		DTRACE_PROBE2(taskq__d__enqueue, taskq_bucket_t *, b,
1102 		    taskq_ent_t *, tqe);
1103 	} else {
1104 		tqe = NULL;
1105 		TQ_STAT(b, tqs_misses);
1106 	}
1107 	mutex_exit(&b->tqbucket_lock);
1108 	return (tqe);
1109 }
1110 
1111 /*
1112  * Dispatch a task.
1113  *
1114  * Assumes: func != NULL
1115  *
1116  * Returns: NULL if dispatch failed.
1117  *	    non-NULL if task dispatched successfully.
1118  *	    The actual return value is the pointer to the taskq entry that was
1119  *	    used to dispatch the task. This is useful for debugging.
1120  */
1121 /* ARGSUSED */
1122 taskqid_t
1123 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
1124 {
1125 	taskq_bucket_t *bucket = NULL;	/* Which bucket needs extension */
1126 	taskq_ent_t *tqe = NULL;
1127 	taskq_ent_t *tqe1;
1128 	uint_t bsize;
1129 
1130 	ASSERT(tq != NULL);
1131 	ASSERT(func != NULL);
1132 
1133 	if (!(tq->tq_flags & TASKQ_DYNAMIC)) {
1134 		/*
1135 		 * TQ_NOQUEUE flag can't be used with non-dynamic task queues.
1136 		 */
1137 		ASSERT(! (flags & TQ_NOQUEUE));
1138 		/*
1139 		 * Enqueue the task to the underlying queue.
1140 		 */
1141 		mutex_enter(&tq->tq_lock);
1142 
1143 		TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);
1144 
1145 		if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
1146 			mutex_exit(&tq->tq_lock);
1147 			return (NULL);
1148 		}
1149 		if (flags & TQ_FRONT) {
1150 			TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1151 		} else {
1152 			TQ_ENQUEUE(tq, tqe, func, arg);
1153 		}
1154 		mutex_exit(&tq->tq_lock);
1155 		return ((taskqid_t)tqe);
1156 	}
1157 
1158 	/*
1159 	 * Dynamic taskq dispatching.
1160 	 */
1161 	ASSERT(!(flags & (TQ_NOALLOC | TQ_FRONT)));
1162 	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flags);
1163 
1164 	bsize = tq->tq_nbuckets;
1165 
1166 	if (bsize == 1) {
1167 		/*
1168 		 * In the single-CPU case there is only one bucket, so get an
1169 		 * entry directly from there.
1170 		 */
1171 		if ((tqe = taskq_bucket_dispatch(tq->tq_buckets, func, arg))
1172 		    != NULL)
1173 			return ((taskqid_t)tqe);	/* Fastpath */
1174 		bucket = tq->tq_buckets;
1175 	} else {
1176 		int loopcount;
1177 		taskq_bucket_t *b;
1178 		uintptr_t h = ((uintptr_t)CPU + (uintptr_t)arg) >> 3;
1179 
1180 		h = TQ_HASH(h);
1181 
1182 		/*
1183 		 * The 'bucket' points to the original bucket that we hit. If we
1184 		 * can't allocate from it, we search other buckets, but only
1185 		 * extend this one.
1186 		 */
1187 		b = &tq->tq_buckets[h & (bsize - 1)];
1188 		ASSERT(b->tqbucket_taskq == tq);	/* Sanity check */
1189 
1190 		/*
1191 		 * Do a quick check before grabbing the lock. If the bucket does
1192 		 * not have free entries now, chances are very small that it
1193 		 * will after we take the lock, so we just skip it.
1194 		 */
1195 		if (b->tqbucket_nfree != 0) {
1196 			if ((tqe = taskq_bucket_dispatch(b, func, arg)) != NULL)
1197 				return ((taskqid_t)tqe);	/* Fastpath */
1198 		} else {
1199 			TQ_STAT(b, tqs_misses);
1200 		}
1201 
1202 		bucket = b;
1203 		loopcount = MIN(taskq_search_depth, bsize);
1204 		/*
1205 		 * If bucket dispatch failed, search loopcount number of buckets
1206 		 * before we give up and fail.
1207 		 */
1208 		do {
1209 			b = &tq->tq_buckets[++h & (bsize - 1)];
1210 			ASSERT(b->tqbucket_taskq == tq);  /* Sanity check */
1211 			loopcount--;
1212 
1213 			if (b->tqbucket_nfree != 0) {
1214 				tqe = taskq_bucket_dispatch(b, func, arg);
1215 			} else {
1216 				TQ_STAT(b, tqs_misses);
1217 			}
1218 		} while ((tqe == NULL) && (loopcount > 0));
1219 	}
1220 
1221 	/*
1222 	 * At this point we either scheduled a task and (tqe != NULL) or failed
1223 	 * (tqe == NULL). Try to recover from failures.
1224 	 */
1225 
1226 	/*
1227 	 * For TQ_SLEEP dispatches, try to extend the bucket and retry dispatch.
1228 	 */
1229 	if ((tqe == NULL) && !(flags & TQ_NOSLEEP)) {
1230 		/*
1231 		 * taskq_bucket_extend() may fail to do anything, but this is
1232 		 * fine - we deal with it later. If the bucket was successfully
1233 		 * extended, there is a good chance that taskq_bucket_dispatch()
1234 		 * will get this new entry, unless someone is racing with us and
1235 		 * stealing the new entry from under our nose.
1236 		 * taskq_bucket_extend() may sleep.
1237 		 */
1238 		taskq_bucket_extend(bucket);
1239 		TQ_STAT(bucket, tqs_disptcreates);
1240 		if ((tqe = taskq_bucket_dispatch(bucket, func, arg)) != NULL)
1241 			return ((taskqid_t)tqe);
1242 	}
1243 
1244 	ASSERT(bucket != NULL);
1245 
1246 	/*
1247 	 * Since there are not enough free entries in the bucket, add a
1248 	 * taskq entry to extend it in the background using the backing queue
1249 	 * (unless we already have a taskq entry to perform that extension).
1250 	 */
1251 	mutex_enter(&tq->tq_lock);
1252 	if (!taskq_ent_exists(tq, taskq_bucket_extend, bucket)) {
1253 		if ((tqe1 = taskq_ent_alloc(tq, TQ_NOSLEEP)) != NULL) {
1254 			TQ_ENQUEUE_FRONT(tq, tqe1, taskq_bucket_extend, bucket);
1255 		} else {
1256 			TQ_STAT(bucket, tqs_nomem);
1257 		}
1258 	}
1259 
1260 	/*
1261 	 * Dispatch failed and we can't find an entry to schedule a task.
1262 	 * Revert to the backing queue unless TQ_NOQUEUE was specified.
1263 	 */
1264 	if ((tqe == NULL) && !(flags & TQ_NOQUEUE)) {
1265 		if ((tqe = taskq_ent_alloc(tq, flags)) != NULL) {
1266 			TQ_ENQUEUE(tq, tqe, func, arg);
1267 		} else {
1268 			TQ_STAT(bucket, tqs_nomem);
1269 		}
1270 	}
1271 	mutex_exit(&tq->tq_lock);
1272 
1273 	return ((taskqid_t)tqe);
1274 }
1275 
1276 /*
1277  * Wait for all pending tasks to complete.
1278  * Calling taskq_wait from a task will cause deadlock.
1279  */
1280 void
1281 taskq_wait(taskq_t *tq)
1282 {
1283 	ASSERT(tq != curthread->t_taskq);
1284 
1285 	mutex_enter(&tq->tq_lock);
1286 	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
1287 		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1288 	mutex_exit(&tq->tq_lock);
1289 
1290 	if (tq->tq_flags & TASKQ_DYNAMIC) {
1291 		taskq_bucket_t *b = tq->tq_buckets;
1292 		int bid = 0;
1293 		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1294 			mutex_enter(&b->tqbucket_lock);
1295 			while (b->tqbucket_nalloc > 0)
1296 				cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
1297 			mutex_exit(&b->tqbucket_lock);
1298 		}
1299 	}
1300 }
1301 
1302 /*
1303  * Suspend execution of tasks.
1304  *
1305  * Tasks in the queue part will be suspended immediately upon return from this
1306  * function. Pending tasks in the dynamic part will continue to execute, but all
1307  * new tasks will be suspended.
1308  */
1309 void
1310 taskq_suspend(taskq_t *tq)
1311 {
1312 	rw_enter(&tq->tq_threadlock, RW_WRITER);
1313 
1314 	if (tq->tq_flags & TASKQ_DYNAMIC) {
1315 		taskq_bucket_t *b = tq->tq_buckets;
1316 		int bid = 0;
1317 		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1318 			mutex_enter(&b->tqbucket_lock);
1319 			b->tqbucket_flags |= TQBUCKET_SUSPEND;
1320 			mutex_exit(&b->tqbucket_lock);
1321 		}
1322 	}
1323 	/*
1324 	 * Mark task queue as being suspended. Needed for taskq_suspended().
1325 	 */
1326 	mutex_enter(&tq->tq_lock);
1327 	ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
1328 	tq->tq_flags |= TASKQ_SUSPENDED;
1329 	mutex_exit(&tq->tq_lock);
1330 }
1331 
1332 /*
1333  * returns: 1 if tq is suspended, 0 otherwise.
1334  */
1335 int
1336 taskq_suspended(taskq_t *tq)
1337 {
1338 	return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
1339 }
1340 
1341 /*
1342  * Resume taskq execution.
1343  */
1344 void
1345 taskq_resume(taskq_t *tq)
1346 {
1347 	ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));
1348 
1349 	if (tq->tq_flags & TASKQ_DYNAMIC) {
1350 		taskq_bucket_t *b = tq->tq_buckets;
1351 		int bid = 0;
1352 		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1353 			mutex_enter(&b->tqbucket_lock);
1354 			b->tqbucket_flags &= ~TQBUCKET_SUSPEND;
1355 			mutex_exit(&b->tqbucket_lock);
1356 		}
1357 	}
1358 	mutex_enter(&tq->tq_lock);
1359 	ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
1360 	tq->tq_flags &= ~TASKQ_SUSPENDED;
1361 	mutex_exit(&tq->tq_lock);
1362 
1363 	rw_exit(&tq->tq_threadlock);
1364 }
1365 
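/*
 * Return 1 if 'thread' belongs to taskq 'tq' and 0 otherwise; intended for
 * ASSERT()s, e.g. (a sketch): ASSERT(taskq_member(tq, curthread)).
 */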
1366 int
1367 taskq_member(taskq_t *tq, kthread_t *thread)
1368 {
1369 	return (thread->t_taskq == tq);
1370 }
1371 
1372 /*
1373  * Creates a thread in the taskq.  We only allow one outstanding create at
1374  * a time.  We drop and reacquire the tq_lock in order to avoid blocking other
1375  * taskq activity while thread_create() or lwp_kernel_create() run.
1376  *
1377  * The first time we're called, we do some additional setup, and do not
1378  * return until there are enough threads to start servicing requests.
1379  */
1380 static void
1381 taskq_thread_create(taskq_t *tq)
1382 {
1383 	kthread_t	*t;
1384 	const boolean_t	first = (tq->tq_nthreads == 0);
1385 
1386 	ASSERT(MUTEX_HELD(&tq->tq_lock));
1387 	ASSERT(tq->tq_flags & TASKQ_CHANGING);
1388 	ASSERT(tq->tq_nthreads < tq->tq_nthreads_target);
1389 	ASSERT(!(tq->tq_flags & TASKQ_THREAD_CREATED));
1390 
1392 	tq->tq_flags |= TASKQ_THREAD_CREATED;
1393 	tq->tq_active++;
1394 	mutex_exit(&tq->tq_lock);
1395 
1396 	if (tq->tq_proc != &p0) {
1397 		t = lwp_kernel_create(tq->tq_proc, taskq_thread, tq, TS_RUN,
1398 		    tq->tq_pri);
1399 	} else {
1400 		t = thread_create(NULL, 0, taskq_thread, tq, 0, &p0, TS_RUN,
1401 		    tq->tq_pri);
1402 	}
1403 
1404 	if (!first) {
1405 		mutex_enter(&tq->tq_lock);
1406 		return;
1407 	}
1408 
1409 	/*
1410 	 * We know the thread cannot go away, since tq cannot be
1411 	 * destroyed until creation has completed.  We can therefore
1412 	 * safely dereference t.
1413 	 */
1414 	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
1415 		taskq_cpupct_install(tq, t->t_cpupart);
1416 	}
1417 	mutex_enter(&tq->tq_lock);
1418 
1419 	/* Wait until we can service requests. */
1420 	while (tq->tq_nthreads != tq->tq_nthreads_target &&
1421 	    tq->tq_nthreads < TASKQ_CREATE_ACTIVE_THREADS) {
1422 		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1423 	}
1424 }
1425 
1426 /*
1427  * Common "sleep taskq thread" function, which handles CPR stuff, as well
1428  * as giving a nice common point for debuggers to find inactive threads.
1429  */
1430 static clock_t
1431 taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv,
1432     callb_cpr_t *cprinfo, clock_t timeout)
1433 {
1434 	clock_t ret = 0;
1435 
1436 	if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1437 		CALLB_CPR_SAFE_BEGIN(cprinfo);
1438 	}
1439 	if (timeout < 0)
1440 		cv_wait(cv, mx);
1441 	else
1442 		ret = cv_reltimedwait(cv, mx, timeout, TR_CLOCK_TICK);
1443 
1444 	if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1445 		CALLB_CPR_SAFE_END(cprinfo, mx);
1446 	}
1447 
1448 	return (ret);
1449 }
1450 
1451 /*
1452  * Worker thread for processing the task queue.
1453  */
1454 static void
1455 taskq_thread(void *arg)
1456 {
1457 	int thread_id;
1458 
1459 	taskq_t *tq = arg;
1460 	taskq_ent_t *tqe;
1461 	callb_cpr_t cprinfo;
1462 	hrtime_t start, end;
1463 
1464 	curthread->t_taskq = tq;	/* mark ourselves for taskq_member() */
1465 
1466 	if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) {
1467 		sysdc_thread_enter(curthread, tq->tq_DC,
1468 		    (tq->tq_flags & TASKQ_DC_BATCH) ? SYSDC_THREAD_BATCH : 0);
1469 	}
1470 
1471 	if (tq->tq_flags & TASKQ_CPR_SAFE) {
1472 		CALLB_CPR_INIT_SAFE(curthread, tq->tq_name);
1473 	} else {
1474 		CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr,
1475 		    tq->tq_name);
1476 	}
1477 	mutex_enter(&tq->tq_lock);
1478 	thread_id = ++tq->tq_nthreads;
1479 	ASSERT(tq->tq_flags & TASKQ_THREAD_CREATED);
1480 	ASSERT(tq->tq_flags & TASKQ_CHANGING);
1481 	tq->tq_flags &= ~TASKQ_THREAD_CREATED;
1482 
1483 	VERIFY3S(thread_id, <=, tq->tq_nthreads_max);
1484 
1485 	if (tq->tq_nthreads_max == 1)
1486 		tq->tq_thread = curthread;
1487 	else
1488 		tq->tq_threadlist[thread_id - 1] = curthread;
1489 
1490 	/* Allow taskq_create_common()'s taskq_thread_create() to return. */
1491 	if (tq->tq_nthreads == TASKQ_CREATE_ACTIVE_THREADS)
1492 		cv_broadcast(&tq->tq_wait_cv);
1493 
1494 	for (;;) {
1495 		if (tq->tq_flags & TASKQ_CHANGING) {
1496 			/* See if we're no longer needed */
1497 			if (thread_id > tq->tq_nthreads_target) {
1498 				/*
1499 				 * To preserve the one-to-one mapping between
1500 				 * thread_id and thread, we must exit from
1501 				 * highest thread ID to least.
1502 				 *
1503 				 * However, if everyone is exiting, the order
1504 				 * doesn't matter, so just exit immediately.
1505 				 * (this is safe, since you must wait for
1506 				 * nthreads to reach 0 after setting
1507 				 * tq_nthreads_target to 0)
1508 				 */
1509 				if (thread_id == tq->tq_nthreads ||
1510 				    tq->tq_nthreads_target == 0)
1511 					break;
1512 
1513 				/* Wait for higher thread_ids to exit */
1514 				(void) taskq_thread_wait(tq, &tq->tq_lock,
1515 				    &tq->tq_exit_cv, &cprinfo, -1);
1516 				continue;
1517 			}
1518 
1519 			/*
1520 			 * If no thread is starting taskq_thread(), we can
1521 			 * do some bookkeeping.
1522 			 */
1523 			if (!(tq->tq_flags & TASKQ_THREAD_CREATED)) {
1524 				/* Check if we've reached our target */
1525 				if (tq->tq_nthreads == tq->tq_nthreads_target) {
1526 					tq->tq_flags &= ~TASKQ_CHANGING;
1527 					cv_broadcast(&tq->tq_wait_cv);
1528 				}
1529 				/* Check if we need to create a thread */
1530 				if (tq->tq_nthreads < tq->tq_nthreads_target) {
1531 					taskq_thread_create(tq);
1532 					continue; /* tq_lock was dropped */
1533 				}
1534 			}
1535 		}
1536 		if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
1537 			if (--tq->tq_active == 0)
1538 				cv_broadcast(&tq->tq_wait_cv);
1539 			(void) taskq_thread_wait(tq, &tq->tq_lock,
1540 			    &tq->tq_dispatch_cv, &cprinfo, -1);
1541 			tq->tq_active++;
1542 			continue;
1543 		}
1544 
1545 		tqe->tqent_prev->tqent_next = tqe->tqent_next;
1546 		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
1547 		mutex_exit(&tq->tq_lock);
1548 
1549 		rw_enter(&tq->tq_threadlock, RW_READER);
1550 		start = gethrtime();
1551 		DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
1552 		    taskq_ent_t *, tqe);
1553 		tqe->tqent_func(tqe->tqent_arg);
1554 		DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
1555 		    taskq_ent_t *, tqe);
1556 		end = gethrtime();
1557 		rw_exit(&tq->tq_threadlock);
1558 
1559 		mutex_enter(&tq->tq_lock);
1560 		tq->tq_totaltime += end - start;
1561 		tq->tq_executed++;
1562 
1563 		taskq_ent_free(tq, tqe);
1564 	}
1565 
1566 	if (tq->tq_nthreads_max == 1)
1567 		tq->tq_thread = NULL;
1568 	else
1569 		tq->tq_threadlist[thread_id - 1] = NULL;
1570 
1571 	/* We're exiting, and therefore no longer active */
1572 	ASSERT(tq->tq_active > 0);
1573 	tq->tq_active--;
1574 
1575 	ASSERT(tq->tq_nthreads > 0);
1576 	tq->tq_nthreads--;
1577 
1578 	/* Wake up anyone waiting for us to exit */
1579 	cv_broadcast(&tq->tq_exit_cv);
1580 	if (tq->tq_nthreads == tq->tq_nthreads_target) {
1581 		if (!(tq->tq_flags & TASKQ_THREAD_CREATED))
1582 			tq->tq_flags &= ~TASKQ_CHANGING;
1583 
1584 		cv_broadcast(&tq->tq_wait_cv);
1585 	}
1586 
1587 	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
1588 	CALLB_CPR_EXIT(&cprinfo);		/* drops tq->tq_lock */
1589 	if (curthread->t_lwp != NULL) {
1590 		mutex_enter(&curproc->p_lock);
1591 		lwp_exit();
1592 	} else {
1593 		thread_exit();
1594 	}
1595 }
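
/*
 * Example: because each task above runs with tq_threadlock held as
 * reader, taskq_suspend() (which takes the same lock as writer) waits
 * out tasks that are already running and then stalls further execution.
 * A minimal sketch, assuming "tq" was created earlier by the caller:
 *
 *	taskq_suspend(tq);		running tasks finish; no new
 *					task will start
 *	ASSERT(taskq_suspended(tq));
 *	... reconfigure state shared with the tasks ...
 *	taskq_resume(tq);		dispatched tasks may run again
 */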
1596 
1597 /*
1598  * Worker per-entry thread for dynamic dispatches.
1599  */
1600 static void
1601 taskq_d_thread(taskq_ent_t *tqe)
1602 {
1603 	taskq_bucket_t	*bucket = tqe->tqent_bucket;
1604 	taskq_t		*tq = bucket->tqbucket_taskq;
1605 	kmutex_t	*lock = &bucket->tqbucket_lock;
1606 	kcondvar_t	*cv = &tqe->tqent_cv;
1607 	callb_cpr_t	cprinfo;
1608 	clock_t		w = 0;	/* tested below even if the wait is skipped */
1609 
1610 	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, tq->tq_name);
1611 
1612 	mutex_enter(lock);
1613 
1614 	for (;;) {
1615 		/*
1616 		 * If a task is scheduled (func != NULL), execute it; otherwise
1617 		 * sleep, waiting for a job.
1618 		 */
1619 		if (tqe->tqent_func != NULL) {
1620 			hrtime_t	start;
1621 			hrtime_t	end;
1622 
1623 			ASSERT(bucket->tqbucket_nalloc > 0);
1624 
1625 			/*
1626 			 * It is possible to free the entry right away before
1627 			 * dispatches may immediately reuse it. Doing so, however,
1628 			 * effectively creates a queue of depth two in the entry
1629 			 * and may lead to a deadlock if the execution of the
1630 			 * and may lead to a deadlock if the execution of the
1631 			 * current task depends on the execution of the next
1632 			 * scheduled task. So, we keep the entry busy until the
1633 			 * task is processed.
1634 			 */
1635 
1636 			mutex_exit(lock);
1637 			start = gethrtime();
1638 			DTRACE_PROBE3(taskq__d__exec__start, taskq_t *, tq,
1639 			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
1640 			tqe->tqent_func(tqe->tqent_arg);
1641 			DTRACE_PROBE3(taskq__d__exec__end, taskq_t *, tq,
1642 			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
1643 			end = gethrtime();
1644 			mutex_enter(lock);
1645 			bucket->tqbucket_totaltime += end - start;
1646 
1647 			/*
1648 			 * Return the entry to the bucket free list.
1649 			 */
1650 			tqe->tqent_func = NULL;
1651 			TQ_APPEND(bucket->tqbucket_freelist, tqe);
1652 			bucket->tqbucket_nalloc--;
1653 			bucket->tqbucket_nfree++;
1654 			ASSERT(!IS_EMPTY(bucket->tqbucket_freelist));
1655 			/*
1656 			 * taskq_wait() waits for nalloc to drop to zero on
1657 			 * tqbucket_cv.
1658 			 */
1659 			cv_signal(&bucket->tqbucket_cv);
1660 		}
1661 
1662 		/*
1663 		 * At this point the entry must be in the bucket free list -
1664 		 * either because it was there initially or because it just
1665 		 * finished executing a task and put itself on the free list.
1666 		 */
1667 		ASSERT(bucket->tqbucket_nfree > 0);
1668 		/*
1669 		 * Go to sleep unless we are closing.
1670 		 * If a thread is sleeping too long, it dies.
1671 		 */
1672 		if (! (bucket->tqbucket_flags & TQBUCKET_CLOSE)) {
1673 			w = taskq_thread_wait(tq, lock, cv,
1674 			    &cprinfo, taskq_thread_timeout * hz);
1675 		}
1676 
1677 		/*
1678 		 * At this point we may be in two different states:
1679 		 *
1680 		 * (1) tqent_func is set, which means that a new task is
1681 		 *	dispatched and we need to execute it.
1682 		 *
1683 		 * (2) The thread has been sleeping for too long, or we are
1684 		 *	closing. In either case, destroy the thread and the entry.
1685 		 */
1686 
1687 		/* If func is NULL we should be on the freelist. */
1688 		ASSERT((tqe->tqent_func != NULL) ||
1689 		    (bucket->tqbucket_nfree > 0));
1690 		/* If func is non-NULL we should be allocated */
1691 		ASSERT((tqe->tqent_func == NULL) ||
1692 		    (bucket->tqbucket_nalloc > 0));
1693 
1694 		/* Check freelist consistency */
1695 		ASSERT((bucket->tqbucket_nfree > 0) ||
1696 		    IS_EMPTY(bucket->tqbucket_freelist));
1697 		ASSERT((bucket->tqbucket_nfree == 0) ||
1698 		    !IS_EMPTY(bucket->tqbucket_freelist));
1699 
1700 		if ((tqe->tqent_func == NULL) &&
1701 		    ((w == -1) || (bucket->tqbucket_flags & TQBUCKET_CLOSE))) {
1702 			/*
1703 			 * This thread is sleeping for too long or we are
1704 			 * closing - time to die.
1705 			 * Thread creation/destruction happens rarely,
1706 			 * so grabbing the lock is not a big performance issue.
1707 			 * The bucket lock is dropped by CALLB_CPR_EXIT().
1708 			 */
1709 
1710 			/* Remove the entry from the free list. */
1711 			tqe->tqent_prev->tqent_next = tqe->tqent_next;
1712 			tqe->tqent_next->tqent_prev = tqe->tqent_prev;
1713 			ASSERT(bucket->tqbucket_nfree > 0);
1714 			bucket->tqbucket_nfree--;
1715 
1716 			TQ_STAT(bucket, tqs_tdeaths);
1717 			cv_signal(&bucket->tqbucket_cv);
1718 			tqe->tqent_thread = NULL;
1719 			mutex_enter(&tq->tq_lock);
1720 			tq->tq_tdeaths++;
1721 			mutex_exit(&tq->tq_lock);
1722 			CALLB_CPR_EXIT(&cprinfo);
1723 			kmem_cache_free(taskq_ent_cache, tqe);
1724 			thread_exit();
1725 		}
1726 	}
1727 }
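
/*
 * Example: how the per-entry threads above get exercised. A sketch of a
 * dynamic dispatch with the backing-queue fallback disallowed; "xx_func"
 * and "arg" are placeholders:
 *
 *	taskq_t *dtq = taskq_create("xx_dyn", 16, minclsyspri, 1, INT_MAX,
 *	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
 *
 *	if (taskq_dispatch(dtq, xx_func, arg, TQ_NOSLEEP | TQ_NOQUEUE) == 0)
 *		xx_func(arg);		no free entry or thread: run inline
 *
 * With TQ_NOQUEUE the dispatch fails (returns 0) instead of falling back
 * to the backing queue when the bucket has no free entry and a new one
 * cannot be created.
 */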
1728 
1729 
1730 /*
1731  * Taskq creation. May sleep for memory.
1732  * Always use automatically generated instances to avoid kstat name space
1733  * collisions.
1734  */
1735 
1736 taskq_t *
1737 taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
1738     int maxalloc, uint_t flags)
1739 {
1740 	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1741 
1742 	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
1743 	    maxalloc, &p0, 0, flags | TASKQ_NOINSTANCE));
1744 }
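
/*
 * Example: the common fixed-pool pattern, as a sketch ("xx_worker" is a
 * placeholder; error handling elided):
 *
 *	#include <sys/taskq.h>
 *
 *	static taskq_t *xx_tq;
 *
 *	xx_tq = taskq_create("xx_tq", 4, minclsyspri, 4, INT_MAX,
 *	    TASKQ_PREPOPULATE);
 *	(void) taskq_dispatch(xx_tq, xx_worker, NULL, TQ_SLEEP);
 *	taskq_wait(xx_tq);		block until dispatched tasks finish
 *	taskq_destroy(xx_tq);
 */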
1745 
1746 /*
1747  * Create an instance of a task queue. It is legal to create task queues with the
1748  * same name and different instances.
1749  *
1750  * taskq_create_instance is used by ddi_taskq_create() where it gets the
1751  * instance from ddi_get_instance(). In some cases the instance is not
1752  * initialized and is set to -1. This case is handled as if no instance was
1753  * passed at all.
1754  */
1755 taskq_t *
1756 taskq_create_instance(const char *name, int instance, int nthreads, pri_t pri,
1757     int minalloc, int maxalloc, uint_t flags)
1758 {
1759 	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1760 	ASSERT((instance >= 0) || (instance == -1));
1761 
1762 	if (instance < 0) {
1763 		flags |= TASKQ_NOINSTANCE;
1764 	}
1765 
1766 	return (taskq_create_common(name, instance, nthreads,
1767 	    pri, minalloc, maxalloc, &p0, 0, flags));
1768 }
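
/*
 * Example: the ddi_taskq_create() pattern described above, sketched for
 * a hypothetical driver ("xx" names are placeholders):
 *
 *	int inst = ddi_get_instance(dip);
 *
 *	xx->xx_tq = taskq_create_instance("xx_tq", inst, 1, minclsyspri,
 *	    2, 8, 0);
 *
 * Separate attaches then publish distinct kstats such as unix:0:xx_tq
 * and unix:1:xx_tq; passing inst == -1 instead falls back to an
 * automatically generated instance via TASKQ_NOINSTANCE.
 */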
1769 
1770 taskq_t *
1771 taskq_create_proc(const char *name, int nthreads, pri_t pri, int minalloc,
1772     int maxalloc, proc_t *proc, uint_t flags)
1773 {
1774 	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1775 	ASSERT(proc->p_flag & SSYS);
1776 
1777 	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
1778 	    maxalloc, proc, 0, flags | TASKQ_NOINSTANCE));
1779 }
1780 
1781 taskq_t *
1782 taskq_create_sysdc(const char *name, int nthreads, int minalloc,
1783     int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
1784 {
1785 	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1786 	ASSERT(proc->p_flag & SSYS);
1787 
1788 	return (taskq_create_common(name, 0, nthreads, minclsyspri, minalloc,
1789 	    maxalloc, proc, dc, flags | TASKQ_NOINSTANCE | TASKQ_DUTY_CYCLE));
1790 }
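
/*
 * Example: a duty-cycle queue, sketched. "xx_proc" stands for a system
 * process (p_flag & SSYS) the caller has already set up; 20 is the
 * duty cycle handed to sysdc_thread_enter() for each queue thread:
 *
 *	tq = taskq_create_sysdc("xx_sysdc_tq", 4, 2, 8, xx_proc, 20, 0);
 *
 * Note there is no priority argument: sysdc queues always start their
 * threads at minclsyspri, and the SDC class manages them from then on.
 * Adding TASKQ_DC_BATCH to the flags selects SYSDC_THREAD_BATCH.
 */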
1791 
1792 static taskq_t *
1793 taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
1794     int minalloc, int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
1795 {
1796 	taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
1797 	uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
1798 	uint_t bsize;	/* # of buckets - always power of 2 */
1799 	int max_nthreads;
1800 
1801 	/*
1802 	 * TASKQ_DYNAMIC, TASKQ_CPR_SAFE and TASKQ_THREADS_CPU_PCT are all
1803 	 * mutually incompatible.
1804 	 */
1805 	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_CPR_SAFE));
1806 	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_THREADS_CPU_PCT));
1807 	IMPLY((flags & TASKQ_CPR_SAFE), !(flags & TASKQ_THREADS_CPU_PCT));
1808 
1809 	/* Cannot have DUTY_CYCLE without a non-p0 kernel process */
1810 	IMPLY((flags & TASKQ_DUTY_CYCLE), proc != &p0);
1811 
1812 	/* Cannot have DC_BATCH without DUTY_CYCLE */
1813 	ASSERT((flags & (TASKQ_DUTY_CYCLE|TASKQ_DC_BATCH)) != TASKQ_DC_BATCH);
1814 
1815 	ASSERT(proc != NULL);
1816 
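	/*
	 * The bucket count is the largest power of two not exceeding
	 * ncpus, capped at taskq_maxbuckets; e.g. ncpus = 6 gives
	 * highbit(6) = 3 and bsize = 1 << 2 = 4.
	 */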
1817 	bsize = 1 << (highbit(ncpus) - 1);
1818 	ASSERT(bsize >= 1);
1819 	bsize = MIN(bsize, taskq_maxbuckets);
1820 
1821 	if (flags & TASKQ_DYNAMIC) {
1822 		ASSERT3S(nthreads, >=, 1);
1823 		tq->tq_maxsize = nthreads;
1824 
1825 		/* For dynamic task queues use just one backing-queue thread */
1826 		nthreads = max_nthreads = 1;
1827 
1828 	} else if (flags & TASKQ_THREADS_CPU_PCT) {
1829 		uint_t pct;
1830 		ASSERT3S(nthreads, >=, 0);
1831 		pct = nthreads;
1832 
1833 		if (pct > taskq_cpupct_max_percent)
1834 			pct = taskq_cpupct_max_percent;
1835 
1836 		/*
1837 		 * If you're using THREADS_CPU_PCT, the process for the
1838 		 * taskq threads must be curproc.  This allows any pset
1839 		 * binding to be inherited correctly.  If proc is &p0,
1840 		 * we won't be creating LWPs, so new threads will be assigned
1841 		 * to the default processor set.
1842 		 */
1843 		ASSERT(curproc == proc || proc == &p0);
1844 		tq->tq_threads_ncpus_pct = pct;
1845 		nthreads = 1;		/* corrected in taskq_thread_create() */
1846 		max_nthreads = TASKQ_THREADS_PCT(max_ncpus, pct);
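		/* E.g. max_ncpus = 8 and pct = 50 yield max_nthreads = 4. */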
1847 
1848 	} else {
1849 		ASSERT3S(nthreads, >=, 1);
1850 		max_nthreads = nthreads;
1851 	}
1852 
1853 	if (max_nthreads < taskq_minimum_nthreads_max)
1854 		max_nthreads = taskq_minimum_nthreads_max;
1855 
1856 	/*
1857 	 * Make sure the name is null-terminated and conforms to the rules
1858 	 * for C identifiers.
1859 	 */
1860 	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
1861 	strident_canon(tq->tq_name, TASKQ_NAMELEN + 1);
1862 
1863 	tq->tq_flags = flags | TASKQ_CHANGING;
1864 	tq->tq_active = 0;
1865 	tq->tq_instance = instance;
1866 	tq->tq_nthreads_target = nthreads;
1867 	tq->tq_nthreads_max = max_nthreads;
1868 	tq->tq_minalloc = minalloc;
1869 	tq->tq_maxalloc = maxalloc;
1870 	tq->tq_nbuckets = bsize;
1871 	tq->tq_proc = proc;
1872 	tq->tq_pri = pri;
1873 	tq->tq_DC = dc;
1874 	list_link_init(&tq->tq_cpupct_link);
1875 
1876 	if (max_nthreads > 1)
1877 		tq->tq_threadlist = kmem_alloc(
1878 		    sizeof (kthread_t *) * max_nthreads, KM_SLEEP);
1879 
1880 	mutex_enter(&tq->tq_lock);
1881 	if (flags & TASKQ_PREPOPULATE) {
1882 		while (minalloc-- > 0)
1883 			taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
1884 	}
1885 
1886 	/*
1887 	 * Create the first thread, which will create any other threads
1888 	 * necessary.  taskq_thread_create will not return until we have
1889 	 * enough threads to be able to process requests.
1890 	 */
1891 	taskq_thread_create(tq);
1892 	mutex_exit(&tq->tq_lock);
1893 
1894 	if (flags & TASKQ_DYNAMIC) {
1895 		taskq_bucket_t *bucket = kmem_zalloc(sizeof (taskq_bucket_t) *
1896 		    bsize, KM_SLEEP);
1897 		int b_id;
1898 
1899 		tq->tq_buckets = bucket;
1900 
1901 		/* Initialize each bucket */
1902 		for (b_id = 0; b_id < bsize; b_id++, bucket++) {
1903 			mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
1904 			    NULL);
1905 			cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
1906 			bucket->tqbucket_taskq = tq;
1907 			bucket->tqbucket_freelist.tqent_next =
1908 			    bucket->tqbucket_freelist.tqent_prev =
1909 			    &bucket->tqbucket_freelist;
1910 			if (flags & TASKQ_PREPOPULATE)
1911 				taskq_bucket_extend(bucket);
1912 		}
1913 	}
1914 
1915 	/*
1916 	 * Install kstats.
1917 	 * We have two cases:
1918 	 *   1) Instance is provided to taskq_create_instance(). In this case it
1919 	 *	should be >= 0 and we use it.
1920 	 *
1921 	 *   2) Instance is not provided and one is generated automatically.
1922 	 */
1923 	if (flags & TASKQ_NOINSTANCE) {
1924 		instance = tq->tq_instance =
1925 		    (int)(uintptr_t)vmem_alloc(taskq_id_arena, 1, VM_SLEEP);
1926 	}
1927 
1928 	if (flags & TASKQ_DYNAMIC) {
1929 		if ((tq->tq_kstat = kstat_create("unix", instance,
1930 		    tq->tq_name, "taskq_d", KSTAT_TYPE_NAMED,
1931 		    sizeof (taskq_d_kstat) / sizeof (kstat_named_t),
1932 		    KSTAT_FLAG_VIRTUAL)) != NULL) {
1933 			tq->tq_kstat->ks_lock = &taskq_d_kstat_lock;
1934 			tq->tq_kstat->ks_data = &taskq_d_kstat;
1935 			tq->tq_kstat->ks_update = taskq_d_kstat_update;
1936 			tq->tq_kstat->ks_private = tq;
1937 			kstat_install(tq->tq_kstat);
1938 		}
1939 	} else {
1940 		if ((tq->tq_kstat = kstat_create("unix", instance, tq->tq_name,
1941 		    "taskq", KSTAT_TYPE_NAMED,
1942 		    sizeof (taskq_kstat) / sizeof (kstat_named_t),
1943 		    KSTAT_FLAG_VIRTUAL)) != NULL) {
1944 			tq->tq_kstat->ks_lock = &taskq_kstat_lock;
1945 			tq->tq_kstat->ks_data = &taskq_kstat;
1946 			tq->tq_kstat->ks_update = taskq_kstat_update;
1947 			tq->tq_kstat->ks_private = tq;
1948 			kstat_install(tq->tq_kstat);
1949 		}
1950 	}
1951 
1952 	return (tq);
1953 }
1954 
1955 /*
1956  * taskq_destroy().
1957  *
1958  * Assumes: by the time taskq_destroy is called no one will use this task queue
1959  * in any way and no one will try to dispatch entries in it.
1960  */
1961 void
1962 taskq_destroy(taskq_t *tq)
1963 {
1964 	taskq_bucket_t *b = tq->tq_buckets;
1965 	int bid = 0;
1966 
1967 	ASSERT(! (tq->tq_flags & TASKQ_CPR_SAFE));
1968 
1969 	/*
1970 	 * Destroy kstats.
1971 	 */
1972 	if (tq->tq_kstat != NULL) {
1973 		kstat_delete(tq->tq_kstat);
1974 		tq->tq_kstat = NULL;
1975 	}
1976 
1977 	/*
1978 	 * Destroy instance if needed.
1979 	 */
1980 	if (tq->tq_flags & TASKQ_NOINSTANCE) {
1981 		vmem_free(taskq_id_arena, (void *)(uintptr_t)(tq->tq_instance),
1982 		    1);
1983 		tq->tq_instance = 0;
1984 	}
1985 
1986 	/*
1987 	 * Unregister from the cpupct list.
1988 	 */
1989 	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
1990 		taskq_cpupct_remove(tq);
1991 	}
1992 
1993 	/*
1994 	 * Wait for any pending entries to complete.
1995 	 */
1996 	taskq_wait(tq);
1997 
1998 	mutex_enter(&tq->tq_lock);
1999 	ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
2000 	    (tq->tq_active == 0));
2001 
2002 	/* notify all the threads that they need to exit */
2003 	tq->tq_nthreads_target = 0;
2004 
2005 	tq->tq_flags |= TASKQ_CHANGING;
2006 	cv_broadcast(&tq->tq_dispatch_cv);
2007 	cv_broadcast(&tq->tq_exit_cv);
2008 
2009 	while (tq->tq_nthreads != 0)
2010 		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
2011 
2012 	if (tq->tq_nthreads_max != 1)
2013 		kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
2014 		    tq->tq_nthreads_max);
2015 
2016 	tq->tq_minalloc = 0;
2017 	while (tq->tq_nalloc != 0)
2018 		taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
2019 
2020 	mutex_exit(&tq->tq_lock);
2021 
2022 	/*
2023 	 * Mark each bucket as closing and wake up all sleeping threads.
2024 	 */
2025 	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
2026 		taskq_ent_t *tqe;
2027 
2028 		mutex_enter(&b->tqbucket_lock);
2029 
2030 		b->tqbucket_flags |= TQBUCKET_CLOSE;
2031 		/* Wake up all sleeping threads */
2032 
2033 		for (tqe = b->tqbucket_freelist.tqent_next;
2034 		    tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
2035 			cv_signal(&tqe->tqent_cv);
2036 
2037 		ASSERT(b->tqbucket_nalloc == 0);
2038 
2039 		/*
2040 		 * At this point we have waited for all pending jobs to complete
2041 		 * (in both the task queue and the bucket), and no new jobs
2042 		 * should arrive. Wait for all threads to die.
2043 		 */
2044 		while (b->tqbucket_nfree > 0)
2045 			cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
2046 		mutex_exit(&b->tqbucket_lock);
2047 		mutex_destroy(&b->tqbucket_lock);
2048 		cv_destroy(&b->tqbucket_cv);
2049 	}
2050 
2051 	if (tq->tq_buckets != NULL) {
2052 		ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
2053 		kmem_free(tq->tq_buckets,
2054 		    sizeof (taskq_bucket_t) * tq->tq_nbuckets);
2055 
2056 		/* Cleanup fields before returning tq to the cache */
2057 		tq->tq_buckets = NULL;
2058 		tq->tq_tcreates = 0;
2059 		tq->tq_tdeaths = 0;
2060 	} else {
2061 		ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
2062 	}
2063 
2064 	tq->tq_threads_ncpus_pct = 0;
2065 	tq->tq_totaltime = 0;
2066 	tq->tq_tasks = 0;
2067 	tq->tq_maxtasks = 0;
2068 	tq->tq_executed = 0;
2069 	kmem_cache_free(taskq_cache, tq);
2070 }
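
/*
 * Example: typical teardown order, sketched. The caller must first cut
 * off every dispatch source, since taskq_destroy() assumes nothing will
 * use or dispatch to the queue anymore ("xx" names are placeholders):
 *
 *	(void) untimeout(xx->xx_tid);	stop the paths that dispatch
 *	taskq_destroy(xx->xx_tq);	waits for tasks, reaps threads
 *	xx->xx_tq = NULL;
 */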
2071 
2072 /*
2073  * Extend a bucket with a new entry on the free list and attach a worker thread
2074  * to it.
2075  *
2076  * Argument: pointer to the bucket.
2077  *
2078  * This function may quietly fail. It is only used by taskq_dispatch(), which
2079  * handles such failures properly.
2080  */
2081 static void
2082 taskq_bucket_extend(void *arg)
2083 {
2084 	taskq_ent_t *tqe;
2085 	taskq_bucket_t *b = (taskq_bucket_t *)arg;
2086 	taskq_t *tq = b->tqbucket_taskq;
2087 	int nthreads;
2088 
2089 	if (! ENOUGH_MEMORY()) {
2090 		TQ_STAT(b, tqs_nomem);
2091 		return;
2092 	}
2093 
2094 	mutex_enter(&tq->tq_lock);
2095 
2096 	/*
2097 	 * Observe global taskq limits on the number of threads.
2098 	 */
2099 	if (tq->tq_tcreates++ - tq->tq_tdeaths > tq->tq_maxsize) {
2100 		tq->tq_tcreates--;
2101 		mutex_exit(&tq->tq_lock);
2102 		return;
2103 	}
2104 	mutex_exit(&tq->tq_lock);
2105 
2106 	tqe = kmem_cache_alloc(taskq_ent_cache, KM_NOSLEEP);
2107 
2108 	if (tqe == NULL) {
2109 		mutex_enter(&tq->tq_lock);
2110 		TQ_STAT(b, tqs_nomem);
2111 		tq->tq_tcreates--;
2112 		mutex_exit(&tq->tq_lock);
2113 		return;
2114 	}
2115 
2116 	ASSERT(tqe->tqent_thread == NULL);
2117 
2118 	tqe->tqent_bucket = b;
2119 
2120 	/*
2121 	 * Create a thread in a TS_STOPPED state first. If it is successfully
2122 	 * created, place the entry on the free list and start the thread.
2123 	 */
2124 	tqe->tqent_thread = thread_create(NULL, 0, taskq_d_thread, tqe,
2125 	    0, &p0, TS_STOPPED, tq->tq_pri);
2126 
2127 	/*
2128 	 * Once the entry is ready, link it to the bucket free list.
2129 	 */
2130 	mutex_enter(&b->tqbucket_lock);
2131 	tqe->tqent_func = NULL;
2132 	TQ_APPEND(b->tqbucket_freelist, tqe);
2133 	b->tqbucket_nfree++;
2134 	TQ_STAT(b, tqs_tcreates);
2135 
2136 #if TASKQ_STATISTIC
2137 	nthreads = b->tqbucket_stat.tqs_tcreates -
2138 	    b->tqbucket_stat.tqs_tdeaths;
2139 	b->tqbucket_stat.tqs_maxthreads = MAX(nthreads,
2140 	    b->tqbucket_stat.tqs_maxthreads);
2141 #endif
2142 
2143 	mutex_exit(&b->tqbucket_lock);
2144 	/*
2145 	 * Start the stopped thread.
2146 	 */
2147 	thread_lock(tqe->tqent_thread);
2148 	tqe->tqent_thread->t_taskq = tq;
2149 	tqe->tqent_thread->t_schedflag |= TS_ALLSTART;
2150 	setrun_locked(tqe->tqent_thread);
2151 	thread_unlock(tqe->tqent_thread);
2152 }
2153 
2154 static int
2155 taskq_kstat_update(kstat_t *ksp, int rw)
2156 {
2157 	struct taskq_kstat *tqsp = &taskq_kstat;
2158 	taskq_t *tq = ksp->ks_private;
2159 
2160 	if (rw == KSTAT_WRITE)
2161 		return (EACCES);
2162 
2163 	tqsp->tq_pid.value.ui64 = tq->tq_proc->p_pid;
2164 	tqsp->tq_tasks.value.ui64 = tq->tq_tasks;
2165 	tqsp->tq_executed.value.ui64 = tq->tq_executed;
2166 	tqsp->tq_maxtasks.value.ui64 = tq->tq_maxtasks;
2167 	tqsp->tq_totaltime.value.ui64 = tq->tq_totaltime;
2168 	tqsp->tq_nactive.value.ui64 = tq->tq_active;
2169 	tqsp->tq_nalloc.value.ui64 = tq->tq_nalloc;
2170 	tqsp->tq_pri.value.ui64 = tq->tq_pri;
2171 	tqsp->tq_nthreads.value.ui64 = tq->tq_nthreads;
2172 	return (0);
2173 }
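
/*
 * Example: the stats published here are readable from userland with
 * libkstat. A sketch; the named stat "executed" is assumed from the
 * taskq_kstat layout used above:
 *
 *	#include <kstat.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp;
 *
 *	for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
 *		kstat_named_t *kn;
 *		if (strcmp(ksp->ks_module, "unix") != 0 ||
 *		    strcmp(ksp->ks_class, "taskq") != 0)
 *			continue;
 *		(void) kstat_read(kc, ksp, NULL);
 *		kn = kstat_data_lookup(ksp, "executed");
 *		if (kn != NULL)
 *			(void) printf("%s: %llu executed\n", ksp->ks_name,
 *			    (u_longlong_t)kn->value.ui64);
 *	}
 *	(void) kstat_close(kc);
 *
 * The same data is available from the shell via kstat -m unix -c taskq.
 */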
2174 
2175 static int
2176 taskq_d_kstat_update(kstat_t *ksp, int rw)
2177 {
2178 	struct taskq_d_kstat *tqsp = &taskq_d_kstat;
2179 	taskq_t *tq = ksp->ks_private;
2180 	taskq_bucket_t *b = tq->tq_buckets;
2181 	int bid = 0;
2182 
2183 	if (rw == KSTAT_WRITE)
2184 		return (EACCES);
2185 
2186 	ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
2187 
2188 	tqsp->tqd_btasks.value.ui64 = tq->tq_tasks;
2189 	tqsp->tqd_bexecuted.value.ui64 = tq->tq_executed;
2190 	tqsp->tqd_bmaxtasks.value.ui64 = tq->tq_maxtasks;
2191 	tqsp->tqd_bnalloc.value.ui64 = tq->tq_nalloc;
2192 	tqsp->tqd_bnactive.value.ui64 = tq->tq_active;
2193 	tqsp->tqd_btotaltime.value.ui64 = tq->tq_totaltime;
2194 	tqsp->tqd_pri.value.ui64 = tq->tq_pri;
2195 
2196 	tqsp->tqd_hits.value.ui64 = 0;
2197 	tqsp->tqd_misses.value.ui64 = 0;
2198 	tqsp->tqd_overflows.value.ui64 = 0;
2199 	tqsp->tqd_tcreates.value.ui64 = 0;
2200 	tqsp->tqd_tdeaths.value.ui64 = 0;
2201 	tqsp->tqd_maxthreads.value.ui64 = 0;
2202 	tqsp->tqd_nomem.value.ui64 = 0;
2203 	tqsp->tqd_disptcreates.value.ui64 = 0;
2204 	tqsp->tqd_totaltime.value.ui64 = 0;
2205 	tqsp->tqd_nalloc.value.ui64 = 0;
2206 	tqsp->tqd_nfree.value.ui64 = 0;
2207 
2208 	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
2209 		tqsp->tqd_hits.value.ui64 += b->tqbucket_stat.tqs_hits;
2210 		tqsp->tqd_misses.value.ui64 += b->tqbucket_stat.tqs_misses;
2211 		tqsp->tqd_overflows.value.ui64 += b->tqbucket_stat.tqs_overflow;
2212 		tqsp->tqd_tcreates.value.ui64 += b->tqbucket_stat.tqs_tcreates;
2213 		tqsp->tqd_tdeaths.value.ui64 += b->tqbucket_stat.tqs_tdeaths;
2214 		tqsp->tqd_maxthreads.value.ui64 +=
2215 		    b->tqbucket_stat.tqs_maxthreads;
2216 		tqsp->tqd_nomem.value.ui64 += b->tqbucket_stat.tqs_nomem;
2217 		tqsp->tqd_disptcreates.value.ui64 +=
2218 		    b->tqbucket_stat.tqs_disptcreates;
2219 		tqsp->tqd_totaltime.value.ui64 += b->tqbucket_totaltime;
2220 		tqsp->tqd_nalloc.value.ui64 += b->tqbucket_nalloc;
2221 		tqsp->tqd_nfree.value.ui64 += b->tqbucket_nfree;
2222 	}
2223 	return (0);
2224 }
2225