xref: /titanic_54/usr/src/uts/common/os/taskq.c (revision 35a5a3587fd94b666239c157d3722745250ccbd7)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
52e0c549eSJonathan Adams  * Common Development and Distribution License (the "License").
62e0c549eSJonathan Adams  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
222e0c549eSJonathan Adams  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate /*
277c478bd9Sstevel@tonic-gate  * Kernel task queues: general-purpose asynchronous task scheduling.
287c478bd9Sstevel@tonic-gate  *
297c478bd9Sstevel@tonic-gate  * A common problem in kernel programming is the need to schedule tasks
307c478bd9Sstevel@tonic-gate  * to be performed later, by another thread. There are several reasons
317c478bd9Sstevel@tonic-gate  * you may want or need to do this:
327c478bd9Sstevel@tonic-gate  *
337c478bd9Sstevel@tonic-gate  * (1) The task isn't time-critical, but your current code path is.
347c478bd9Sstevel@tonic-gate  *
357c478bd9Sstevel@tonic-gate  * (2) The task may require grabbing locks that you already hold.
367c478bd9Sstevel@tonic-gate  *
377c478bd9Sstevel@tonic-gate  * (3) The task may need to block (e.g. to wait for memory), but you
387c478bd9Sstevel@tonic-gate  *     cannot block in your current context.
397c478bd9Sstevel@tonic-gate  *
407c478bd9Sstevel@tonic-gate  * (4) Your code path can't complete because of some condition, but you can't
417c478bd9Sstevel@tonic-gate  *     sleep or fail, so you queue the task for later execution when the
427c478bd9Sstevel@tonic-gate  *     condition disappears.
437c478bd9Sstevel@tonic-gate  *
447c478bd9Sstevel@tonic-gate  * (5) You just want a simple way to launch multiple tasks in parallel.
457c478bd9Sstevel@tonic-gate  *
467c478bd9Sstevel@tonic-gate  * Task queues provide such a facility. In its simplest form (used when
477c478bd9Sstevel@tonic-gate  * performance is not a critical consideration) a task queue consists of a
487c478bd9Sstevel@tonic-gate  * single list of tasks, together with one or more threads to service the
497c478bd9Sstevel@tonic-gate  * list. There are some cases when this simple queue is not sufficient:
507c478bd9Sstevel@tonic-gate  *
517c478bd9Sstevel@tonic-gate  * (1) The task queues are very hot and there is a need to avoid data and lock
527c478bd9Sstevel@tonic-gate  *	contention over global resources.
537c478bd9Sstevel@tonic-gate  *
547c478bd9Sstevel@tonic-gate  * (2) Some tasks may depend on other tasks to complete, so they can't be put in
557c478bd9Sstevel@tonic-gate  *	the same list managed by the same thread.
567c478bd9Sstevel@tonic-gate  *
577c478bd9Sstevel@tonic-gate  * (3) Some tasks may block for a long time, and this should not block other
587c478bd9Sstevel@tonic-gate  *	tasks in the queue.
597c478bd9Sstevel@tonic-gate  *
607c478bd9Sstevel@tonic-gate  * To provide useful service in such cases we define a "dynamic task queue"
617c478bd9Sstevel@tonic-gate  * which has an individual thread for each of the tasks. These threads are
627c478bd9Sstevel@tonic-gate  * dynamically created as they are needed and destroyed when they are not in
637c478bd9Sstevel@tonic-gate  * use. The API for managing task pools is the same as for managing task queues
647c478bd9Sstevel@tonic-gate  * with the exception of the taskq creation flag TASKQ_DYNAMIC, which indicates
657c478bd9Sstevel@tonic-gate  * that dynamic task pool behavior is desired.
667c478bd9Sstevel@tonic-gate  *
677c478bd9Sstevel@tonic-gate  * Dynamic task queues may also place tasks in the normal queue (called "backing
687c478bd9Sstevel@tonic-gate  * queue") when the task pool runs out of resources. Users of task queues may
697c478bd9Sstevel@tonic-gate  * disallow such queued scheduling by specifying TQ_NOQUEUE in the dispatch
707c478bd9Sstevel@tonic-gate  * flags.
717c478bd9Sstevel@tonic-gate  *
727c478bd9Sstevel@tonic-gate  * The backing task queue is also used for scheduling internal tasks needed for
737c478bd9Sstevel@tonic-gate  * dynamic task queue maintenance.
747c478bd9Sstevel@tonic-gate  *
752e0c549eSJonathan Adams  * INTERFACES ==================================================================
767c478bd9Sstevel@tonic-gate  *
77*35a5a358SJonathan Adams  * taskq_t *taskq_create(name, nthreads, pri, minalloc, maxalloc, flags);
787c478bd9Sstevel@tonic-gate  *
797c478bd9Sstevel@tonic-gate  *	Create a taskq with specified properties.
807c478bd9Sstevel@tonic-gate  *	Possible 'flags':
817c478bd9Sstevel@tonic-gate  *
827c478bd9Sstevel@tonic-gate  *	  TASKQ_DYNAMIC: Create a dynamic task pool. If this flag is
837c478bd9Sstevel@tonic-gate  *		specified, 'nthreads' specifies the maximum number of threads in
847c478bd9Sstevel@tonic-gate  *		the task queue. Task execution order for dynamic task queues is
857c478bd9Sstevel@tonic-gate  *		not predictable.
867c478bd9Sstevel@tonic-gate  *
877c478bd9Sstevel@tonic-gate  *		If this flag is not specified (default case) a
887c478bd9Sstevel@tonic-gate  *		single-list task queue is created with 'nthreads' threads
897c478bd9Sstevel@tonic-gate  *		servicing it. Entries in this queue are managed by
907c478bd9Sstevel@tonic-gate  *		taskq_ent_alloc() and taskq_ent_free() which try to keep the
917c478bd9Sstevel@tonic-gate  *		task population between 'minalloc' and 'maxalloc', but the
927c478bd9Sstevel@tonic-gate  *		latter limit is only advisory for TQ_SLEEP dispatches and the
937c478bd9Sstevel@tonic-gate  *		former limit is only advisory for TQ_NOALLOC dispatches. If
947c478bd9Sstevel@tonic-gate  *		TASKQ_PREPOPULATE is set in 'flags', the taskq will be
957c478bd9Sstevel@tonic-gate  *		prepopulated with 'minalloc' task structures.
967c478bd9Sstevel@tonic-gate  *
977c478bd9Sstevel@tonic-gate  *		Since non-DYNAMIC taskqs are queues, tasks are guaranteed to be
987c478bd9Sstevel@tonic-gate  *		executed in the order they are scheduled if nthreads == 1.
997c478bd9Sstevel@tonic-gate  *		If nthreads > 1, task execution order is not predictable.
1007c478bd9Sstevel@tonic-gate  *
1017c478bd9Sstevel@tonic-gate  *	  TASKQ_PREPOPULATE: Prepopulate the task queue with 'minalloc' task
1027c478bd9Sstevel@tonic-gate  *		structures (and, for dynamic task queues, with threads).
1037c478bd9Sstevel@tonic-gate  *
1042e0c549eSJonathan Adams  *	  TASKQ_THREADS_CPU_PCT: This flag specifies that 'nthreads' should be
1052e0c549eSJonathan Adams  *		interpreted as a percentage of the # of online CPUs on the
1062e0c549eSJonathan Adams  *		system.  The taskq subsystem will automatically adjust the
1072e0c549eSJonathan Adams  *		number of threads in the taskq in response to CPU online
1082e0c549eSJonathan Adams  *		and offline events, to keep the ratio.  nthreads must be in
1092e0c549eSJonathan Adams  *		the range [0,100].
1102e0c549eSJonathan Adams  *
1112e0c549eSJonathan Adams  *		The calculation used is:
1122e0c549eSJonathan Adams  *
1132e0c549eSJonathan Adams  *			MAX((ncpus_online * percentage)/100, 1)
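 *
 *		For example, with 20 CPUs online and nthreads == 25, the
 *		taskq runs MAX((20 * 25)/100, 1) == 5 threads.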
1142e0c549eSJonathan Adams  *
1152e0c549eSJonathan Adams  *		This flag is not supported for DYNAMIC task queues.
1162e0c549eSJonathan Adams  *		This flag is not compatible with TASKQ_CPR_SAFE.
1172e0c549eSJonathan Adams  *
1187c478bd9Sstevel@tonic-gate  *	  TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
1197c478bd9Sstevel@tonic-gate  *		use their own protocol for handling CPR issues. This flag is not
1202e0c549eSJonathan Adams  *		supported for DYNAMIC task queues.  This flag is not compatible
1212e0c549eSJonathan Adams  *		with TASKQ_THREADS_CPU_PCT.
1227c478bd9Sstevel@tonic-gate  *
1237c478bd9Sstevel@tonic-gate  *	The 'pri' field specifies the default priority for the threads that
1247c478bd9Sstevel@tonic-gate  *	service all scheduled tasks.
1257c478bd9Sstevel@tonic-gate  *
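 *	As an illustrative sketch (my_func and my_arg are hypothetical
 *	names, not part of this interface), a typical lifecycle is:
 *
 *		taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *		    2, 8, TASKQ_PREPOPULATE);
 *		(void) taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *		taskq_wait(tq);
 *		taskq_destroy(tq);
 *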
126*35a5a358SJonathan Adams  * taskq_t *taskq_create_instance(name, instance, nthreads, pri, minalloc,
127*35a5a358SJonathan Adams  *    maxalloc, flags);
128*35a5a358SJonathan Adams  *
129*35a5a358SJonathan Adams  *	Like taskq_create(), but takes an instance number (or -1 to indicate
130*35a5a358SJonathan Adams  *	no instance).
131*35a5a358SJonathan Adams  *
132*35a5a358SJonathan Adams  * taskq_t *taskq_create_proc(name, nthreads, pri, minalloc, maxalloc, proc,
133*35a5a358SJonathan Adams  *    flags);
134*35a5a358SJonathan Adams  *
135*35a5a358SJonathan Adams  *	Like taskq_create(), but creates the taskq threads in the specified
136*35a5a358SJonathan Adams  *	system process.  If proc != &p0, this must be called from a thread
137*35a5a358SJonathan Adams  *	in that process.
138*35a5a358SJonathan Adams  *
139*35a5a358SJonathan Adams  * taskq_t *taskq_create_sysdc(name, nthreads, minalloc, maxalloc, proc,
140*35a5a358SJonathan Adams  *    dc, flags);
141*35a5a358SJonathan Adams  *
142*35a5a358SJonathan Adams  *	Like taskq_create_proc(), but the taskq threads will use the
143*35a5a358SJonathan Adams  *	System Duty Cycle (SDC) scheduling class with a duty cycle of dc.
144*35a5a358SJonathan Adams  *
1457c478bd9Sstevel@tonic-gate  * void taskq_destroy(tap):
1467c478bd9Sstevel@tonic-gate  *
1477c478bd9Sstevel@tonic-gate  *	Waits for any scheduled tasks to complete, then destroys the taskq.
1487c478bd9Sstevel@tonic-gate  *	Caller should guarantee that no new tasks are scheduled in the closing
1497c478bd9Sstevel@tonic-gate  *	taskq.
1507c478bd9Sstevel@tonic-gate  *
1517c478bd9Sstevel@tonic-gate  * taskqid_t taskq_dispatch(tq, func, arg, flags):
1527c478bd9Sstevel@tonic-gate  *
1537c478bd9Sstevel@tonic-gate  *	Dispatches the task "func(arg)" to taskq. The 'flags' indicates whether
1547c478bd9Sstevel@tonic-gate  *	the caller is willing to block for memory.  The function returns an
1557c478bd9Sstevel@tonic-gate  *	opaque value which is zero iff dispatch fails.  If flags is TQ_NOSLEEP
1567c478bd9Sstevel@tonic-gate  *	or TQ_NOALLOC and the task can't be dispatched, taskq_dispatch() fails
1577c478bd9Sstevel@tonic-gate  *	and returns (taskqid_t)0.
1587c478bd9Sstevel@tonic-gate  *
1597c478bd9Sstevel@tonic-gate  *	ASSUMES: func != NULL.
1607c478bd9Sstevel@tonic-gate  *
1617c478bd9Sstevel@tonic-gate  *	Possible flags:
1627c478bd9Sstevel@tonic-gate  *	  TQ_NOSLEEP: Do not wait for resources; may fail.
1637c478bd9Sstevel@tonic-gate  *
1647c478bd9Sstevel@tonic-gate  *	  TQ_NOALLOC: Do not allocate memory; may fail.  May only be used with
1657c478bd9Sstevel@tonic-gate  *		non-dynamic task queues.
1667c478bd9Sstevel@tonic-gate  *
1677c478bd9Sstevel@tonic-gate  *	  TQ_NOQUEUE: Do not enqueue the task if it can't be dispatched due to
1687c478bd9Sstevel@tonic-gate  *		lack of available resources; fail instead. If this flag is not
1697c478bd9Sstevel@tonic-gate  *		set, and the task pool is exhausted, the task may be scheduled
1707c478bd9Sstevel@tonic-gate  *		in the backing queue. This flag may ONLY be used with dynamic
1717c478bd9Sstevel@tonic-gate  *		task queues.
1727c478bd9Sstevel@tonic-gate  *
1737c478bd9Sstevel@tonic-gate  *		NOTE: This flag should always be used when a task queue is used
1747c478bd9Sstevel@tonic-gate  *		for tasks that may depend on each other for completion.
1757c478bd9Sstevel@tonic-gate  *		Enqueueing dependent tasks may create deadlocks.
1767c478bd9Sstevel@tonic-gate  *
1777c478bd9Sstevel@tonic-gate  *	  TQ_SLEEP:   May block waiting for resources. May still fail for
1787c478bd9Sstevel@tonic-gate  *		dynamic task queues if TQ_NOQUEUE is also specified; otherwise
1797c478bd9Sstevel@tonic-gate  *		it always succeeds.
1807c478bd9Sstevel@tonic-gate  *
181*35a5a358SJonathan Adams  *	  TQ_FRONT:   Puts the new task at the front of the queue.  Be careful.
182*35a5a358SJonathan Adams  *
1837c478bd9Sstevel@tonic-gate  *	NOTE: Dynamic task queues are much more likely to fail in
1847c478bd9Sstevel@tonic-gate  *		taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
1857c478bd9Sstevel@tonic-gate  *		is important to have backup strategies to handle such failures.
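 *
 *		A minimal sketch of such a backup strategy (illustrative
 *		only, not a prescription):
 *
 *			if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP) ==
 *			    (taskqid_t)0)
 *				func(arg);	(fall back to running inline)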
1867c478bd9Sstevel@tonic-gate  *
1877c478bd9Sstevel@tonic-gate  * void taskq_wait(tq):
1887c478bd9Sstevel@tonic-gate  *
1897c478bd9Sstevel@tonic-gate  *	Waits for all previously scheduled tasks to complete.
1907c478bd9Sstevel@tonic-gate  *
1917c478bd9Sstevel@tonic-gate  *	NOTE: It does not stop any new task dispatches.
1927c478bd9Sstevel@tonic-gate  *	      Do NOT call taskq_wait() from a task: it will cause deadlock.
1937c478bd9Sstevel@tonic-gate  *
1947c478bd9Sstevel@tonic-gate  * void taskq_suspend(tq)
1957c478bd9Sstevel@tonic-gate  *
1967c478bd9Sstevel@tonic-gate  *	Suspend all task execution. Tasks already scheduled for a dynamic task
1977c478bd9Sstevel@tonic-gate  *	queue will still be executed, but all newly scheduled tasks will be
1987c478bd9Sstevel@tonic-gate  *	suspended until taskq_resume() is called.
1997c478bd9Sstevel@tonic-gate  *
2007c478bd9Sstevel@tonic-gate  * int  taskq_suspended(tq)
2017c478bd9Sstevel@tonic-gate  *
2027c478bd9Sstevel@tonic-gate  *	Returns 1 if taskq is suspended and 0 otherwise. The intended use is
2037c478bd9Sstevel@tonic-gate  *	to ASSERT that the task queue is suspended.
2047c478bd9Sstevel@tonic-gate  *
2057c478bd9Sstevel@tonic-gate  * void taskq_resume(tq)
2067c478bd9Sstevel@tonic-gate  *
2077c478bd9Sstevel@tonic-gate  *	Resume task queue execution.
2087c478bd9Sstevel@tonic-gate  *
2097c478bd9Sstevel@tonic-gate  * int  taskq_member(tq, thread)
2107c478bd9Sstevel@tonic-gate  *
2117c478bd9Sstevel@tonic-gate  *	Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
2127c478bd9Sstevel@tonic-gate  *	intended use is to ASSERT that a given function is called in taskq
2137c478bd9Sstevel@tonic-gate  *	context only.
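 *
 *	For example:  ASSERT(taskq_member(tq, curthread));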
2147c478bd9Sstevel@tonic-gate  *
2157c478bd9Sstevel@tonic-gate  * system_taskq
2167c478bd9Sstevel@tonic-gate  *
2177c478bd9Sstevel@tonic-gate  *	Global system-wide dynamic task queue for common uses. It may be used by
2187c478bd9Sstevel@tonic-gate  *	any subsystem that needs to schedule tasks and does not need to manage
2197c478bd9Sstevel@tonic-gate  *	its own task queues. It is initialized quite early during system boot.
2207c478bd9Sstevel@tonic-gate  *
2212e0c549eSJonathan Adams  * IMPLEMENTATION ==============================================================
2227c478bd9Sstevel@tonic-gate  *
2237c478bd9Sstevel@tonic-gate  * This is a schematic representation of the task queue structures.
2247c478bd9Sstevel@tonic-gate  *
2257c478bd9Sstevel@tonic-gate  *   taskq:
2267c478bd9Sstevel@tonic-gate  *   +-------------+
2277c478bd9Sstevel@tonic-gate  *   | tq_lock     | +---< taskq_ent_free()
2287c478bd9Sstevel@tonic-gate  *   +-------------+ |
2297c478bd9Sstevel@tonic-gate  *   |...          | | tqent:                  tqent:
2307c478bd9Sstevel@tonic-gate  *   +-------------+ | +------------+          +------------+
2317c478bd9Sstevel@tonic-gate  *   | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
2327c478bd9Sstevel@tonic-gate  *   +-------------+   +------------+          +------------+
2337c478bd9Sstevel@tonic-gate  *   |...          |   | ...        |          | ...        |
2347c478bd9Sstevel@tonic-gate  *   +-------------+   +------------+          +------------+
2357c478bd9Sstevel@tonic-gate  *   | tq_task     |    |
2367c478bd9Sstevel@tonic-gate  *   |             |    +-------------->taskq_ent_alloc()
2377c478bd9Sstevel@tonic-gate  * +--------------------------------------------------------------------------+
2387c478bd9Sstevel@tonic-gate  * | |                     |            tqent                   tqent         |
2397c478bd9Sstevel@tonic-gate  * | +---------------------+     +--> +------------+     +--> +------------+  |
2407c478bd9Sstevel@tonic-gate  * | | ...		   |     |    | func, arg  |     |    | func, arg  |  |
2417c478bd9Sstevel@tonic-gate  * +>+---------------------+ <---|-+  +------------+ <---|-+  +------------+  |
2427c478bd9Sstevel@tonic-gate  *   | tq_taskq.tqent_next | ----+ |  | tqent_next | --->+ |  | tqent_next |--+
2437c478bd9Sstevel@tonic-gate  *   +---------------------+	   |  +------------+     ^ |  +------------+
2447c478bd9Sstevel@tonic-gate  * +-| tq_task.tqent_prev  |	   +--| tqent_prev |     | +--| tqent_prev |  ^
2457c478bd9Sstevel@tonic-gate  * | +---------------------+	      +------------+     |    +------------+  |
2467c478bd9Sstevel@tonic-gate  * | |...		   |	      | ...        |     |    | ...        |  |
2477c478bd9Sstevel@tonic-gate  * | +---------------------+	      +------------+     |    +------------+  |
2487c478bd9Sstevel@tonic-gate  * |                                      ^              |                    |
2497c478bd9Sstevel@tonic-gate  * |                                      |              |                    |
2507c478bd9Sstevel@tonic-gate  * +--------------------------------------+--------------+       TQ_APPEND() -+
2517c478bd9Sstevel@tonic-gate  *   |             |                      |
2527c478bd9Sstevel@tonic-gate  *   |...          |   taskq_thread()-----+
2537c478bd9Sstevel@tonic-gate  *   +-------------+
2547c478bd9Sstevel@tonic-gate  *   | tq_buckets  |--+-------> [ NULL ] (for regular task queues)
2557c478bd9Sstevel@tonic-gate  *   +-------------+  |
2567c478bd9Sstevel@tonic-gate  *                    |   DYNAMIC TASK QUEUES:
2577c478bd9Sstevel@tonic-gate  *                    |
2587c478bd9Sstevel@tonic-gate  *                    +-> taskq_bucket[nCPU]		taskq_bucket_dispatch()
2597c478bd9Sstevel@tonic-gate  *                        +-------------------+                    ^
2607c478bd9Sstevel@tonic-gate  *                   +--->| tqbucket_lock     |                    |
2617c478bd9Sstevel@tonic-gate  *                   |    +-------------------+   +--------+      +--------+
2627c478bd9Sstevel@tonic-gate  *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  | ^
2637c478bd9Sstevel@tonic-gate  *                   |    +-------------------+<--+--------+<--...+--------+ |
2647c478bd9Sstevel@tonic-gate  *                   |    | ...               |   | thread |      | thread | |
2657c478bd9Sstevel@tonic-gate  *                   |    +-------------------+   +--------+      +--------+ |
2667c478bd9Sstevel@tonic-gate  *                   |    +-------------------+                              |
2677c478bd9Sstevel@tonic-gate  * taskq_dispatch()--+--->| tqbucket_lock     |             TQ_APPEND()------+
2687c478bd9Sstevel@tonic-gate  *      TQ_HASH()    |    +-------------------+   +--------+      +--------+
2697c478bd9Sstevel@tonic-gate  *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  |
2707c478bd9Sstevel@tonic-gate  *                   |    +-------------------+<--+--------+<--...+--------+
2717c478bd9Sstevel@tonic-gate  *                   |    | ...               |   | thread |      | thread |
2727c478bd9Sstevel@tonic-gate  *                   |    +-------------------+   +--------+      +--------+
2737c478bd9Sstevel@tonic-gate  *		     +--->	...
2747c478bd9Sstevel@tonic-gate  *
2757c478bd9Sstevel@tonic-gate  *
2767c478bd9Sstevel@tonic-gate  * Task queues use the tq_task field to link new entries into the queue. The
2777c478bd9Sstevel@tonic-gate  * queue is a circular doubly-linked list. Entries are put at the end with
2787c478bd9Sstevel@tonic-gate  * TQ_APPEND() and processed from the front of the list by taskq_thread() in
2797c478bd9Sstevel@tonic-gate  * FIFO order. Task queue entries are cached in the free list managed by
2807c478bd9Sstevel@tonic-gate  * taskq_ent_alloc() and taskq_ent_free() functions.
2817c478bd9Sstevel@tonic-gate  *
2827c478bd9Sstevel@tonic-gate  *	All threads used by task queues set the t_taskq field of the thread
2837c478bd9Sstevel@tonic-gate  *	to point to the task queue.
2847c478bd9Sstevel@tonic-gate  *
2852e0c549eSJonathan Adams  * Taskq Thread Management -----------------------------------------------------
2862e0c549eSJonathan Adams  *
2872e0c549eSJonathan Adams  * Taskq's non-dynamic threads are managed with several variables and flags:
2882e0c549eSJonathan Adams  *
2892e0c549eSJonathan Adams  *	* tq_nthreads	- The number of threads in taskq_thread() for the
2902e0c549eSJonathan Adams  *			  taskq.
2912e0c549eSJonathan Adams  *
2922e0c549eSJonathan Adams  *	* tq_active	- The number of threads not waiting on a CV in
2932e0c549eSJonathan Adams  *			  taskq_thread(); includes newly created threads
2942e0c549eSJonathan Adams  *			  not yet counted in tq_nthreads.
2952e0c549eSJonathan Adams  *
2962e0c549eSJonathan Adams  *	* tq_nthreads_target
2972e0c549eSJonathan Adams  *			- The number of threads desired for the taskq.
2982e0c549eSJonathan Adams  *
2992e0c549eSJonathan Adams  *	* tq_flags & TASKQ_CHANGING
3002e0c549eSJonathan Adams  *			- Indicates that tq_nthreads != tq_nthreads_target.
3012e0c549eSJonathan Adams  *
3022e0c549eSJonathan Adams  *	* tq_flags & TASKQ_THREAD_CREATED
3032e0c549eSJonathan Adams  *			- Indicates that a thread is being created in the taskq.
3042e0c549eSJonathan Adams  *
3052e0c549eSJonathan Adams  * During creation, tq_nthreads and tq_active are set to 0, and
3062e0c549eSJonathan Adams  * tq_nthreads_target is set to the number of threads desired.  The
307*35a5a358SJonathan Adams  * TASKQ_CHANGING flag is set, and taskq_thread_create() is called to
308*35a5a358SJonathan Adams  * create the first thread. taskq_thread_create() increments tq_active,
3092e0c549eSJonathan Adams  * sets TASKQ_THREAD_CREATED, and creates the new thread.
3102e0c549eSJonathan Adams  *
3112e0c549eSJonathan Adams  * Each thread starts in taskq_thread(), clears the TASKQ_THREAD_CREATED
3122e0c549eSJonathan Adams  * flag, and increments tq_nthreads.  It stores the new value of
3132e0c549eSJonathan Adams  * tq_nthreads as its "thread_id", and stores its thread pointer in the
3142e0c549eSJonathan Adams  * tq_threadlist at index (thread_id - 1).  We keep the thread_id space
3152e0c549eSJonathan Adams  * densely packed by requiring that only the largest thread_id can exit during
3162e0c549eSJonathan Adams  * normal adjustment.   The exception is during the destruction of the
3172e0c549eSJonathan Adams  * taskq; once tq_nthreads_target is set to zero, no new threads will be created
3182e0c549eSJonathan Adams  * for the taskq, so every thread can exit without any ordering being
3192e0c549eSJonathan Adams  * necessary.
3202e0c549eSJonathan Adams  *
3212e0c549eSJonathan Adams  * Threads will only process work if their thread id is <= tq_nthreads_target.
3222e0c549eSJonathan Adams  *
3232e0c549eSJonathan Adams  * When TASKQ_CHANGING is set, threads will check the current thread target
3242e0c549eSJonathan Adams  * whenever they wake up, and do whatever they can to apply its effects.
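 *
 * An illustrative sketch of that reaction (simplified; not the exact code
 * in taskq_thread()):
 *
 *	if (tq->tq_flags & TASKQ_CHANGING) {
 *		if (thread_id > tq->tq_nthreads_target)
 *			exit (only the largest thread_id may do so);
 *		else if (tq->tq_nthreads < tq->tq_nthreads_target &&
 *		    !(tq->tq_flags & TASKQ_THREAD_CREATED))
 *			create another thread;
 *		else if (tq->tq_nthreads == tq->tq_nthreads_target)
 *			clear TASKQ_CHANGING;
 *	}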
3252e0c549eSJonathan Adams  *
3262e0c549eSJonathan Adams  * TASKQ_THREADS_CPU_PCT -------------------------------------------------------
3272e0c549eSJonathan Adams  *
3282e0c549eSJonathan Adams  * When a taskq is created with TASKQ_THREADS_CPU_PCT, we store its requested
3292e0c549eSJonathan Adams  * percentage in tq_threads_ncpus_pct, start it off with the correct thread
3302e0c549eSJonathan Adams  * target, and add it to the taskq_cpupct_list for later adjustment.
3312e0c549eSJonathan Adams  *
3322e0c549eSJonathan Adams  * We register taskq_cpu_setup() to be called whenever a CPU changes state.  It
3332e0c549eSJonathan Adams  * walks the list of TASKQ_THREADS_CPU_PCT taskqs, adjusts their thread targets
3342e0c549eSJonathan Adams  * if need be, and wakes up all of the threads to process the change.
3352e0c549eSJonathan Adams  *
3362e0c549eSJonathan Adams  * Dynamic Task Queues Implementation ------------------------------------------
3377c478bd9Sstevel@tonic-gate  *
3387c478bd9Sstevel@tonic-gate  * For dynamic task queues there is a 1-to-1 mapping between a thread and a
3397c478bd9Sstevel@tonic-gate  * taskq_ent_t structure. Each entry is serviced by its own thread and each thread
3407c478bd9Sstevel@tonic-gate  * is controlled by a single entry.
3417c478bd9Sstevel@tonic-gate  *
3427c478bd9Sstevel@tonic-gate  * Entries are distributed over a set of buckets. To avoid using modulo
3437c478bd9Sstevel@tonic-gate  * arithmetic the number of buckets is 2^n and is determined as the number of
3447c478bd9Sstevel@tonic-gate  * CPUs in the system rounded down to the nearest power of two. Tunable
3457c478bd9Sstevel@tonic-gate  * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each entry
3467c478bd9Sstevel@tonic-gate  * is attached to a bucket for its lifetime and can't migrate to other buckets.
3477c478bd9Sstevel@tonic-gate  *
3487c478bd9Sstevel@tonic-gate  * Entries that have scheduled tasks are not placed in any list. The dispatch
3497c478bd9Sstevel@tonic-gate  * function sets their "func" and "arg" fields and signals the corresponding
3507c478bd9Sstevel@tonic-gate  * thread to execute the task. Once the thread executes the task it clears the
3517c478bd9Sstevel@tonic-gate  * "func" field and places the entry on the bucket's cache of free entries pointed
3527c478bd9Sstevel@tonic-gate  * to by the "tqbucket_freelist" field. ALL entries on the free list should have "func"
3537c478bd9Sstevel@tonic-gate  * field equal to NULL. The free list is a circular doubly-linked list identical
3547c478bd9Sstevel@tonic-gate  * in structure to the tq_task list above, but entries are taken from it in LIFO
3557c478bd9Sstevel@tonic-gate  * order - the last freed entry is the first to be allocated. The
3567c478bd9Sstevel@tonic-gate  * taskq_bucket_dispatch() function gets the most recently used entry from the
3577c478bd9Sstevel@tonic-gate  * free list, sets its "func" and "arg" fields and signals a worker thread.
3587c478bd9Sstevel@tonic-gate  *
3597c478bd9Sstevel@tonic-gate  * After executing each task, the per-entry thread taskq_d_thread() places its
3607c478bd9Sstevel@tonic-gate  * entry on the bucket free list and goes to a timed sleep. If it wakes up
3617c478bd9Sstevel@tonic-gate  * without getting a new task it removes the entry from the free list and destroys
3627c478bd9Sstevel@tonic-gate  * itself. The thread sleep time is controlled by a tunable variable
3637c478bd9Sstevel@tonic-gate  * `taskq_thread_timeout'.
3647c478bd9Sstevel@tonic-gate  *
3652e0c549eSJonathan Adams  * There are various statistics kept in the bucket which allow for later
3667c478bd9Sstevel@tonic-gate  * analysis of taskq usage patterns. Also, a global copy of taskq creation and
3677c478bd9Sstevel@tonic-gate  * death statistics is kept in the global taskq data structure. Since thread
3687c478bd9Sstevel@tonic-gate  * creation and death happen rarely, updating such global data does not present
3697c478bd9Sstevel@tonic-gate  * a performance problem.
3707c478bd9Sstevel@tonic-gate  *
3717c478bd9Sstevel@tonic-gate  * NOTE: Threads are not bound to any CPU and there is absolutely no association
3727c478bd9Sstevel@tonic-gate  *       between the bucket and actual thread CPU, so buckets are used only to
3737c478bd9Sstevel@tonic-gate  *	 split resources and reduce resource contention. Having threads attached
3747c478bd9Sstevel@tonic-gate  *	 to the CPU denoted by a bucket may reduce the number of times the job
3757c478bd9Sstevel@tonic-gate  *	 switches between CPUs.
3767c478bd9Sstevel@tonic-gate  *
3777c478bd9Sstevel@tonic-gate  *	 The current algorithm creates a thread whenever a bucket has no free
3787c478bd9Sstevel@tonic-gate  *	 entries. It would be nice to know how many threads are in the running
3797c478bd9Sstevel@tonic-gate  *	 state and don't create threads if all CPUs are busy with existing
3807c478bd9Sstevel@tonic-gate  *	 tasks, but it is unclear how such a strategy can be implemented.
3817c478bd9Sstevel@tonic-gate  *
3827c478bd9Sstevel@tonic-gate  *	 Currently buckets are created statically as an array attached to the
3837c478bd9Sstevel@tonic-gate  *	 task queue. On systems with nCPUs < max_ncpus this may waste system
3847c478bd9Sstevel@tonic-gate  *	 memory. One solution may be allocation of buckets when they are first
3857c478bd9Sstevel@tonic-gate  *	 touched, but it is not clear how useful it is.
3867c478bd9Sstevel@tonic-gate  *
3872e0c549eSJonathan Adams  * SUSPEND/RESUME implementation -----------------------------------------------
3887c478bd9Sstevel@tonic-gate  *
3897c478bd9Sstevel@tonic-gate  *	Before executing a task, taskq_thread() (which services non-dynamic task
3907c478bd9Sstevel@tonic-gate  *	queues) obtains the taskq's thread lock as a reader. The taskq_suspend()
3917c478bd9Sstevel@tonic-gate  *	function gets the same lock as a writer blocking all non-dynamic task
3927c478bd9Sstevel@tonic-gate  *	execution. The taskq_resume() function releases the lock allowing
3937c478bd9Sstevel@tonic-gate  *	taskq_thread to continue execution.
3947c478bd9Sstevel@tonic-gate  *
3957c478bd9Sstevel@tonic-gate  *	For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
3967c478bd9Sstevel@tonic-gate  *	taskq_suspend() function. After that taskq_bucket_dispatch() always
3977c478bd9Sstevel@tonic-gate  *	fails, so that taskq_dispatch() will either enqueue tasks for a
3987c478bd9Sstevel@tonic-gate  *	suspended backing queue or fail if TQ_NOQUEUE is specified in dispatch
3997c478bd9Sstevel@tonic-gate  *	flags.
4007c478bd9Sstevel@tonic-gate  *
4017c478bd9Sstevel@tonic-gate  *	NOTE: taskq_suspend() does not immediately block any tasks already
4027c478bd9Sstevel@tonic-gate  *	      scheduled for dynamic task queues. It only suspends new tasks
4037c478bd9Sstevel@tonic-gate  *	      scheduled after taskq_suspend() was called.
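 *
 *	An illustrative use (sketch):
 *
 *		taskq_suspend(tq);
 *		ASSERT(taskq_suspended(tq));
 *		... modify state that queued tasks depend on ...
 *		taskq_resume(tq);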
4047c478bd9Sstevel@tonic-gate  *
4057c478bd9Sstevel@tonic-gate  *	The taskq_member() function works by comparing the thread's t_taskq
4067c478bd9Sstevel@tonic-gate  *	pointer with the passed taskq pointer.
4077c478bd9Sstevel@tonic-gate  *
4082e0c549eSJonathan Adams  * LOCKS and LOCK Hierarchy ----------------------------------------------------
4097c478bd9Sstevel@tonic-gate  *
4102e0c549eSJonathan Adams  *   There are three locks used in task queues:
4117c478bd9Sstevel@tonic-gate  *
4122e0c549eSJonathan Adams  *   1) The taskq_t's tq_lock, protecting global task queue state.
4137c478bd9Sstevel@tonic-gate  *
4147c478bd9Sstevel@tonic-gate  *   2) Each per-CPU bucket has a lock for bucket management.
4157c478bd9Sstevel@tonic-gate  *
4162e0c549eSJonathan Adams  *   3) The global taskq_cpupct_lock, which protects the list of
4172e0c549eSJonathan Adams  *      TASKQ_THREADS_CPU_PCT taskqs.
4182e0c549eSJonathan Adams  *
4192e0c549eSJonathan Adams  *   If both (1) and (2) are needed, tq_lock should be taken *after* the bucket
4207c478bd9Sstevel@tonic-gate  *   lock.
4217c478bd9Sstevel@tonic-gate  *
4222e0c549eSJonathan Adams  *   If both (1) and (3) are needed, tq_lock should be taken *after*
4232e0c549eSJonathan Adams  *   taskq_cpupct_lock.
4242e0c549eSJonathan Adams  *
4252e0c549eSJonathan Adams  * DEBUG FACILITIES ------------------------------------------------------------
4267c478bd9Sstevel@tonic-gate  *
4277c478bd9Sstevel@tonic-gate  * For DEBUG kernels it is possible to induce random failures in the
4287c478bd9Sstevel@tonic-gate  * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The
4297c478bd9Sstevel@tonic-gate  * taskq_dmtbf and taskq_smtbf tunables control the mean time between induced
4307c478bd9Sstevel@tonic-gate  * failures for dynamic and static task queues respectively.
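 *
 * For example, on a DEBUG kernel the following /etc/system setting makes
 * roughly one of every 100 TQ_NOSLEEP dispatches to dynamic task queues
 * fail:
 *
 *	set taskq_dmtbf = 100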
4317c478bd9Sstevel@tonic-gate  *
4327c478bd9Sstevel@tonic-gate  * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
4337c478bd9Sstevel@tonic-gate  *
4342e0c549eSJonathan Adams  * TUNABLES --------------------------------------------------------------------
4357c478bd9Sstevel@tonic-gate  *
4367c478bd9Sstevel@tonic-gate  *	system_taskq_size	- Size of the global system_taskq.
4377c478bd9Sstevel@tonic-gate  *				  This value is multiplied by nCPUs to determine
4387c478bd9Sstevel@tonic-gate  *				  actual size.
4397c478bd9Sstevel@tonic-gate  *				  Default value: 64
4407c478bd9Sstevel@tonic-gate  *
4412e0c549eSJonathan Adams  *	taskq_minimum_nthreads_max
4422e0c549eSJonathan Adams  *				- Minimum size of the thread list for a taskq.
4432e0c549eSJonathan Adams  *				  Useful for testing different thread pool
4442e0c549eSJonathan Adams  *				  sizes by overwriting tq_nthreads_target.
4452e0c549eSJonathan Adams  *
4467c478bd9Sstevel@tonic-gate  *	taskq_thread_timeout	- Maximum idle time for taskq_d_thread()
4477c478bd9Sstevel@tonic-gate  *				  Default value: 5 minutes
4487c478bd9Sstevel@tonic-gate  *
4497c478bd9Sstevel@tonic-gate  *	taskq_maxbuckets	- Maximum number of buckets in any task queue
4507c478bd9Sstevel@tonic-gate  *				  Default value: 128
4517c478bd9Sstevel@tonic-gate  *
4527c478bd9Sstevel@tonic-gate  *	taskq_search_depth	- Maximum # of buckets searched for a free entry
4537c478bd9Sstevel@tonic-gate  *				  Default value: 4
4547c478bd9Sstevel@tonic-gate  *
4557c478bd9Sstevel@tonic-gate  *	taskq_dmtbf		- Mean time between induced dispatch failures
4567c478bd9Sstevel@tonic-gate  *				  for dynamic task queues.
4577c478bd9Sstevel@tonic-gate  *				  Default value: UINT_MAX (no induced failures)
4587c478bd9Sstevel@tonic-gate  *
4597c478bd9Sstevel@tonic-gate  *	taskq_smtbf		- Mean time between induced dispatch failures
4607c478bd9Sstevel@tonic-gate  *				  for static task queues.
4617c478bd9Sstevel@tonic-gate  *				  Default value: UINT_MAX (no induced failures)
4627c478bd9Sstevel@tonic-gate  *
4632e0c549eSJonathan Adams  * CONDITIONAL compilation -----------------------------------------------------
4647c478bd9Sstevel@tonic-gate  *
4657c478bd9Sstevel@tonic-gate  *    TASKQ_STATISTIC	- If set, enables per-bucket statistics (default).
4667c478bd9Sstevel@tonic-gate  *
4677c478bd9Sstevel@tonic-gate  */
4687c478bd9Sstevel@tonic-gate 
4697c478bd9Sstevel@tonic-gate #include <sys/taskq_impl.h>
4707c478bd9Sstevel@tonic-gate #include <sys/thread.h>
4717c478bd9Sstevel@tonic-gate #include <sys/proc.h>
4727c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
4737c478bd9Sstevel@tonic-gate #include <sys/vmem.h>
4747c478bd9Sstevel@tonic-gate #include <sys/callb.h>
475*35a5a358SJonathan Adams #include <sys/class.h>
4767c478bd9Sstevel@tonic-gate #include <sys/systm.h>
4777c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
4787c478bd9Sstevel@tonic-gate #include <sys/debug.h>
4797c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h>	/* For throttlefree */
4807c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
4817c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
482*35a5a358SJonathan Adams #include <sys/cpupart.h>
4837c478bd9Sstevel@tonic-gate #include <sys/sdt.h>
484*35a5a358SJonathan Adams #include <sys/sysdc.h>
4852e0c549eSJonathan Adams #include <sys/note.h>
4867c478bd9Sstevel@tonic-gate 
4877c478bd9Sstevel@tonic-gate static kmem_cache_t *taskq_ent_cache, *taskq_cache;
4887c478bd9Sstevel@tonic-gate 
4897c478bd9Sstevel@tonic-gate /*
4902e0c549eSJonathan Adams  * Pseudo instance numbers for taskqs without an explicitly provided instance.
4917c478bd9Sstevel@tonic-gate  */
4927c478bd9Sstevel@tonic-gate static vmem_t *taskq_id_arena;
4937c478bd9Sstevel@tonic-gate 
4947c478bd9Sstevel@tonic-gate /* Global system task queue for common use */
4957c478bd9Sstevel@tonic-gate taskq_t	*system_taskq;
4967c478bd9Sstevel@tonic-gate 
4977c478bd9Sstevel@tonic-gate /*
4982e0c549eSJonathan Adams  * Maximum number of entries in the global system taskq is
4997c478bd9Sstevel@tonic-gate  *	system_taskq_size * max_ncpus
5007c478bd9Sstevel@tonic-gate  */
5017c478bd9Sstevel@tonic-gate #define	SYSTEM_TASKQ_SIZE 64
5027c478bd9Sstevel@tonic-gate int system_taskq_size = SYSTEM_TASKQ_SIZE;
5037c478bd9Sstevel@tonic-gate 
5047c478bd9Sstevel@tonic-gate /*
5052e0c549eSJonathan Adams  * Minimum size for tq_nthreads_max; useful for those who want to play around
5062e0c549eSJonathan Adams  * with increasing a taskq's tq_nthreads_target.
5072e0c549eSJonathan Adams  */
5082e0c549eSJonathan Adams int taskq_minimum_nthreads_max = 1;
5092e0c549eSJonathan Adams 
510*35a5a358SJonathan Adams /*
511*35a5a358SJonathan Adams  * We want to ensure that when taskq_create() returns, there is at least
512*35a5a358SJonathan Adams  * one thread ready to handle requests.  To guarantee this, we have to wait
513*35a5a358SJonathan Adams  * for the second thread, since the first one cannot process requests until
514*35a5a358SJonathan Adams  * the second thread has been created.
515*35a5a358SJonathan Adams  */
516*35a5a358SJonathan Adams #define	TASKQ_CREATE_ACTIVE_THREADS	2
517*35a5a358SJonathan Adams 
5182e0c549eSJonathan Adams /* Maximum percentage allowed for TASKQ_THREADS_CPU_PCT */
5192e0c549eSJonathan Adams #define	TASKQ_CPUPCT_MAX_PERCENT	1000
5202e0c549eSJonathan Adams int taskq_cpupct_max_percent = TASKQ_CPUPCT_MAX_PERCENT;
5212e0c549eSJonathan Adams 
5222e0c549eSJonathan Adams /*
5237c478bd9Sstevel@tonic-gate  * Dynamic task queue threads that don't get any work within
5247c478bd9Sstevel@tonic-gate  * taskq_thread_timeout destroy themselves.
5257c478bd9Sstevel@tonic-gate  */
5267c478bd9Sstevel@tonic-gate #define	TASKQ_THREAD_TIMEOUT (60 * 5)
5277c478bd9Sstevel@tonic-gate int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;
5287c478bd9Sstevel@tonic-gate 
5297c478bd9Sstevel@tonic-gate #define	TASKQ_MAXBUCKETS 128
5307c478bd9Sstevel@tonic-gate int taskq_maxbuckets = TASKQ_MAXBUCKETS;
5317c478bd9Sstevel@tonic-gate 
5327c478bd9Sstevel@tonic-gate  * When a bucket has no available entries other buckets are tried.
5337c478bd9Sstevel@tonic-gate  * The taskq_search_depth parameter limits the number of buckets that we search
5347c478bd9Sstevel@tonic-gate  * taskq_search_depth parameter limits the amount of buckets that we search
5357c478bd9Sstevel@tonic-gate  * before failing. This is mostly useful in systems with many CPUs where we may
5367c478bd9Sstevel@tonic-gate  * spend too much time scanning busy buckets.
5377c478bd9Sstevel@tonic-gate  */
5387c478bd9Sstevel@tonic-gate #define	TASKQ_SEARCH_DEPTH 4
5397c478bd9Sstevel@tonic-gate int taskq_search_depth = TASKQ_SEARCH_DEPTH;
5407c478bd9Sstevel@tonic-gate 
5417c478bd9Sstevel@tonic-gate /*
5427c478bd9Sstevel@tonic-gate  * Hashing function: mix various bits of x. May be pretty much anything.
5437c478bd9Sstevel@tonic-gate  */
5447c478bd9Sstevel@tonic-gate #define	TQ_HASH(x) ((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))
5457c478bd9Sstevel@tonic-gate 
5467c478bd9Sstevel@tonic-gate /*
5477c478bd9Sstevel@tonic-gate  * We do not create any new threads when the system is low on memory and has
5487c478bd9Sstevel@tonic-gate  * started throttling memory allocations. The following macro tries to estimate
5497c478bd9Sstevel@tonic-gate  * such a condition.
5507c478bd9Sstevel@tonic-gate  */
5517c478bd9Sstevel@tonic-gate #define	ENOUGH_MEMORY() (freemem > throttlefree)
5527c478bd9Sstevel@tonic-gate 
5537c478bd9Sstevel@tonic-gate /*
5547c478bd9Sstevel@tonic-gate  * Static functions.
5557c478bd9Sstevel@tonic-gate  */
5567c478bd9Sstevel@tonic-gate static taskq_t	*taskq_create_common(const char *, int, int, pri_t, int,
557*35a5a358SJonathan Adams     int, proc_t *, uint_t, uint_t);
5587c478bd9Sstevel@tonic-gate static void taskq_thread(void *);
5597c478bd9Sstevel@tonic-gate static void taskq_d_thread(taskq_ent_t *);
5607c478bd9Sstevel@tonic-gate static void taskq_bucket_extend(void *);
5617c478bd9Sstevel@tonic-gate static int  taskq_constructor(void *, void *, int);
5627c478bd9Sstevel@tonic-gate static void taskq_destructor(void *, void *);
5637c478bd9Sstevel@tonic-gate static int  taskq_ent_constructor(void *, void *, int);
5647c478bd9Sstevel@tonic-gate static void taskq_ent_destructor(void *, void *);
5657c478bd9Sstevel@tonic-gate static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
5667c478bd9Sstevel@tonic-gate static void taskq_ent_free(taskq_t *, taskq_ent_t *);
5677c478bd9Sstevel@tonic-gate static taskq_ent_t *taskq_bucket_dispatch(taskq_bucket_t *, task_func_t,
5687c478bd9Sstevel@tonic-gate     void *);
5697c478bd9Sstevel@tonic-gate 
5707c478bd9Sstevel@tonic-gate /*
5717c478bd9Sstevel@tonic-gate  * Task queues kstats.
5727c478bd9Sstevel@tonic-gate  */
5737c478bd9Sstevel@tonic-gate struct taskq_kstat {
574*35a5a358SJonathan Adams 	kstat_named_t	tq_pid;
5757c478bd9Sstevel@tonic-gate 	kstat_named_t	tq_tasks;
5767c478bd9Sstevel@tonic-gate 	kstat_named_t	tq_executed;
5777c478bd9Sstevel@tonic-gate 	kstat_named_t	tq_maxtasks;
5787c478bd9Sstevel@tonic-gate 	kstat_named_t	tq_totaltime;
5797c478bd9Sstevel@tonic-gate 	kstat_named_t	tq_nalloc;
5807c478bd9Sstevel@tonic-gate 	kstat_named_t	tq_nactive;
5817c478bd9Sstevel@tonic-gate 	kstat_named_t	tq_pri;
5827c478bd9Sstevel@tonic-gate 	kstat_named_t	tq_nthreads;
5837c478bd9Sstevel@tonic-gate } taskq_kstat = {
584*35a5a358SJonathan Adams 	{ "pid",		KSTAT_DATA_UINT64 },
5857c478bd9Sstevel@tonic-gate 	{ "tasks",		KSTAT_DATA_UINT64 },
5867c478bd9Sstevel@tonic-gate 	{ "executed",		KSTAT_DATA_UINT64 },
5877c478bd9Sstevel@tonic-gate 	{ "maxtasks",		KSTAT_DATA_UINT64 },
5887c478bd9Sstevel@tonic-gate 	{ "totaltime",		KSTAT_DATA_UINT64 },
5897c478bd9Sstevel@tonic-gate 	{ "nalloc",		KSTAT_DATA_UINT64 },
5907c478bd9Sstevel@tonic-gate 	{ "nactive",		KSTAT_DATA_UINT64 },
5917c478bd9Sstevel@tonic-gate 	{ "priority",		KSTAT_DATA_UINT64 },
5927c478bd9Sstevel@tonic-gate 	{ "threads",		KSTAT_DATA_UINT64 },
5937c478bd9Sstevel@tonic-gate };
5947c478bd9Sstevel@tonic-gate 
5957c478bd9Sstevel@tonic-gate struct taskq_d_kstat {
5967c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_pri;
5977c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_btasks;
5987c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_bexecuted;
5997c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_bmaxtasks;
6007c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_bnalloc;
6017c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_bnactive;
6027c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_btotaltime;
6037c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_hits;
6047c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_misses;
6057c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_overflows;
6067c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_tcreates;
6077c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_tdeaths;
6087c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_maxthreads;
6097c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_nomem;
6107c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_disptcreates;
6117c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_totaltime;
6127c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_nalloc;
6137c478bd9Sstevel@tonic-gate 	kstat_named_t	tqd_nfree;
6147c478bd9Sstevel@tonic-gate } taskq_d_kstat = {
6157c478bd9Sstevel@tonic-gate 	{ "priority",		KSTAT_DATA_UINT64 },
6167c478bd9Sstevel@tonic-gate 	{ "btasks",		KSTAT_DATA_UINT64 },
6177c478bd9Sstevel@tonic-gate 	{ "bexecuted",		KSTAT_DATA_UINT64 },
6187c478bd9Sstevel@tonic-gate 	{ "bmaxtasks",		KSTAT_DATA_UINT64 },
6197c478bd9Sstevel@tonic-gate 	{ "bnalloc",		KSTAT_DATA_UINT64 },
6207c478bd9Sstevel@tonic-gate 	{ "bnactive",		KSTAT_DATA_UINT64 },
6217c478bd9Sstevel@tonic-gate 	{ "btotaltime",		KSTAT_DATA_UINT64 },
6227c478bd9Sstevel@tonic-gate 	{ "hits",		KSTAT_DATA_UINT64 },
6237c478bd9Sstevel@tonic-gate 	{ "misses",		KSTAT_DATA_UINT64 },
6247c478bd9Sstevel@tonic-gate 	{ "overflows",		KSTAT_DATA_UINT64 },
6257c478bd9Sstevel@tonic-gate 	{ "tcreates",		KSTAT_DATA_UINT64 },
6267c478bd9Sstevel@tonic-gate 	{ "tdeaths",		KSTAT_DATA_UINT64 },
6277c478bd9Sstevel@tonic-gate 	{ "maxthreads",		KSTAT_DATA_UINT64 },
6287c478bd9Sstevel@tonic-gate 	{ "nomem",		KSTAT_DATA_UINT64 },
6297c478bd9Sstevel@tonic-gate 	{ "disptcreates",	KSTAT_DATA_UINT64 },
6307c478bd9Sstevel@tonic-gate 	{ "totaltime",		KSTAT_DATA_UINT64 },
6317c478bd9Sstevel@tonic-gate 	{ "nalloc",		KSTAT_DATA_UINT64 },
6327c478bd9Sstevel@tonic-gate 	{ "nfree",		KSTAT_DATA_UINT64 },
6337c478bd9Sstevel@tonic-gate };
6347c478bd9Sstevel@tonic-gate 
6357c478bd9Sstevel@tonic-gate static kmutex_t taskq_kstat_lock;
6367c478bd9Sstevel@tonic-gate static kmutex_t taskq_d_kstat_lock;
6377c478bd9Sstevel@tonic-gate static int taskq_kstat_update(kstat_t *, int);
6387c478bd9Sstevel@tonic-gate static int taskq_d_kstat_update(kstat_t *, int);
6397c478bd9Sstevel@tonic-gate 
6402e0c549eSJonathan Adams /*
641*35a5a358SJonathan Adams  * List of all TASKQ_THREADS_CPU_PCT taskqs.
6422e0c549eSJonathan Adams  */
643*35a5a358SJonathan Adams static list_t taskq_cpupct_list;	/* protected by cpu_lock */
6447c478bd9Sstevel@tonic-gate 
6457c478bd9Sstevel@tonic-gate /*
6467c478bd9Sstevel@tonic-gate  * Collect per-bucket statistic when TASKQ_STATISTIC is defined.
6477c478bd9Sstevel@tonic-gate  */
6487c478bd9Sstevel@tonic-gate #define	TASKQ_STATISTIC 1
6497c478bd9Sstevel@tonic-gate 
6507c478bd9Sstevel@tonic-gate #if TASKQ_STATISTIC
6517c478bd9Sstevel@tonic-gate #define	TQ_STAT(b, x)	b->tqbucket_stat.x++
6527c478bd9Sstevel@tonic-gate #else
6537c478bd9Sstevel@tonic-gate #define	TQ_STAT(b, x)
6547c478bd9Sstevel@tonic-gate #endif
6557c478bd9Sstevel@tonic-gate 
6567c478bd9Sstevel@tonic-gate /*
6577c478bd9Sstevel@tonic-gate  * Random fault injection.
6587c478bd9Sstevel@tonic-gate  */
6597c478bd9Sstevel@tonic-gate uint_t taskq_random;
6607c478bd9Sstevel@tonic-gate uint_t taskq_dmtbf = UINT_MAX;    /* mean time between injected failures */
6617c478bd9Sstevel@tonic-gate uint_t taskq_smtbf = UINT_MAX;    /* mean time between injected failures */
6627c478bd9Sstevel@tonic-gate 
6637c478bd9Sstevel@tonic-gate /*
6647c478bd9Sstevel@tonic-gate  * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
6657c478bd9Sstevel@tonic-gate  *
6667c478bd9Sstevel@tonic-gate  * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
6677c478bd9Sstevel@tonic-gate  * they could prepopulate the cache and make sure that they do not use more
6687c478bd9Sstevel@tonic-gate  * than minalloc entries.  So, fault injection in this case ensures that
6697c478bd9Sstevel@tonic-gate  * either TASKQ_PREPOPULATE is not set or there are more entries allocated
6707c478bd9Sstevel@tonic-gate  * than is specified by minalloc.  TQ_NOALLOC dispatches are always allowed
6717c478bd9Sstevel@tonic-gate  * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
6727c478bd9Sstevel@tonic-gate  * dispatches.
6737c478bd9Sstevel@tonic-gate  */
6747c478bd9Sstevel@tonic-gate #ifdef DEBUG
6757c478bd9Sstevel@tonic-gate #define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)		\
6767c478bd9Sstevel@tonic-gate 	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
6777c478bd9Sstevel@tonic-gate 	if ((flag & TQ_NOSLEEP) &&				\
6787c478bd9Sstevel@tonic-gate 	    taskq_random < 1771875 / taskq_dmtbf) {		\
6797c478bd9Sstevel@tonic-gate 		return (NULL);					\
6807c478bd9Sstevel@tonic-gate 	}
6817c478bd9Sstevel@tonic-gate 
6827c478bd9Sstevel@tonic-gate #define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)		\
6837c478bd9Sstevel@tonic-gate 	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
6847c478bd9Sstevel@tonic-gate 	if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) &&		\
6857c478bd9Sstevel@tonic-gate 	    (!(tq->tq_flags & TASKQ_PREPOPULATE) ||		\
6867c478bd9Sstevel@tonic-gate 	    (tq->tq_nalloc > tq->tq_minalloc)) &&		\
6877c478bd9Sstevel@tonic-gate 	    (taskq_random < (1771875 / taskq_smtbf))) {		\
6887c478bd9Sstevel@tonic-gate 		mutex_exit(&tq->tq_lock);			\
6897c478bd9Sstevel@tonic-gate 		return (NULL);					\
6907c478bd9Sstevel@tonic-gate 	}
6917c478bd9Sstevel@tonic-gate #else
6927c478bd9Sstevel@tonic-gate #define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
6937c478bd9Sstevel@tonic-gate #define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
6947c478bd9Sstevel@tonic-gate #endif
6957c478bd9Sstevel@tonic-gate 
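/*
 * An entry list is empty when the head's tqent_next and tqent_prev both
 * point back at the head itself.
 */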
6967c478bd9Sstevel@tonic-gate #define	IS_EMPTY(l) (((l).tqent_prev == (l).tqent_next) &&	\
6977c478bd9Sstevel@tonic-gate 	((l).tqent_prev == &(l)))
6987c478bd9Sstevel@tonic-gate 
6997c478bd9Sstevel@tonic-gate /*
7007c478bd9Sstevel@tonic-gate  * Append `tqe' at the end of the doubly-linked list denoted by l.
7017c478bd9Sstevel@tonic-gate  */
7027c478bd9Sstevel@tonic-gate #define	TQ_APPEND(l, tqe) {					\
7037c478bd9Sstevel@tonic-gate 	tqe->tqent_next = &l;					\
7047c478bd9Sstevel@tonic-gate 	tqe->tqent_prev = l.tqent_prev;				\
7057c478bd9Sstevel@tonic-gate 	tqe->tqent_next->tqent_prev = tqe;			\
7067c478bd9Sstevel@tonic-gate 	tqe->tqent_prev->tqent_next = tqe;			\
7077c478bd9Sstevel@tonic-gate }
708*35a5a358SJonathan Adams /*
709*35a5a358SJonathan Adams  * Prepend 'tqe' to the beginning of l
710*35a5a358SJonathan Adams  */
711*35a5a358SJonathan Adams #define	TQ_PREPEND(l, tqe) {					\
712*35a5a358SJonathan Adams 	tqe->tqent_next = l.tqent_next;				\
713*35a5a358SJonathan Adams 	tqe->tqent_prev = &l;					\
714*35a5a358SJonathan Adams 	tqe->tqent_next->tqent_prev = tqe;			\
715*35a5a358SJonathan Adams 	tqe->tqent_prev->tqent_next = tqe;			\
716*35a5a358SJonathan Adams }
7177c478bd9Sstevel@tonic-gate 
7187c478bd9Sstevel@tonic-gate /*
7197c478bd9Sstevel@tonic-gate  * Schedule a task specified by func and arg into the task queue entry tqe.
7207c478bd9Sstevel@tonic-gate  */
721*35a5a358SJonathan Adams #define	TQ_DO_ENQUEUE(tq, tqe, func, arg, front) {			\
7227c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tq->tq_lock));				\
723*35a5a358SJonathan Adams 	_NOTE(CONSTCOND)						\
724*35a5a358SJonathan Adams 	if (front) {							\
725*35a5a358SJonathan Adams 		TQ_PREPEND(tq->tq_task, tqe);				\
726*35a5a358SJonathan Adams 	} else {							\
7277c478bd9Sstevel@tonic-gate 		TQ_APPEND(tq->tq_task, tqe);				\
728*35a5a358SJonathan Adams 	}								\
7297c478bd9Sstevel@tonic-gate 	tqe->tqent_func = (func);					\
7307c478bd9Sstevel@tonic-gate 	tqe->tqent_arg = (arg);						\
7317c478bd9Sstevel@tonic-gate 	tq->tq_tasks++;							\
7327c478bd9Sstevel@tonic-gate 	if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks)		\
7337c478bd9Sstevel@tonic-gate 		tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed;	\
7347c478bd9Sstevel@tonic-gate 	cv_signal(&tq->tq_dispatch_cv);					\
7357c478bd9Sstevel@tonic-gate 	DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
7367c478bd9Sstevel@tonic-gate }
7377c478bd9Sstevel@tonic-gate 
738*35a5a358SJonathan Adams #define	TQ_ENQUEUE(tq, tqe, func, arg)					\
739*35a5a358SJonathan Adams 	TQ_DO_ENQUEUE(tq, tqe, func, arg, 0)
740*35a5a358SJonathan Adams 
741*35a5a358SJonathan Adams #define	TQ_ENQUEUE_FRONT(tq, tqe, func, arg)				\
742*35a5a358SJonathan Adams 	TQ_DO_ENQUEUE(tq, tqe, func, arg, 1)
743*35a5a358SJonathan Adams 
7447c478bd9Sstevel@tonic-gate /*
7457c478bd9Sstevel@tonic-gate  * Do-nothing task which may be used to prepopulate thread caches.
7467c478bd9Sstevel@tonic-gate  */
7477c478bd9Sstevel@tonic-gate /*ARGSUSED*/
7487c478bd9Sstevel@tonic-gate void
7497c478bd9Sstevel@tonic-gate nulltask(void *unused)
7507c478bd9Sstevel@tonic-gate {
7517c478bd9Sstevel@tonic-gate }
7527c478bd9Sstevel@tonic-gate 
7537c478bd9Sstevel@tonic-gate /*ARGSUSED*/
7547c478bd9Sstevel@tonic-gate static int
7557c478bd9Sstevel@tonic-gate taskq_constructor(void *buf, void *cdrarg, int kmflags)
7567c478bd9Sstevel@tonic-gate {
7577c478bd9Sstevel@tonic-gate 	taskq_t *tq = buf;
7587c478bd9Sstevel@tonic-gate 
7597c478bd9Sstevel@tonic-gate 	bzero(tq, sizeof (taskq_t));
7607c478bd9Sstevel@tonic-gate 
7617c478bd9Sstevel@tonic-gate 	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
7627c478bd9Sstevel@tonic-gate 	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
7637c478bd9Sstevel@tonic-gate 	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
7642e0c549eSJonathan Adams 	cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
7657c478bd9Sstevel@tonic-gate 	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
7667c478bd9Sstevel@tonic-gate 
7677c478bd9Sstevel@tonic-gate 	tq->tq_task.tqent_next = &tq->tq_task;
7687c478bd9Sstevel@tonic-gate 	tq->tq_task.tqent_prev = &tq->tq_task;
7697c478bd9Sstevel@tonic-gate 
7707c478bd9Sstevel@tonic-gate 	return (0);
7717c478bd9Sstevel@tonic-gate }
7727c478bd9Sstevel@tonic-gate 
7737c478bd9Sstevel@tonic-gate /*ARGSUSED*/
7747c478bd9Sstevel@tonic-gate static void
7757c478bd9Sstevel@tonic-gate taskq_destructor(void *buf, void *cdrarg)
7767c478bd9Sstevel@tonic-gate {
7777c478bd9Sstevel@tonic-gate 	taskq_t *tq = buf;
7787c478bd9Sstevel@tonic-gate 
7792e0c549eSJonathan Adams 	ASSERT(tq->tq_nthreads == 0);
7802e0c549eSJonathan Adams 	ASSERT(tq->tq_buckets == NULL);
7812e0c549eSJonathan Adams 	ASSERT(tq->tq_tcreates == 0);
7822e0c549eSJonathan Adams 	ASSERT(tq->tq_tdeaths == 0);
7832e0c549eSJonathan Adams 
7847c478bd9Sstevel@tonic-gate 	mutex_destroy(&tq->tq_lock);
7857c478bd9Sstevel@tonic-gate 	rw_destroy(&tq->tq_threadlock);
7867c478bd9Sstevel@tonic-gate 	cv_destroy(&tq->tq_dispatch_cv);
7872e0c549eSJonathan Adams 	cv_destroy(&tq->tq_exit_cv);
7887c478bd9Sstevel@tonic-gate 	cv_destroy(&tq->tq_wait_cv);
7897c478bd9Sstevel@tonic-gate }
7907c478bd9Sstevel@tonic-gate 
7917c478bd9Sstevel@tonic-gate /*ARGSUSED*/
7927c478bd9Sstevel@tonic-gate static int
7937c478bd9Sstevel@tonic-gate taskq_ent_constructor(void *buf, void *cdrarg, int kmflags)
7947c478bd9Sstevel@tonic-gate {
7957c478bd9Sstevel@tonic-gate 	taskq_ent_t *tqe = buf;
7967c478bd9Sstevel@tonic-gate 
7977c478bd9Sstevel@tonic-gate 	tqe->tqent_thread = NULL;
7987c478bd9Sstevel@tonic-gate 	cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);
7997c478bd9Sstevel@tonic-gate 
8007c478bd9Sstevel@tonic-gate 	return (0);
8017c478bd9Sstevel@tonic-gate }
8027c478bd9Sstevel@tonic-gate 
8037c478bd9Sstevel@tonic-gate /*ARGSUSED*/
8047c478bd9Sstevel@tonic-gate static void
8057c478bd9Sstevel@tonic-gate taskq_ent_destructor(void *buf, void *cdrarg)
8067c478bd9Sstevel@tonic-gate {
8077c478bd9Sstevel@tonic-gate 	taskq_ent_t *tqe = buf;
8087c478bd9Sstevel@tonic-gate 
8097c478bd9Sstevel@tonic-gate 	ASSERT(tqe->tqent_thread == NULL);
8107c478bd9Sstevel@tonic-gate 	cv_destroy(&tqe->tqent_cv);
8117c478bd9Sstevel@tonic-gate }
8127c478bd9Sstevel@tonic-gate 
8137c478bd9Sstevel@tonic-gate void
8147c478bd9Sstevel@tonic-gate taskq_init(void)
8157c478bd9Sstevel@tonic-gate {
8167c478bd9Sstevel@tonic-gate 	taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
8177c478bd9Sstevel@tonic-gate 	    sizeof (taskq_ent_t), 0, taskq_ent_constructor,
8187c478bd9Sstevel@tonic-gate 	    taskq_ent_destructor, NULL, NULL, NULL, 0);
8197c478bd9Sstevel@tonic-gate 	taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
8207c478bd9Sstevel@tonic-gate 	    0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
8217c478bd9Sstevel@tonic-gate 	taskq_id_arena = vmem_create("taskq_id_arena",
8227c478bd9Sstevel@tonic-gate 	    (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0,
8237c478bd9Sstevel@tonic-gate 	    VM_SLEEP | VMC_IDENTIFIER);
8242e0c549eSJonathan Adams 
825*35a5a358SJonathan Adams 	list_create(&taskq_cpupct_list, sizeof (taskq_t),
826*35a5a358SJonathan Adams 	    offsetof(taskq_t, tq_cpupct_link));
827*35a5a358SJonathan Adams }
828*35a5a358SJonathan Adams 
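/*
 * Recompute the thread target of a TASKQ_THREADS_CPU_PCT taskq for the
 * given online-CPU count and wake the taskq's threads so that they can
 * apply the change.  Called with both cpu_lock and tq_lock held.
 */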
829*35a5a358SJonathan Adams static void
830*35a5a358SJonathan Adams taskq_update_nthreads(taskq_t *tq, uint_t ncpus)
831*35a5a358SJonathan Adams {
832*35a5a358SJonathan Adams 	uint_t newtarget = TASKQ_THREADS_PCT(ncpus, tq->tq_threads_ncpus_pct);
833*35a5a358SJonathan Adams 
834*35a5a358SJonathan Adams 	ASSERT(MUTEX_HELD(&cpu_lock));
835*35a5a358SJonathan Adams 	ASSERT(MUTEX_HELD(&tq->tq_lock));
836*35a5a358SJonathan Adams 
837*35a5a358SJonathan Adams 	/* We must be going from non-zero to non-zero; no exiting. */
838*35a5a358SJonathan Adams 	ASSERT3U(tq->tq_nthreads_target, !=, 0);
839*35a5a358SJonathan Adams 	ASSERT3U(newtarget, !=, 0);
840*35a5a358SJonathan Adams 
841*35a5a358SJonathan Adams 	ASSERT3U(newtarget, <=, tq->tq_nthreads_max);
842*35a5a358SJonathan Adams 	if (newtarget != tq->tq_nthreads_target) {
843*35a5a358SJonathan Adams 		tq->tq_flags |= TASKQ_CHANGING;
844*35a5a358SJonathan Adams 		tq->tq_nthreads_target = newtarget;
845*35a5a358SJonathan Adams 		cv_broadcast(&tq->tq_dispatch_cv);
846*35a5a358SJonathan Adams 		cv_broadcast(&tq->tq_exit_cv);
847*35a5a358SJonathan Adams 	}
848*35a5a358SJonathan Adams }
849*35a5a358SJonathan Adams 
850*35a5a358SJonathan Adams /* called during task queue creation */
851*35a5a358SJonathan Adams static void
852*35a5a358SJonathan Adams taskq_cpupct_install(taskq_t *tq, cpupart_t *cpup)
853*35a5a358SJonathan Adams {
854*35a5a358SJonathan Adams 	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
855*35a5a358SJonathan Adams 
856*35a5a358SJonathan Adams 	mutex_enter(&cpu_lock);
857*35a5a358SJonathan Adams 	mutex_enter(&tq->tq_lock);
858*35a5a358SJonathan Adams 	tq->tq_cpupart = cpup->cp_id;
859*35a5a358SJonathan Adams 	taskq_update_nthreads(tq, cpup->cp_ncpus);
860*35a5a358SJonathan Adams 	mutex_exit(&tq->tq_lock);
861*35a5a358SJonathan Adams 
862*35a5a358SJonathan Adams 	list_insert_tail(&taskq_cpupct_list, tq);
863*35a5a358SJonathan Adams 	mutex_exit(&cpu_lock);
864*35a5a358SJonathan Adams }
865*35a5a358SJonathan Adams 
866*35a5a358SJonathan Adams static void
867*35a5a358SJonathan Adams taskq_cpupct_remove(taskq_t *tq)
868*35a5a358SJonathan Adams {
869*35a5a358SJonathan Adams 	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
870*35a5a358SJonathan Adams 
871*35a5a358SJonathan Adams 	mutex_enter(&cpu_lock);
872*35a5a358SJonathan Adams 	list_remove(&taskq_cpupct_list, tq);
873*35a5a358SJonathan Adams 	mutex_exit(&cpu_lock);
8742e0c549eSJonathan Adams }
8752e0c549eSJonathan Adams 
8762e0c549eSJonathan Adams /*ARGSUSED*/
8772e0c549eSJonathan Adams static int
8782e0c549eSJonathan Adams taskq_cpu_setup(cpu_setup_t what, int id, void *arg)
8792e0c549eSJonathan Adams {
880*35a5a358SJonathan Adams 	taskq_t *tq;
881*35a5a358SJonathan Adams 	cpupart_t *cp = cpu[id]->cpu_part;
882*35a5a358SJonathan Adams 	uint_t ncpus = cp->cp_ncpus;
8832e0c549eSJonathan Adams 
884*35a5a358SJonathan Adams 	ASSERT(MUTEX_HELD(&cpu_lock));
885*35a5a358SJonathan Adams 	ASSERT(ncpus > 0);
886*35a5a358SJonathan Adams 
887*35a5a358SJonathan Adams 	switch (what) {
888*35a5a358SJonathan Adams 	case CPU_OFF:
889*35a5a358SJonathan Adams 	case CPU_CPUPART_OUT:
8902e0c549eSJonathan Adams 		/* offlines are called *before* the cpu is offlined. */
891*35a5a358SJonathan Adams 		if (ncpus > 1)
892*35a5a358SJonathan Adams 			ncpus--;
893*35a5a358SJonathan Adams 		break;
8942e0c549eSJonathan Adams 
895*35a5a358SJonathan Adams 	case CPU_ON:
896*35a5a358SJonathan Adams 	case CPU_CPUPART_IN:
897*35a5a358SJonathan Adams 		break;
898*35a5a358SJonathan Adams 
899*35a5a358SJonathan Adams 	default:
900*35a5a358SJonathan Adams 		return (0);		/* doesn't affect cpu count */
9012e0c549eSJonathan Adams 	}
9022e0c549eSJonathan Adams 
903*35a5a358SJonathan Adams 	for (tq = list_head(&taskq_cpupct_list); tq != NULL;
904*35a5a358SJonathan Adams 	    tq = list_next(&taskq_cpupct_list, tq)) {
9052e0c549eSJonathan Adams 
9062e0c549eSJonathan Adams 		mutex_enter(&tq->tq_lock);
907*35a5a358SJonathan Adams 		/*
908*35a5a358SJonathan Adams 		 * If the taskq is part of the processor set that is changing,
909*35a5a358SJonathan Adams 		 * update its nthreads_target.
910*35a5a358SJonathan Adams 		 */
911*35a5a358SJonathan Adams 		if (tq->tq_cpupart == cp->cp_id) {
912*35a5a358SJonathan Adams 			taskq_update_nthreads(tq, ncpus);
9132e0c549eSJonathan Adams 		}
9142e0c549eSJonathan Adams 		mutex_exit(&tq->tq_lock);
9152e0c549eSJonathan Adams 	}
9162e0c549eSJonathan Adams 	return (0);
9172e0c549eSJonathan Adams }
9182e0c549eSJonathan Adams 
9192e0c549eSJonathan Adams void
9202e0c549eSJonathan Adams taskq_mp_init(void)
9212e0c549eSJonathan Adams {
9222e0c549eSJonathan Adams 	mutex_enter(&cpu_lock);
9232e0c549eSJonathan Adams 	register_cpu_setup_func(taskq_cpu_setup, NULL);
924*35a5a358SJonathan Adams 	/*
925*35a5a358SJonathan Adams 	 * Make sure we're up to date.  At this point in boot, there is only
926*35a5a358SJonathan Adams 	 * one processor set, so we only have to update the current CPU.
927*35a5a358SJonathan Adams 	 */
928*35a5a358SJonathan Adams 	(void) taskq_cpu_setup(CPU_ON, CPU->cpu_id, NULL);
9292e0c549eSJonathan Adams 	mutex_exit(&cpu_lock);
9307c478bd9Sstevel@tonic-gate }
9317c478bd9Sstevel@tonic-gate 
9327c478bd9Sstevel@tonic-gate /*
9337c478bd9Sstevel@tonic-gate  * Create global system dynamic task queue.
9347c478bd9Sstevel@tonic-gate  */
9357c478bd9Sstevel@tonic-gate void
9367c478bd9Sstevel@tonic-gate system_taskq_init(void)
9377c478bd9Sstevel@tonic-gate {
9387c478bd9Sstevel@tonic-gate 	system_taskq = taskq_create_common("system_taskq", 0,
939*35a5a358SJonathan Adams 	    system_taskq_size * max_ncpus, minclsyspri, 4, 512, &p0, 0,
9407c478bd9Sstevel@tonic-gate 	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
9417c478bd9Sstevel@tonic-gate }
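
/*
 * Example (illustrative sketch): clients may dispatch work to the global
 * system_taskq; "my_softc" and "my_reset" below are hypothetical names.
 *
 *	static void
 *	my_reset(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *		... slow or blocking work, safe to run asynchronously ...
 *	}
 *
 *	if (taskq_dispatch(system_taskq, my_reset, sc, TQ_NOSLEEP) == NULL)
 *		... dispatch failed; run inline, or retry from a context
 *		    that can use TQ_SLEEP ...
 */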
9427c478bd9Sstevel@tonic-gate 
9437c478bd9Sstevel@tonic-gate /*
9447c478bd9Sstevel@tonic-gate  * taskq_ent_alloc()
9457c478bd9Sstevel@tonic-gate  *
9467c478bd9Sstevel@tonic-gate  * Allocates a new taskq_ent_t structure either from the free list or from the
9477c478bd9Sstevel@tonic-gate  * cache. Returns NULL if it can't be allocated.
9487c478bd9Sstevel@tonic-gate  *
9497c478bd9Sstevel@tonic-gate  * Assumes: tq->tq_lock is held.
9507c478bd9Sstevel@tonic-gate  */
9517c478bd9Sstevel@tonic-gate static taskq_ent_t *
9527c478bd9Sstevel@tonic-gate taskq_ent_alloc(taskq_t *tq, int flags)
9537c478bd9Sstevel@tonic-gate {
9547c478bd9Sstevel@tonic-gate 	int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
9557c478bd9Sstevel@tonic-gate 
9567c478bd9Sstevel@tonic-gate 	taskq_ent_t *tqe;
9577c478bd9Sstevel@tonic-gate 
9587c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tq->tq_lock));
9597c478bd9Sstevel@tonic-gate 
9607c478bd9Sstevel@tonic-gate 	/*
9617c478bd9Sstevel@tonic-gate 	 * TQ_NOALLOC allocations are allowed to use the freelist, even if
9627c478bd9Sstevel@tonic-gate 	 * we are below tq_minalloc.
9637c478bd9Sstevel@tonic-gate 	 */
9647c478bd9Sstevel@tonic-gate 	if ((tqe = tq->tq_freelist) != NULL &&
9657c478bd9Sstevel@tonic-gate 	    ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
9667c478bd9Sstevel@tonic-gate 		tq->tq_freelist = tqe->tqent_next;
9677c478bd9Sstevel@tonic-gate 	} else {
9687c478bd9Sstevel@tonic-gate 		if (flags & TQ_NOALLOC)
9697c478bd9Sstevel@tonic-gate 			return (NULL);
9707c478bd9Sstevel@tonic-gate 
9717c478bd9Sstevel@tonic-gate 		mutex_exit(&tq->tq_lock);
9727c478bd9Sstevel@tonic-gate 		if (tq->tq_nalloc >= tq->tq_maxalloc) {
9737c478bd9Sstevel@tonic-gate 			if (kmflags & KM_NOSLEEP) {
9747c478bd9Sstevel@tonic-gate 				mutex_enter(&tq->tq_lock);
9757c478bd9Sstevel@tonic-gate 				return (NULL);
9767c478bd9Sstevel@tonic-gate 			}
9777c478bd9Sstevel@tonic-gate 			/*
9787c478bd9Sstevel@tonic-gate 			 * We don't want to exceed tq_maxalloc, but we can't
9797c478bd9Sstevel@tonic-gate 			 * wait for other tasks to complete (and thus free up
9807c478bd9Sstevel@tonic-gate 			 * task structures) without risking deadlock with
9817c478bd9Sstevel@tonic-gate 			 * the caller.  So, we just delay for one second
9827c478bd9Sstevel@tonic-gate 			 * to throttle the allocation rate.
9837c478bd9Sstevel@tonic-gate 			 */
9847c478bd9Sstevel@tonic-gate 			delay(hz);
9857c478bd9Sstevel@tonic-gate 		}
9867c478bd9Sstevel@tonic-gate 		tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);
9877c478bd9Sstevel@tonic-gate 		mutex_enter(&tq->tq_lock);
9887c478bd9Sstevel@tonic-gate 		if (tqe != NULL)
9897c478bd9Sstevel@tonic-gate 			tq->tq_nalloc++;
9907c478bd9Sstevel@tonic-gate 	}
9917c478bd9Sstevel@tonic-gate 	return (tqe);
9927c478bd9Sstevel@tonic-gate }
9937c478bd9Sstevel@tonic-gate 
9947c478bd9Sstevel@tonic-gate /*
9957c478bd9Sstevel@tonic-gate  * taskq_ent_free()
9967c478bd9Sstevel@tonic-gate  *
9977c478bd9Sstevel@tonic-gate  * Free a taskq_ent_t structure by either putting it on the free list or freeing
9987c478bd9Sstevel@tonic-gate  * it to the cache.
9997c478bd9Sstevel@tonic-gate  *
10007c478bd9Sstevel@tonic-gate  * Assumes: tq->tq_lock is held.
10017c478bd9Sstevel@tonic-gate  */
10027c478bd9Sstevel@tonic-gate static void
10037c478bd9Sstevel@tonic-gate taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
10047c478bd9Sstevel@tonic-gate {
10057c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tq->tq_lock));
10067c478bd9Sstevel@tonic-gate 
10077c478bd9Sstevel@tonic-gate 	if (tq->tq_nalloc <= tq->tq_minalloc) {
10087c478bd9Sstevel@tonic-gate 		tqe->tqent_next = tq->tq_freelist;
10097c478bd9Sstevel@tonic-gate 		tq->tq_freelist = tqe;
10107c478bd9Sstevel@tonic-gate 	} else {
10117c478bd9Sstevel@tonic-gate 		tq->tq_nalloc--;
10127c478bd9Sstevel@tonic-gate 		mutex_exit(&tq->tq_lock);
10137c478bd9Sstevel@tonic-gate 		kmem_cache_free(taskq_ent_cache, tqe);
10147c478bd9Sstevel@tonic-gate 		mutex_enter(&tq->tq_lock);
10157c478bd9Sstevel@tonic-gate 	}
10167c478bd9Sstevel@tonic-gate }
10177c478bd9Sstevel@tonic-gate 
10187c478bd9Sstevel@tonic-gate /*
10197c478bd9Sstevel@tonic-gate  * Dispatch a task "func(arg)" to a free entry of bucket b.
10207c478bd9Sstevel@tonic-gate  *
10217c478bd9Sstevel@tonic-gate  * Assumes: no bucket locks are held.
10227c478bd9Sstevel@tonic-gate  *
10237c478bd9Sstevel@tonic-gate  * Returns: a pointer to an entry if dispatch was successful.
10247c478bd9Sstevel@tonic-gate  *	    NULL if there are no free entries or if the bucket is suspended.
10257c478bd9Sstevel@tonic-gate  */
10267c478bd9Sstevel@tonic-gate static taskq_ent_t *
10277c478bd9Sstevel@tonic-gate taskq_bucket_dispatch(taskq_bucket_t *b, task_func_t func, void *arg)
10287c478bd9Sstevel@tonic-gate {
10297c478bd9Sstevel@tonic-gate 	taskq_ent_t *tqe;
10307c478bd9Sstevel@tonic-gate 
10317c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&b->tqbucket_lock));
10327c478bd9Sstevel@tonic-gate 	ASSERT(func != NULL);
10337c478bd9Sstevel@tonic-gate 
10347c478bd9Sstevel@tonic-gate 	mutex_enter(&b->tqbucket_lock);
10357c478bd9Sstevel@tonic-gate 
10367c478bd9Sstevel@tonic-gate 	ASSERT(b->tqbucket_nfree != 0 || IS_EMPTY(b->tqbucket_freelist));
10377c478bd9Sstevel@tonic-gate 	ASSERT(b->tqbucket_nfree == 0 || !IS_EMPTY(b->tqbucket_freelist));
10387c478bd9Sstevel@tonic-gate 
10397c478bd9Sstevel@tonic-gate 	/*
10407c478bd9Sstevel@tonic-gate 	 * Get an entry from the freelist if there is one,
10417c478bd9Sstevel@tonic-gate 	 * and schedule the task into it.
10427c478bd9Sstevel@tonic-gate 	 */
10437c478bd9Sstevel@tonic-gate 	if ((b->tqbucket_nfree != 0) &&
10447c478bd9Sstevel@tonic-gate 	    !(b->tqbucket_flags & TQBUCKET_SUSPEND)) {
10457c478bd9Sstevel@tonic-gate 		tqe = b->tqbucket_freelist.tqent_prev;
10467c478bd9Sstevel@tonic-gate 
10477c478bd9Sstevel@tonic-gate 		ASSERT(tqe != &b->tqbucket_freelist);
10487c478bd9Sstevel@tonic-gate 		ASSERT(tqe->tqent_thread != NULL);
10497c478bd9Sstevel@tonic-gate 
10507c478bd9Sstevel@tonic-gate 		tqe->tqent_prev->tqent_next = tqe->tqent_next;
10517c478bd9Sstevel@tonic-gate 		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
10527c478bd9Sstevel@tonic-gate 		b->tqbucket_nalloc++;
10537c478bd9Sstevel@tonic-gate 		b->tqbucket_nfree--;
10547c478bd9Sstevel@tonic-gate 		tqe->tqent_func = func;
10557c478bd9Sstevel@tonic-gate 		tqe->tqent_arg = arg;
10567c478bd9Sstevel@tonic-gate 		TQ_STAT(b, tqs_hits);
10577c478bd9Sstevel@tonic-gate 		cv_signal(&tqe->tqent_cv);
10587c478bd9Sstevel@tonic-gate 		DTRACE_PROBE2(taskq__d__enqueue, taskq_bucket_t *, b,
10597c478bd9Sstevel@tonic-gate 		    taskq_ent_t *, tqe);
10607c478bd9Sstevel@tonic-gate 	} else {
10617c478bd9Sstevel@tonic-gate 		tqe = NULL;
10627c478bd9Sstevel@tonic-gate 		TQ_STAT(b, tqs_misses);
10637c478bd9Sstevel@tonic-gate 	}
10647c478bd9Sstevel@tonic-gate 	mutex_exit(&b->tqbucket_lock);
10657c478bd9Sstevel@tonic-gate 	return (tqe);
10667c478bd9Sstevel@tonic-gate }
10677c478bd9Sstevel@tonic-gate 
10687c478bd9Sstevel@tonic-gate /*
10697c478bd9Sstevel@tonic-gate  * Dispatch a task.
10707c478bd9Sstevel@tonic-gate  *
10717c478bd9Sstevel@tonic-gate  * Assumes: func != NULL
10727c478bd9Sstevel@tonic-gate  *
10737c478bd9Sstevel@tonic-gate  * Returns: NULL if dispatch failed.
10747c478bd9Sstevel@tonic-gate  *	    non-NULL if task dispatched successfully.
10757c478bd9Sstevel@tonic-gate  *	    The actual return value is the pointer to the taskq entry used to
10767c478bd9Sstevel@tonic-gate  *	    dispatch the task. This is useful for debugging.
10777c478bd9Sstevel@tonic-gate  */
10787c478bd9Sstevel@tonic-gate /* ARGSUSED */
10797c478bd9Sstevel@tonic-gate taskqid_t
10807c478bd9Sstevel@tonic-gate taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
10817c478bd9Sstevel@tonic-gate {
10827c478bd9Sstevel@tonic-gate 	taskq_bucket_t *bucket = NULL;	/* Which bucket needs extension */
10837c478bd9Sstevel@tonic-gate 	taskq_ent_t *tqe = NULL;
10847c478bd9Sstevel@tonic-gate 	taskq_ent_t *tqe1;
10857c478bd9Sstevel@tonic-gate 	uint_t bsize;
10867c478bd9Sstevel@tonic-gate 
10877c478bd9Sstevel@tonic-gate 	ASSERT(tq != NULL);
10887c478bd9Sstevel@tonic-gate 	ASSERT(func != NULL);
10897c478bd9Sstevel@tonic-gate 
10907c478bd9Sstevel@tonic-gate 	if (!(tq->tq_flags & TASKQ_DYNAMIC)) {
10917c478bd9Sstevel@tonic-gate 		/*
10927c478bd9Sstevel@tonic-gate 		 * TQ_NOQUEUE flag can't be used with non-dynamic task queues.
10937c478bd9Sstevel@tonic-gate 		 */
10947c478bd9Sstevel@tonic-gate 		ASSERT(! (flags & TQ_NOQUEUE));
10957c478bd9Sstevel@tonic-gate 		/*
10967c478bd9Sstevel@tonic-gate 		 * Enqueue the task to the underlying queue.
10977c478bd9Sstevel@tonic-gate 		 */
10987c478bd9Sstevel@tonic-gate 		mutex_enter(&tq->tq_lock);
10997c478bd9Sstevel@tonic-gate 
11007c478bd9Sstevel@tonic-gate 		TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);
11017c478bd9Sstevel@tonic-gate 
11027c478bd9Sstevel@tonic-gate 		if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
11037c478bd9Sstevel@tonic-gate 			mutex_exit(&tq->tq_lock);
11047c478bd9Sstevel@tonic-gate 			return (NULL);
11057c478bd9Sstevel@tonic-gate 		}
1106*35a5a358SJonathan Adams 		if (flags & TQ_FRONT) {
1107*35a5a358SJonathan Adams 			TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1108*35a5a358SJonathan Adams 		} else {
11097c478bd9Sstevel@tonic-gate 			TQ_ENQUEUE(tq, tqe, func, arg);
1110*35a5a358SJonathan Adams 		}
11117c478bd9Sstevel@tonic-gate 		mutex_exit(&tq->tq_lock);
11127c478bd9Sstevel@tonic-gate 		return ((taskqid_t)tqe);
11137c478bd9Sstevel@tonic-gate 	}
11147c478bd9Sstevel@tonic-gate 
11157c478bd9Sstevel@tonic-gate 	/*
11167c478bd9Sstevel@tonic-gate 	 * Dynamic taskq dispatching.
11177c478bd9Sstevel@tonic-gate 	 */
1118*35a5a358SJonathan Adams 	ASSERT(!(flags & (TQ_NOALLOC | TQ_FRONT)));
11197c478bd9Sstevel@tonic-gate 	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flags);
11207c478bd9Sstevel@tonic-gate 
11217c478bd9Sstevel@tonic-gate 	bsize = tq->tq_nbuckets;
11227c478bd9Sstevel@tonic-gate 
11237c478bd9Sstevel@tonic-gate 	if (bsize == 1) {
11247c478bd9Sstevel@tonic-gate 		/*
11257c478bd9Sstevel@tonic-gate 		 * In the single-CPU case there is only one bucket, so get
11267c478bd9Sstevel@tonic-gate 		 * the entry directly from it.
11277c478bd9Sstevel@tonic-gate 		 */
11287c478bd9Sstevel@tonic-gate 		if ((tqe = taskq_bucket_dispatch(tq->tq_buckets, func, arg))
11297c478bd9Sstevel@tonic-gate 		    != NULL)
11307c478bd9Sstevel@tonic-gate 			return ((taskqid_t)tqe);	/* Fastpath */
11317c478bd9Sstevel@tonic-gate 		bucket = tq->tq_buckets;
11327c478bd9Sstevel@tonic-gate 	} else {
11337c478bd9Sstevel@tonic-gate 		int loopcount;
11347c478bd9Sstevel@tonic-gate 		taskq_bucket_t *b;
11357c478bd9Sstevel@tonic-gate 		uintptr_t h = ((uintptr_t)CPU + (uintptr_t)arg) >> 3;
11367c478bd9Sstevel@tonic-gate 
11377c478bd9Sstevel@tonic-gate 		h = TQ_HASH(h);
11387c478bd9Sstevel@tonic-gate 
11397c478bd9Sstevel@tonic-gate 		/*
11407c478bd9Sstevel@tonic-gate 		 * The 'bucket' points to the original bucket that we hit. If we
11417c478bd9Sstevel@tonic-gate 		 * can't allocate from it, we search other buckets, but only
11427c478bd9Sstevel@tonic-gate 		 * extend this one.
11437c478bd9Sstevel@tonic-gate 		 */
11447c478bd9Sstevel@tonic-gate 		b = &tq->tq_buckets[h & (bsize - 1)];
11457c478bd9Sstevel@tonic-gate 		ASSERT(b->tqbucket_taskq == tq);	/* Sanity check */
11467c478bd9Sstevel@tonic-gate 
11477c478bd9Sstevel@tonic-gate 		/*
11487c478bd9Sstevel@tonic-gate 		 * Do a quick check before grabbing the lock. If the bucket does
11497c478bd9Sstevel@tonic-gate 		 * not have free entries now, chances are very small that it
11507c478bd9Sstevel@tonic-gate 		 * will after we take the lock, so we just skip it.
11517c478bd9Sstevel@tonic-gate 		 */
11527c478bd9Sstevel@tonic-gate 		if (b->tqbucket_nfree != 0) {
11537c478bd9Sstevel@tonic-gate 			if ((tqe = taskq_bucket_dispatch(b, func, arg)) != NULL)
11547c478bd9Sstevel@tonic-gate 				return ((taskqid_t)tqe);	/* Fastpath */
11557c478bd9Sstevel@tonic-gate 		} else {
11567c478bd9Sstevel@tonic-gate 			TQ_STAT(b, tqs_misses);
11577c478bd9Sstevel@tonic-gate 		}
11587c478bd9Sstevel@tonic-gate 
11597c478bd9Sstevel@tonic-gate 		bucket = b;
11607c478bd9Sstevel@tonic-gate 		loopcount = MIN(taskq_search_depth, bsize);
11617c478bd9Sstevel@tonic-gate 		/*
11627c478bd9Sstevel@tonic-gate 		 * If the bucket dispatch failed, search up to loopcount other
11637c478bd9Sstevel@tonic-gate 		 * buckets before giving up and failing.
11647c478bd9Sstevel@tonic-gate 		 */
11657c478bd9Sstevel@tonic-gate 		do {
11667c478bd9Sstevel@tonic-gate 			b = &tq->tq_buckets[++h & (bsize - 1)];
11677c478bd9Sstevel@tonic-gate 			ASSERT(b->tqbucket_taskq == tq);  /* Sanity check */
11687c478bd9Sstevel@tonic-gate 			loopcount--;
11697c478bd9Sstevel@tonic-gate 
11707c478bd9Sstevel@tonic-gate 			if (b->tqbucket_nfree != 0) {
11717c478bd9Sstevel@tonic-gate 				tqe = taskq_bucket_dispatch(b, func, arg);
11727c478bd9Sstevel@tonic-gate 			} else {
11737c478bd9Sstevel@tonic-gate 				TQ_STAT(b, tqs_misses);
11747c478bd9Sstevel@tonic-gate 			}
11757c478bd9Sstevel@tonic-gate 		} while ((tqe == NULL) && (loopcount > 0));
11767c478bd9Sstevel@tonic-gate 	}
11777c478bd9Sstevel@tonic-gate 
11787c478bd9Sstevel@tonic-gate 	/*
11797c478bd9Sstevel@tonic-gate 	 * At this point we have either scheduled a task (tqe != NULL) or failed
11807c478bd9Sstevel@tonic-gate 	 * (tqe == NULL). Try to recover from failure.
11817c478bd9Sstevel@tonic-gate 	 */
11827c478bd9Sstevel@tonic-gate 
11837c478bd9Sstevel@tonic-gate 	/*
11847c478bd9Sstevel@tonic-gate 	 * For sleeping (TQ_SLEEP) dispatches, try to extend the bucket and retry.
11857c478bd9Sstevel@tonic-gate 	 */
11867c478bd9Sstevel@tonic-gate 	if ((tqe == NULL) && !(flags & TQ_NOSLEEP)) {
11877c478bd9Sstevel@tonic-gate 		/*
11887c478bd9Sstevel@tonic-gate 		 * taskq_bucket_extend() may fail to do anything, but this is
11897c478bd9Sstevel@tonic-gate 		 * fine - we deal with it later. If the bucket was successfully
11907c478bd9Sstevel@tonic-gate 		 * extended, there is a good chance that taskq_bucket_dispatch()
11917c478bd9Sstevel@tonic-gate 		 * will get this new entry, unless someone is racing with us and
11927c478bd9Sstevel@tonic-gate 		 * stealing the new entry from under our nose.
11937c478bd9Sstevel@tonic-gate 		 * taskq_bucket_extend() may sleep.
11947c478bd9Sstevel@tonic-gate 		 */
11957c478bd9Sstevel@tonic-gate 		taskq_bucket_extend(bucket);
11967c478bd9Sstevel@tonic-gate 		TQ_STAT(bucket, tqs_disptcreates);
11977c478bd9Sstevel@tonic-gate 		if ((tqe = taskq_bucket_dispatch(bucket, func, arg)) != NULL)
11987c478bd9Sstevel@tonic-gate 			return ((taskqid_t)tqe);
11997c478bd9Sstevel@tonic-gate 	}
12007c478bd9Sstevel@tonic-gate 
12017c478bd9Sstevel@tonic-gate 	ASSERT(bucket != NULL);
12027c478bd9Sstevel@tonic-gate 	/*
12037c478bd9Sstevel@tonic-gate 	 * Since there are not enough free entries in the bucket, extend it
12047c478bd9Sstevel@tonic-gate 	 * in the background using the backing queue.
12057c478bd9Sstevel@tonic-gate 	 */
12067c478bd9Sstevel@tonic-gate 	mutex_enter(&tq->tq_lock);
12077c478bd9Sstevel@tonic-gate 	if ((tqe1 = taskq_ent_alloc(tq, TQ_NOSLEEP)) != NULL) {
1208*35a5a358SJonathan Adams 		TQ_ENQUEUE(tq, tqe1, taskq_bucket_extend, bucket);
12097c478bd9Sstevel@tonic-gate 	} else {
12107c478bd9Sstevel@tonic-gate 		TQ_STAT(bucket, tqs_nomem);
12117c478bd9Sstevel@tonic-gate 	}
12127c478bd9Sstevel@tonic-gate 
12137c478bd9Sstevel@tonic-gate 	/*
12147c478bd9Sstevel@tonic-gate 	 * Dispatch failed and we can't find an entry to schedule a task.
12157c478bd9Sstevel@tonic-gate 	 * Revert to the backing queue unless TQ_NOQUEUE was specified.
12167c478bd9Sstevel@tonic-gate 	 */
12177c478bd9Sstevel@tonic-gate 	if ((tqe == NULL) && !(flags & TQ_NOQUEUE)) {
12187c478bd9Sstevel@tonic-gate 		if ((tqe = taskq_ent_alloc(tq, flags)) != NULL) {
12197c478bd9Sstevel@tonic-gate 			TQ_ENQUEUE(tq, tqe, func, arg);
12207c478bd9Sstevel@tonic-gate 		} else {
12217c478bd9Sstevel@tonic-gate 			TQ_STAT(bucket, tqs_nomem);
12227c478bd9Sstevel@tonic-gate 		}
12237c478bd9Sstevel@tonic-gate 	}
12247c478bd9Sstevel@tonic-gate 	mutex_exit(&tq->tq_lock);
12257c478bd9Sstevel@tonic-gate 
12267c478bd9Sstevel@tonic-gate 	return ((taskqid_t)tqe);
12277c478bd9Sstevel@tonic-gate }
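
/*
 * Example (illustrative sketch; "audit_cb" is a hypothetical callback):
 * TQ_NOSLEEP dispatches can fail and must be checked.
 *
 *	taskqid_t id;
 *
 *	id = taskq_dispatch(tq, audit_cb, arg, TQ_NOSLEEP);
 *	if (id == NULL)
 *		... no memory or no free entries; handle the failure ...
 *
 * TQ_FRONT puts the task at the head of a non-dynamic queue; as the
 * ASSERTs above enforce, neither TQ_FRONT nor TQ_NOALLOC is legal for
 * TASKQ_DYNAMIC queues, and TQ_NOQUEUE is legal only for them.
 */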
12287c478bd9Sstevel@tonic-gate 
12297c478bd9Sstevel@tonic-gate /*
12307c478bd9Sstevel@tonic-gate  * Wait for all pending tasks to complete.
12317c478bd9Sstevel@tonic-gate  * Calling taskq_wait from a task will cause deadlock.
12327c478bd9Sstevel@tonic-gate  */
12337c478bd9Sstevel@tonic-gate void
12347c478bd9Sstevel@tonic-gate taskq_wait(taskq_t *tq)
12357c478bd9Sstevel@tonic-gate {
12367c478bd9Sstevel@tonic-gate 	ASSERT(tq != curthread->t_taskq);
12377c478bd9Sstevel@tonic-gate 
12387c478bd9Sstevel@tonic-gate 	mutex_enter(&tq->tq_lock);
12397c478bd9Sstevel@tonic-gate 	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
12407c478bd9Sstevel@tonic-gate 		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
12417c478bd9Sstevel@tonic-gate 	mutex_exit(&tq->tq_lock);
12427c478bd9Sstevel@tonic-gate 
12437c478bd9Sstevel@tonic-gate 	if (tq->tq_flags & TASKQ_DYNAMIC) {
12447c478bd9Sstevel@tonic-gate 		taskq_bucket_t *b = tq->tq_buckets;
12457c478bd9Sstevel@tonic-gate 		int bid = 0;
12467c478bd9Sstevel@tonic-gate 		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
12477c478bd9Sstevel@tonic-gate 			mutex_enter(&b->tqbucket_lock);
12487c478bd9Sstevel@tonic-gate 			while (b->tqbucket_nalloc > 0)
12497c478bd9Sstevel@tonic-gate 				cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
12507c478bd9Sstevel@tonic-gate 			mutex_exit(&b->tqbucket_lock);
12517c478bd9Sstevel@tonic-gate 		}
12527c478bd9Sstevel@tonic-gate 	}
12537c478bd9Sstevel@tonic-gate }
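
/*
 * Example (sketch): taskq_wait() as a completion barrier for a batch of
 * dispatched tasks ("work" and "items" are hypothetical):
 *
 *	for (i = 0; i < n; i++)
 *		(void) taskq_dispatch(tq, work, &items[i], TQ_SLEEP);
 *	taskq_wait(tq);			-- all n tasks have completed
 *
 * Note the ASSERT above: waiting on the queue that the caller's own task
 * is running on can never be satisfied, hence the deadlock warning.
 */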
12547c478bd9Sstevel@tonic-gate 
12557c478bd9Sstevel@tonic-gate /*
12567c478bd9Sstevel@tonic-gate  * Suspend execution of tasks.
12577c478bd9Sstevel@tonic-gate  *
12587c478bd9Sstevel@tonic-gate  * Tasks in the queue part will be suspended immediately upon return from this
12597c478bd9Sstevel@tonic-gate  * function. Pending tasks in the dynamic part will continue to execute, but all
12607c478bd9Sstevel@tonic-gate  * new tasks will be suspended.
12617c478bd9Sstevel@tonic-gate  */
12627c478bd9Sstevel@tonic-gate void
12637c478bd9Sstevel@tonic-gate taskq_suspend(taskq_t *tq)
12647c478bd9Sstevel@tonic-gate {
12657c478bd9Sstevel@tonic-gate 	rw_enter(&tq->tq_threadlock, RW_WRITER);
12667c478bd9Sstevel@tonic-gate 
12677c478bd9Sstevel@tonic-gate 	if (tq->tq_flags & TASKQ_DYNAMIC) {
12687c478bd9Sstevel@tonic-gate 		taskq_bucket_t *b = tq->tq_buckets;
12697c478bd9Sstevel@tonic-gate 		int bid = 0;
12707c478bd9Sstevel@tonic-gate 		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
12717c478bd9Sstevel@tonic-gate 			mutex_enter(&b->tqbucket_lock);
12727c478bd9Sstevel@tonic-gate 			b->tqbucket_flags |= TQBUCKET_SUSPEND;
12737c478bd9Sstevel@tonic-gate 			mutex_exit(&b->tqbucket_lock);
12747c478bd9Sstevel@tonic-gate 		}
12757c478bd9Sstevel@tonic-gate 	}
12767c478bd9Sstevel@tonic-gate 	/*
12777c478bd9Sstevel@tonic-gate 	 * Mark task queue as being suspended. Needed for taskq_suspended().
12787c478bd9Sstevel@tonic-gate 	 */
12797c478bd9Sstevel@tonic-gate 	mutex_enter(&tq->tq_lock);
12807c478bd9Sstevel@tonic-gate 	ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
12817c478bd9Sstevel@tonic-gate 	tq->tq_flags |= TASKQ_SUSPENDED;
12827c478bd9Sstevel@tonic-gate 	mutex_exit(&tq->tq_lock);
12837c478bd9Sstevel@tonic-gate }
12847c478bd9Sstevel@tonic-gate 
12857c478bd9Sstevel@tonic-gate /*
12867c478bd9Sstevel@tonic-gate  * Returns: 1 if tq is suspended, 0 otherwise.
12877c478bd9Sstevel@tonic-gate  */
12887c478bd9Sstevel@tonic-gate int
12897c478bd9Sstevel@tonic-gate taskq_suspended(taskq_t *tq)
12907c478bd9Sstevel@tonic-gate {
12917c478bd9Sstevel@tonic-gate 	return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
12927c478bd9Sstevel@tonic-gate }
12937c478bd9Sstevel@tonic-gate 
12947c478bd9Sstevel@tonic-gate /*
12957c478bd9Sstevel@tonic-gate  * Resume taskq execution.
12967c478bd9Sstevel@tonic-gate  */
12977c478bd9Sstevel@tonic-gate void
12987c478bd9Sstevel@tonic-gate taskq_resume(taskq_t *tq)
12997c478bd9Sstevel@tonic-gate {
13007c478bd9Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));
13017c478bd9Sstevel@tonic-gate 
13027c478bd9Sstevel@tonic-gate 	if (tq->tq_flags & TASKQ_DYNAMIC) {
13037c478bd9Sstevel@tonic-gate 		taskq_bucket_t *b = tq->tq_buckets;
13047c478bd9Sstevel@tonic-gate 		int bid = 0;
13057c478bd9Sstevel@tonic-gate 		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
13067c478bd9Sstevel@tonic-gate 			mutex_enter(&b->tqbucket_lock);
13077c478bd9Sstevel@tonic-gate 			b->tqbucket_flags &= ~TQBUCKET_SUSPEND;
13087c478bd9Sstevel@tonic-gate 			mutex_exit(&b->tqbucket_lock);
13097c478bd9Sstevel@tonic-gate 		}
13107c478bd9Sstevel@tonic-gate 	}
13117c478bd9Sstevel@tonic-gate 	mutex_enter(&tq->tq_lock);
13127c478bd9Sstevel@tonic-gate 	ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
13137c478bd9Sstevel@tonic-gate 	tq->tq_flags &= ~TASKQ_SUSPENDED;
13147c478bd9Sstevel@tonic-gate 	mutex_exit(&tq->tq_lock);
13157c478bd9Sstevel@tonic-gate 
13167c478bd9Sstevel@tonic-gate 	rw_exit(&tq->tq_threadlock);
13177c478bd9Sstevel@tonic-gate }
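
/*
 * Example (sketch): taskq_suspend()/taskq_resume() bracket a section in
 * which no new tasks may start:
 *
 *	taskq_suspend(tq);
 *	ASSERT(taskq_suspended(tq));
 *	... reconfigure state that the tasks depend on ...
 *	taskq_resume(tq);
 *
 * taskq_suspend() returns holding tq_threadlock as writer and
 * taskq_resume() drops it, so the two calls must be paired in the same
 * thread.
 */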
13187c478bd9Sstevel@tonic-gate 
13197c478bd9Sstevel@tonic-gate int
13207c478bd9Sstevel@tonic-gate taskq_member(taskq_t *tq, kthread_t *thread)
13217c478bd9Sstevel@tonic-gate {
13227c478bd9Sstevel@tonic-gate 	return (thread->t_taskq == tq);
13237c478bd9Sstevel@tonic-gate }
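
/*
 * Example (sketch): taskq_member() is mostly useful in ASSERTs that catch
 * operations which would deadlock if issued from a task on the same queue:
 *
 *	ASSERT(!taskq_member(tq, curthread));
 *	taskq_wait(tq);
 */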
13247c478bd9Sstevel@tonic-gate 
1325*35a5a358SJonathan Adams /*
1326*35a5a358SJonathan Adams  * Creates a thread in the taskq.  We only allow one outstanding create at
1327*35a5a358SJonathan Adams  * a time.  We drop and reacquire the tq_lock in order to avoid blocking other
1328*35a5a358SJonathan Adams  * taskq activity while thread_create() or lwp_kernel_create() run.
1329*35a5a358SJonathan Adams  *
1330*35a5a358SJonathan Adams  * The first time we're called, we do some additional setup, and do not
1331*35a5a358SJonathan Adams  * return until there are enough threads to start servicing requests.
1332*35a5a358SJonathan Adams  */
13332e0c549eSJonathan Adams static void
13342e0c549eSJonathan Adams taskq_thread_create(taskq_t *tq)
13352e0c549eSJonathan Adams {
13362e0c549eSJonathan Adams 	kthread_t	*t;
1337*35a5a358SJonathan Adams 	const boolean_t	first = (tq->tq_nthreads == 0);
13382e0c549eSJonathan Adams 
13392e0c549eSJonathan Adams 	ASSERT(MUTEX_HELD(&tq->tq_lock));
1340*35a5a358SJonathan Adams 	ASSERT(tq->tq_flags & TASKQ_CHANGING);
1341*35a5a358SJonathan Adams 	ASSERT(tq->tq_nthreads < tq->tq_nthreads_target);
13422e0c549eSJonathan Adams 	ASSERT(!(tq->tq_flags & TASKQ_THREAD_CREATED));
13432e0c549eSJonathan Adams 
13452e0c549eSJonathan Adams 	tq->tq_flags |= TASKQ_THREAD_CREATED;
13462e0c549eSJonathan Adams 	tq->tq_active++;
1347*35a5a358SJonathan Adams 	mutex_exit(&tq->tq_lock);
1348*35a5a358SJonathan Adams 
1349*35a5a358SJonathan Adams 	if (tq->tq_proc != &p0) {
1350*35a5a358SJonathan Adams 		t = lwp_kernel_create(tq->tq_proc, taskq_thread, tq, TS_RUN,
1351*35a5a358SJonathan Adams 		    tq->tq_pri);
1352*35a5a358SJonathan Adams 	} else {
13532e0c549eSJonathan Adams 		t = thread_create(NULL, 0, taskq_thread, tq, 0, &p0, TS_RUN,
13542e0c549eSJonathan Adams 		    tq->tq_pri);
1355*35a5a358SJonathan Adams 	}
1356*35a5a358SJonathan Adams 
1357*35a5a358SJonathan Adams 	if (!first) {
1358*35a5a358SJonathan Adams 		mutex_enter(&tq->tq_lock);
1359*35a5a358SJonathan Adams 		return;
1360*35a5a358SJonathan Adams 	}
1361*35a5a358SJonathan Adams 
1362*35a5a358SJonathan Adams 	/*
1363*35a5a358SJonathan Adams 	 * We know the thread cannot go away, since tq cannot be
1364*35a5a358SJonathan Adams 	 * destroyed until creation has completed.  We can therefore
1365*35a5a358SJonathan Adams 	 * safely dereference t.
1366*35a5a358SJonathan Adams 	 */
1367*35a5a358SJonathan Adams 	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
1368*35a5a358SJonathan Adams 		taskq_cpupct_install(tq, t->t_cpupart);
1369*35a5a358SJonathan Adams 	}
1370*35a5a358SJonathan Adams 	mutex_enter(&tq->tq_lock);
1371*35a5a358SJonathan Adams 
1372*35a5a358SJonathan Adams 	/* Wait until we can service requests. */
1373*35a5a358SJonathan Adams 	while (tq->tq_nthreads != tq->tq_nthreads_target &&
1374*35a5a358SJonathan Adams 	    tq->tq_nthreads < TASKQ_CREATE_ACTIVE_THREADS) {
1375*35a5a358SJonathan Adams 		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1376*35a5a358SJonathan Adams 	}
13772e0c549eSJonathan Adams }
13782e0c549eSJonathan Adams 
1379e0ad97e3SJonathan Adams /*
1380e0ad97e3SJonathan Adams  * Common "sleep taskq thread" function, which handles CPR (suspend/resume)
1381e0ad97e3SJonathan Adams  * bookkeeping and gives debuggers a common point to find inactive threads.
1382e0ad97e3SJonathan Adams  */
1383e0ad97e3SJonathan Adams static clock_t
1384e0ad97e3SJonathan Adams taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv,
1385e0ad97e3SJonathan Adams     callb_cpr_t *cprinfo, clock_t timeout)
13862e0c549eSJonathan Adams {
1387e0ad97e3SJonathan Adams 	clock_t ret = 0;
1388e0ad97e3SJonathan Adams 
1389e0ad97e3SJonathan Adams 	if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
13902e0c549eSJonathan Adams 		CALLB_CPR_SAFE_BEGIN(cprinfo);
13912e0c549eSJonathan Adams 	}
1392e0ad97e3SJonathan Adams 	if (timeout < 0)
1393e0ad97e3SJonathan Adams 		cv_wait(cv, mx);
1394e0ad97e3SJonathan Adams 	else
1395d3d50737SRafael Vanoni 		ret = cv_reltimedwait(cv, mx, timeout, TR_CLOCK_TICK);
1396e0ad97e3SJonathan Adams 
1397e0ad97e3SJonathan Adams 	if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1398e0ad97e3SJonathan Adams 		CALLB_CPR_SAFE_END(cprinfo, mx);
1399e0ad97e3SJonathan Adams 	}
1400e0ad97e3SJonathan Adams 
1401e0ad97e3SJonathan Adams 	return (ret);
14022e0c549eSJonathan Adams }
14032e0c549eSJonathan Adams 
14047c478bd9Sstevel@tonic-gate /*
14057c478bd9Sstevel@tonic-gate  * Worker thread for processing task queue.
14067c478bd9Sstevel@tonic-gate  */
14077c478bd9Sstevel@tonic-gate static void
14087c478bd9Sstevel@tonic-gate taskq_thread(void *arg)
14097c478bd9Sstevel@tonic-gate {
14102e0c549eSJonathan Adams 	int thread_id;
14112e0c549eSJonathan Adams 
14127c478bd9Sstevel@tonic-gate 	taskq_t *tq = arg;
14137c478bd9Sstevel@tonic-gate 	taskq_ent_t *tqe;
14147c478bd9Sstevel@tonic-gate 	callb_cpr_t cprinfo;
14157c478bd9Sstevel@tonic-gate 	hrtime_t start, end;
14167c478bd9Sstevel@tonic-gate 
1417*35a5a358SJonathan Adams 	curthread->t_taskq = tq;	/* mark ourselves for taskq_member() */
1418*35a5a358SJonathan Adams 
1419*35a5a358SJonathan Adams 	if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) {
1420*35a5a358SJonathan Adams 		sysdc_thread_enter(curthread, tq->tq_DC,
1421*35a5a358SJonathan Adams 		    (tq->tq_flags & TASKQ_DC_BATCH) ? SYSDC_THREAD_BATCH : 0);
1422*35a5a358SJonathan Adams 	}
1423*35a5a358SJonathan Adams 
14247c478bd9Sstevel@tonic-gate 	if (tq->tq_flags & TASKQ_CPR_SAFE) {
14257c478bd9Sstevel@tonic-gate 		CALLB_CPR_INIT_SAFE(curthread, tq->tq_name);
14267c478bd9Sstevel@tonic-gate 	} else {
14277c478bd9Sstevel@tonic-gate 		CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr,
14287c478bd9Sstevel@tonic-gate 		    tq->tq_name);
14297c478bd9Sstevel@tonic-gate 	}
14307c478bd9Sstevel@tonic-gate 	mutex_enter(&tq->tq_lock);
14312e0c549eSJonathan Adams 	thread_id = ++tq->tq_nthreads;
14322e0c549eSJonathan Adams 	ASSERT(tq->tq_flags & TASKQ_THREAD_CREATED);
1433*35a5a358SJonathan Adams 	ASSERT(tq->tq_flags & TASKQ_CHANGING);
14342e0c549eSJonathan Adams 	tq->tq_flags &= ~TASKQ_THREAD_CREATED;
14352e0c549eSJonathan Adams 
14362e0c549eSJonathan Adams 	VERIFY3S(thread_id, <=, tq->tq_nthreads_max);
14372e0c549eSJonathan Adams 
14382e0c549eSJonathan Adams 	if (tq->tq_nthreads_max == 1)
14392e0c549eSJonathan Adams 		tq->tq_thread = curthread;
14402e0c549eSJonathan Adams 	else
14412e0c549eSJonathan Adams 		tq->tq_threadlist[thread_id - 1] = curthread;
14422e0c549eSJonathan Adams 
1443*35a5a358SJonathan Adams 	/* Allow taskq_create_common()'s taskq_thread_create() to return. */
1444*35a5a358SJonathan Adams 	if (tq->tq_nthreads == TASKQ_CREATE_ACTIVE_THREADS)
1445*35a5a358SJonathan Adams 		cv_broadcast(&tq->tq_wait_cv);
1446*35a5a358SJonathan Adams 
14472e0c549eSJonathan Adams 	for (;;) {
14482e0c549eSJonathan Adams 		if (tq->tq_flags & TASKQ_CHANGING) {
1449*35a5a358SJonathan Adams 			/* See if we're no longer needed */
14502e0c549eSJonathan Adams 			if (thread_id > tq->tq_nthreads_target) {
14512e0c549eSJonathan Adams 				/*
14522e0c549eSJonathan Adams 				 * To preserve the one-to-one mapping between
14532e0c549eSJonathan Adams 				 * thread_id and thread, we must exit from
14542e0c549eSJonathan Adams 				 * highest thread ID to least.
14552e0c549eSJonathan Adams 				 *
14562e0c549eSJonathan Adams 				 * However, if everyone is exiting, the order
14572e0c549eSJonathan Adams 				 * doesn't matter, so just exit immediately.
14582e0c549eSJonathan Adams 				 * (this is safe, since you must wait for
14592e0c549eSJonathan Adams 				 * nthreads to reach 0 after setting
14602e0c549eSJonathan Adams 				 * tq_nthreads_target to 0)
14612e0c549eSJonathan Adams 				 */
14622e0c549eSJonathan Adams 				if (thread_id == tq->tq_nthreads ||
14632e0c549eSJonathan Adams 				    tq->tq_nthreads_target == 0)
14642e0c549eSJonathan Adams 					break;
14652e0c549eSJonathan Adams 
14662e0c549eSJonathan Adams 				/* Wait for higher thread_ids to exit */
1467e0ad97e3SJonathan Adams 				(void) taskq_thread_wait(tq, &tq->tq_lock,
1468e0ad97e3SJonathan Adams 				    &tq->tq_exit_cv, &cprinfo, -1);
14692e0c549eSJonathan Adams 				continue;
14702e0c549eSJonathan Adams 			}
1471*35a5a358SJonathan Adams 
1472*35a5a358SJonathan Adams 			/*
1473*35a5a358SJonathan Adams 			 * If no thread is starting taskq_thread(), we can
1474*35a5a358SJonathan Adams 			 * do some bookkeeping.
1475*35a5a358SJonathan Adams 			 */
1476*35a5a358SJonathan Adams 			if (!(tq->tq_flags & TASKQ_THREAD_CREATED)) {
1477*35a5a358SJonathan Adams 				/* Check if we've reached our target */
1478*35a5a358SJonathan Adams 				if (tq->tq_nthreads == tq->tq_nthreads_target) {
1479*35a5a358SJonathan Adams 					tq->tq_flags &= ~TASKQ_CHANGING;
1480*35a5a358SJonathan Adams 					cv_broadcast(&tq->tq_wait_cv);
1481*35a5a358SJonathan Adams 				}
1482*35a5a358SJonathan Adams 				/* Check if we need to create a thread */
1483*35a5a358SJonathan Adams 				if (tq->tq_nthreads < tq->tq_nthreads_target) {
1484*35a5a358SJonathan Adams 					taskq_thread_create(tq);
1485*35a5a358SJonathan Adams 					continue; /* tq_lock was dropped */
1486*35a5a358SJonathan Adams 				}
1487*35a5a358SJonathan Adams 			}
14882e0c549eSJonathan Adams 		}
14897c478bd9Sstevel@tonic-gate 		if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
14907c478bd9Sstevel@tonic-gate 			if (--tq->tq_active == 0)
14917c478bd9Sstevel@tonic-gate 				cv_broadcast(&tq->tq_wait_cv);
1492e0ad97e3SJonathan Adams 			(void) taskq_thread_wait(tq, &tq->tq_lock,
1493e0ad97e3SJonathan Adams 			    &tq->tq_dispatch_cv, &cprinfo, -1);
14947c478bd9Sstevel@tonic-gate 			tq->tq_active++;
14957c478bd9Sstevel@tonic-gate 			continue;
14967c478bd9Sstevel@tonic-gate 		}
1497*35a5a358SJonathan Adams 
14987c478bd9Sstevel@tonic-gate 		tqe->tqent_prev->tqent_next = tqe->tqent_next;
14997c478bd9Sstevel@tonic-gate 		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
15007c478bd9Sstevel@tonic-gate 		mutex_exit(&tq->tq_lock);
15017c478bd9Sstevel@tonic-gate 
15027c478bd9Sstevel@tonic-gate 		rw_enter(&tq->tq_threadlock, RW_READER);
15037c478bd9Sstevel@tonic-gate 		start = gethrtime();
15047c478bd9Sstevel@tonic-gate 		DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
15057c478bd9Sstevel@tonic-gate 		    taskq_ent_t *, tqe);
15067c478bd9Sstevel@tonic-gate 		tqe->tqent_func(tqe->tqent_arg);
15077c478bd9Sstevel@tonic-gate 		DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
15087c478bd9Sstevel@tonic-gate 		    taskq_ent_t *, tqe);
15097c478bd9Sstevel@tonic-gate 		end = gethrtime();
15107c478bd9Sstevel@tonic-gate 		rw_exit(&tq->tq_threadlock);
15117c478bd9Sstevel@tonic-gate 
15127c478bd9Sstevel@tonic-gate 		mutex_enter(&tq->tq_lock);
15137c478bd9Sstevel@tonic-gate 		tq->tq_totaltime += end - start;
15147c478bd9Sstevel@tonic-gate 		tq->tq_executed++;
15157c478bd9Sstevel@tonic-gate 
15167c478bd9Sstevel@tonic-gate 		taskq_ent_free(tq, tqe);
15177c478bd9Sstevel@tonic-gate 	}
15182e0c549eSJonathan Adams 
15192e0c549eSJonathan Adams 	if (tq->tq_nthreads_max == 1)
15202e0c549eSJonathan Adams 		tq->tq_thread = NULL;
15212e0c549eSJonathan Adams 	else
15222e0c549eSJonathan Adams 		tq->tq_threadlist[thread_id - 1] = NULL;
15232e0c549eSJonathan Adams 
15242e0c549eSJonathan Adams 	/* We're exiting, and therefore no longer active */
1525*35a5a358SJonathan Adams 	ASSERT(tq->tq_active > 0);
15262e0c549eSJonathan Adams 	tq->tq_active--;
15272e0c549eSJonathan Adams 
1528*35a5a358SJonathan Adams 	ASSERT(tq->tq_nthreads > 0);
1529*35a5a358SJonathan Adams 	tq->tq_nthreads--;
1530*35a5a358SJonathan Adams 
1531*35a5a358SJonathan Adams 	/* Wake up anyone waiting for us to exit */
1532*35a5a358SJonathan Adams 	cv_broadcast(&tq->tq_exit_cv);
1533*35a5a358SJonathan Adams 	if (tq->tq_nthreads == tq->tq_nthreads_target) {
1534*35a5a358SJonathan Adams 		if (!(tq->tq_flags & TASKQ_THREAD_CREATED))
1535*35a5a358SJonathan Adams 			tq->tq_flags &= ~TASKQ_CHANGING;
1536*35a5a358SJonathan Adams 
1537*35a5a358SJonathan Adams 		cv_broadcast(&tq->tq_wait_cv);
1538*35a5a358SJonathan Adams 	}
1539*35a5a358SJonathan Adams 
15407c478bd9Sstevel@tonic-gate 	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
1541*35a5a358SJonathan Adams 	CALLB_CPR_EXIT(&cprinfo);		/* drops tq->tq_lock */
1542*35a5a358SJonathan Adams 	if (curthread->t_lwp != NULL) {
1543*35a5a358SJonathan Adams 		mutex_enter(&curproc->p_lock);
1544*35a5a358SJonathan Adams 		lwp_exit();
1545*35a5a358SJonathan Adams 	} else {
15467c478bd9Sstevel@tonic-gate 		thread_exit();
15477c478bd9Sstevel@tonic-gate 	}
1548*35a5a358SJonathan Adams }
15497c478bd9Sstevel@tonic-gate 
15507c478bd9Sstevel@tonic-gate /*
15517c478bd9Sstevel@tonic-gate  * Worker per-entry thread for dynamic dispatches.
15527c478bd9Sstevel@tonic-gate  */
15537c478bd9Sstevel@tonic-gate static void
15547c478bd9Sstevel@tonic-gate taskq_d_thread(taskq_ent_t *tqe)
15557c478bd9Sstevel@tonic-gate {
15567c478bd9Sstevel@tonic-gate 	taskq_bucket_t	*bucket = tqe->tqent_bucket;
15577c478bd9Sstevel@tonic-gate 	taskq_t		*tq = bucket->tqbucket_taskq;
15587c478bd9Sstevel@tonic-gate 	kmutex_t	*lock = &bucket->tqbucket_lock;
15597c478bd9Sstevel@tonic-gate 	kcondvar_t	*cv = &tqe->tqent_cv;
15607c478bd9Sstevel@tonic-gate 	callb_cpr_t	cprinfo;
15617c478bd9Sstevel@tonic-gate 	clock_t		w = 0;	/* initialized; the wait below may be skipped */
15627c478bd9Sstevel@tonic-gate 
15637c478bd9Sstevel@tonic-gate 	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, tq->tq_name);
15647c478bd9Sstevel@tonic-gate 
15657c478bd9Sstevel@tonic-gate 	mutex_enter(lock);
15667c478bd9Sstevel@tonic-gate 
15677c478bd9Sstevel@tonic-gate 	for (;;) {
15687c478bd9Sstevel@tonic-gate 		/*
15697c478bd9Sstevel@tonic-gate 		 * If a task is scheduled (func != NULL), execute it, otherwise
15707c478bd9Sstevel@tonic-gate 		 * sleep, waiting for a job.
15717c478bd9Sstevel@tonic-gate 		 */
15727c478bd9Sstevel@tonic-gate 		if (tqe->tqent_func != NULL) {
15737c478bd9Sstevel@tonic-gate 			hrtime_t	start;
15747c478bd9Sstevel@tonic-gate 			hrtime_t	end;
15757c478bd9Sstevel@tonic-gate 
15767c478bd9Sstevel@tonic-gate 			ASSERT(bucket->tqbucket_nalloc > 0);
15777c478bd9Sstevel@tonic-gate 
15787c478bd9Sstevel@tonic-gate 			/*
15797c478bd9Sstevel@tonic-gate 			 * It is possible to free the entry right away before
15807c478bd9Sstevel@tonic-gate 			 * actually executing the task so that subsequent
15817c478bd9Sstevel@tonic-gate 			 * dispatches may immediately reuse it. But this would,
15827c478bd9Sstevel@tonic-gate 			 * in effect, create a queue of length two in the entry
15837c478bd9Sstevel@tonic-gate 			 * and may lead to a deadlock if the execution of the
15847c478bd9Sstevel@tonic-gate 			 * current task depends on the execution of the next
15857c478bd9Sstevel@tonic-gate 			 * scheduled task. So, we keep the entry busy until the
15867c478bd9Sstevel@tonic-gate 			 * task is processed.
15877c478bd9Sstevel@tonic-gate 			 */
15887c478bd9Sstevel@tonic-gate 
15897c478bd9Sstevel@tonic-gate 			mutex_exit(lock);
15907c478bd9Sstevel@tonic-gate 			start = gethrtime();
15917c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(taskq__d__exec__start, taskq_t *, tq,
15927c478bd9Sstevel@tonic-gate 			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
15937c478bd9Sstevel@tonic-gate 			tqe->tqent_func(tqe->tqent_arg);
15947c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(taskq__d__exec__end, taskq_t *, tq,
15957c478bd9Sstevel@tonic-gate 			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
15967c478bd9Sstevel@tonic-gate 			end = gethrtime();
15977c478bd9Sstevel@tonic-gate 			mutex_enter(lock);
15987c478bd9Sstevel@tonic-gate 			bucket->tqbucket_totaltime += end - start;
15997c478bd9Sstevel@tonic-gate 
16007c478bd9Sstevel@tonic-gate 			/*
16017c478bd9Sstevel@tonic-gate 			 * Return the entry to the bucket free list.
16027c478bd9Sstevel@tonic-gate 			 */
16037c478bd9Sstevel@tonic-gate 			tqe->tqent_func = NULL;
16047c478bd9Sstevel@tonic-gate 			TQ_APPEND(bucket->tqbucket_freelist, tqe);
16057c478bd9Sstevel@tonic-gate 			bucket->tqbucket_nalloc--;
16067c478bd9Sstevel@tonic-gate 			bucket->tqbucket_nfree++;
16077c478bd9Sstevel@tonic-gate 			ASSERT(!IS_EMPTY(bucket->tqbucket_freelist));
16087c478bd9Sstevel@tonic-gate 			/*
16097c478bd9Sstevel@tonic-gate 			 * taskq_wait() waits for nalloc to drop to zero on
16107c478bd9Sstevel@tonic-gate 			 * tqbucket_cv.
16117c478bd9Sstevel@tonic-gate 			 */
16127c478bd9Sstevel@tonic-gate 			cv_signal(&bucket->tqbucket_cv);
16137c478bd9Sstevel@tonic-gate 		}
16147c478bd9Sstevel@tonic-gate 
16157c478bd9Sstevel@tonic-gate 		/*
16167c478bd9Sstevel@tonic-gate 		 * At this point the entry must be in the bucket free list -
16177c478bd9Sstevel@tonic-gate 		 * either because it was there initially or because it just
16187c478bd9Sstevel@tonic-gate 		 * finished executing a task and put itself on the free list.
16197c478bd9Sstevel@tonic-gate 		 */
16207c478bd9Sstevel@tonic-gate 		ASSERT(bucket->tqbucket_nfree > 0);
16217c478bd9Sstevel@tonic-gate 		/*
16227c478bd9Sstevel@tonic-gate 		 * Go to sleep unless we are closing.
16237c478bd9Sstevel@tonic-gate 		 * If a thread is sleeping too long, it dies.
16247c478bd9Sstevel@tonic-gate 		 */
16257c478bd9Sstevel@tonic-gate 		if (! (bucket->tqbucket_flags & TQBUCKET_CLOSE)) {
1626e0ad97e3SJonathan Adams 			w = taskq_thread_wait(tq, lock, cv,
1627e0ad97e3SJonathan Adams 			    &cprinfo, taskq_thread_timeout * hz);
16287c478bd9Sstevel@tonic-gate 		}
16297c478bd9Sstevel@tonic-gate 
16307c478bd9Sstevel@tonic-gate 		/*
16317c478bd9Sstevel@tonic-gate 		 * At this point we may be in two different states:
16327c478bd9Sstevel@tonic-gate 		 *
16337c478bd9Sstevel@tonic-gate 		 * (1) tqent_func is set which means that a new task is
16347c478bd9Sstevel@tonic-gate 		 *	dispatched and we need to execute it.
16357c478bd9Sstevel@tonic-gate 		 *
16367c478bd9Sstevel@tonic-gate 		 * (2) Thread is sleeping for too long or we are closing. In
16377c478bd9Sstevel@tonic-gate 		 *	both cases destroy the thread and the entry.
16387c478bd9Sstevel@tonic-gate 		 */
16397c478bd9Sstevel@tonic-gate 
16407c478bd9Sstevel@tonic-gate 		/* If func is NULL we should be on the freelist. */
16417c478bd9Sstevel@tonic-gate 		ASSERT((tqe->tqent_func != NULL) ||
16427c478bd9Sstevel@tonic-gate 		    (bucket->tqbucket_nfree > 0));
16437c478bd9Sstevel@tonic-gate 		/* If func is non-NULL we should be allocated */
16447c478bd9Sstevel@tonic-gate 		ASSERT((tqe->tqent_func == NULL) ||
16457c478bd9Sstevel@tonic-gate 		    (bucket->tqbucket_nalloc > 0));
16467c478bd9Sstevel@tonic-gate 
16477c478bd9Sstevel@tonic-gate 		/* Check freelist consistency */
16487c478bd9Sstevel@tonic-gate 		ASSERT((bucket->tqbucket_nfree > 0) ||
16497c478bd9Sstevel@tonic-gate 		    IS_EMPTY(bucket->tqbucket_freelist));
16507c478bd9Sstevel@tonic-gate 		ASSERT((bucket->tqbucket_nfree == 0) ||
16517c478bd9Sstevel@tonic-gate 		    !IS_EMPTY(bucket->tqbucket_freelist));
16527c478bd9Sstevel@tonic-gate 
16537c478bd9Sstevel@tonic-gate 		if ((tqe->tqent_func == NULL) &&
16547c478bd9Sstevel@tonic-gate 		    ((w == -1) || (bucket->tqbucket_flags & TQBUCKET_CLOSE))) {
16557c478bd9Sstevel@tonic-gate 			/*
16567c478bd9Sstevel@tonic-gate 			 * This thread is sleeping for too long or we are
16577c478bd9Sstevel@tonic-gate 			 * closing - time to die.
16587c478bd9Sstevel@tonic-gate 			 * Thread creation/destruction happens rarely,
16597c478bd9Sstevel@tonic-gate 			 * so grabbing the lock is not a big performance issue.
16607c478bd9Sstevel@tonic-gate 			 * The bucket lock is dropped by CALLB_CPR_EXIT().
16617c478bd9Sstevel@tonic-gate 			 */
16627c478bd9Sstevel@tonic-gate 
16637c478bd9Sstevel@tonic-gate 			/* Remove the entry from the free list. */
16647c478bd9Sstevel@tonic-gate 			tqe->tqent_prev->tqent_next = tqe->tqent_next;
16657c478bd9Sstevel@tonic-gate 			tqe->tqent_next->tqent_prev = tqe->tqent_prev;
16667c478bd9Sstevel@tonic-gate 			ASSERT(bucket->tqbucket_nfree > 0);
16677c478bd9Sstevel@tonic-gate 			bucket->tqbucket_nfree--;
16687c478bd9Sstevel@tonic-gate 
16697c478bd9Sstevel@tonic-gate 			TQ_STAT(bucket, tqs_tdeaths);
16707c478bd9Sstevel@tonic-gate 			cv_signal(&bucket->tqbucket_cv);
16717c478bd9Sstevel@tonic-gate 			tqe->tqent_thread = NULL;
16727c478bd9Sstevel@tonic-gate 			mutex_enter(&tq->tq_lock);
16737c478bd9Sstevel@tonic-gate 			tq->tq_tdeaths++;
16747c478bd9Sstevel@tonic-gate 			mutex_exit(&tq->tq_lock);
16757c478bd9Sstevel@tonic-gate 			CALLB_CPR_EXIT(&cprinfo);
16767c478bd9Sstevel@tonic-gate 			kmem_cache_free(taskq_ent_cache, tqe);
16777c478bd9Sstevel@tonic-gate 			thread_exit();
16787c478bd9Sstevel@tonic-gate 		}
16797c478bd9Sstevel@tonic-gate 	}
16807c478bd9Sstevel@tonic-gate }
16817c478bd9Sstevel@tonic-gate 
16827c478bd9Sstevel@tonic-gate 
16837c478bd9Sstevel@tonic-gate /*
16847c478bd9Sstevel@tonic-gate  * Taskq creation. May sleep for memory.
16857c478bd9Sstevel@tonic-gate  * Always use automatically generated instances to avoid kstat name space
16867c478bd9Sstevel@tonic-gate  * collisions.
16877c478bd9Sstevel@tonic-gate  */
16887c478bd9Sstevel@tonic-gate 
16897c478bd9Sstevel@tonic-gate taskq_t *
16907c478bd9Sstevel@tonic-gate taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
16917c478bd9Sstevel@tonic-gate     int maxalloc, uint_t flags)
16927c478bd9Sstevel@tonic-gate {
1693*35a5a358SJonathan Adams 	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1694*35a5a358SJonathan Adams 
1695*35a5a358SJonathan Adams 	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
1696*35a5a358SJonathan Adams 	    maxalloc, &p0, 0, flags | TASKQ_NOINSTANCE));
16977c478bd9Sstevel@tonic-gate }
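
/*
 * Example (sketch): typical lifecycle of a private task queue ("mydrv" is
 * a hypothetical driver).  Four threads at minclsyspri, keeping between 4
 * and 128 preallocated entries:
 *
 *	taskq_t *tq;
 *
 *	tq = taskq_create("mydrv_taskq", 4, minclsyspri, 4, 128,
 *	    TASKQ_PREPOPULATE);
 *	...
 *	(void) taskq_dispatch(tq, mydrv_work, sc, TQ_SLEEP);
 *	...
 *	taskq_wait(tq);
 *	taskq_destroy(tq);
 */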
16987c478bd9Sstevel@tonic-gate 
16997c478bd9Sstevel@tonic-gate /*
17007c478bd9Sstevel@tonic-gate  * Create an instance of task queue. It is legal to create task queues with the
17017c478bd9Sstevel@tonic-gate  * same name and different instances.
17027c478bd9Sstevel@tonic-gate  *
17037c478bd9Sstevel@tonic-gate  * taskq_create_instance is used by ddi_taskq_create() where it gets the
17047c478bd9Sstevel@tonic-gate  * instance from ddi_get_instance(). In some cases the instance is not
17057c478bd9Sstevel@tonic-gate  * initialized and is set to -1. This case is handled as if no instance was
17067c478bd9Sstevel@tonic-gate  * passed at all.
17077c478bd9Sstevel@tonic-gate  */
17087c478bd9Sstevel@tonic-gate taskq_t *
17097c478bd9Sstevel@tonic-gate taskq_create_instance(const char *name, int instance, int nthreads, pri_t pri,
17107c478bd9Sstevel@tonic-gate     int minalloc, int maxalloc, uint_t flags)
17117c478bd9Sstevel@tonic-gate {
1712*35a5a358SJonathan Adams 	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
17137c478bd9Sstevel@tonic-gate 	ASSERT((instance >= 0) || (instance == -1));
17147c478bd9Sstevel@tonic-gate 
17157c478bd9Sstevel@tonic-gate 	if (instance < 0) {
17167c478bd9Sstevel@tonic-gate 		flags |= TASKQ_NOINSTANCE;
17177c478bd9Sstevel@tonic-gate 	}
17187c478bd9Sstevel@tonic-gate 
17197c478bd9Sstevel@tonic-gate 	return (taskq_create_common(name, instance, nthreads,
1720*35a5a358SJonathan Adams 	    pri, minalloc, maxalloc, &p0, 0, flags));
17217c478bd9Sstevel@tonic-gate }
17227c478bd9Sstevel@tonic-gate 
1723*35a5a358SJonathan Adams taskq_t *
1724*35a5a358SJonathan Adams taskq_create_proc(const char *name, int nthreads, pri_t pri, int minalloc,
1725*35a5a358SJonathan Adams     int maxalloc, proc_t *proc, uint_t flags)
1726*35a5a358SJonathan Adams {
1727*35a5a358SJonathan Adams 	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1728*35a5a358SJonathan Adams 	ASSERT(proc->p_flag & SSYS);
1729*35a5a358SJonathan Adams 
1730*35a5a358SJonathan Adams 	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
1731*35a5a358SJonathan Adams 	    maxalloc, proc, 0, flags | TASKQ_NOINSTANCE));
1732*35a5a358SJonathan Adams }
1733*35a5a358SJonathan Adams 
1734*35a5a358SJonathan Adams taskq_t *
1735*35a5a358SJonathan Adams taskq_create_sysdc(const char *name, int nthreads, int minalloc,
1736*35a5a358SJonathan Adams     int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
1737*35a5a358SJonathan Adams {
1738*35a5a358SJonathan Adams 	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
1739*35a5a358SJonathan Adams 	ASSERT(proc->p_flag & SSYS);
1740*35a5a358SJonathan Adams 
1741*35a5a358SJonathan Adams 	return (taskq_create_common(name, 0, nthreads, minclsyspri, minalloc,
1742*35a5a358SJonathan Adams 	    maxalloc, proc, dc, flags | TASKQ_NOINSTANCE | TASKQ_DUTY_CYCLE));
1743*35a5a358SJonathan Adams }
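
/*
 * Example (sketch): a duty-cycle task queue whose threads run as LWPs in a
 * system process and are throttled by the SDC scheduling class.  Here
 * "myproc" stands for a system process the caller owns; the precise
 * meaning of the duty cycle (20 below) is defined by the SDC class:
 *
 *	tq = taskq_create_sysdc("mydrv_sysdc", 8, 4, 128, myproc, 20, 0);
 */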
1744*35a5a358SJonathan Adams 
1745*35a5a358SJonathan Adams #define	IMPLY(a, b)	ASSERT((!(a)) || (b)) /* if (a) { ASSERT (b) } */
1746*35a5a358SJonathan Adams 
17477c478bd9Sstevel@tonic-gate static taskq_t *
17487c478bd9Sstevel@tonic-gate taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
1749*35a5a358SJonathan Adams     int minalloc, int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
17507c478bd9Sstevel@tonic-gate {
17517c478bd9Sstevel@tonic-gate 	taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
17527c478bd9Sstevel@tonic-gate 	uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
17537c478bd9Sstevel@tonic-gate 	uint_t bsize;	/* # of buckets - always power of 2 */
17542e0c549eSJonathan Adams 	int max_nthreads;
17557c478bd9Sstevel@tonic-gate 
17567c478bd9Sstevel@tonic-gate 	/*
1757*35a5a358SJonathan Adams 	 * TASKQ_DYNAMIC, TASKQ_CPR_SAFE and TASKQ_THREADS_CPU_PCT are
1758*35a5a358SJonathan Adams 	 * mutually exclusive.
17597c478bd9Sstevel@tonic-gate 	 */
1760*35a5a358SJonathan Adams 	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_CPR_SAFE));
1761*35a5a358SJonathan Adams 	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_THREADS_CPU_PCT));
1762*35a5a358SJonathan Adams 	IMPLY((flags & TASKQ_CPR_SAFE), !(flags & TASKQ_THREADS_CPU_PCT));
17637c478bd9Sstevel@tonic-gate 
1764*35a5a358SJonathan Adams 	/* Cannot have DUTY_CYCLE without a non-p0 kernel process */
1765*35a5a358SJonathan Adams 	IMPLY((flags & TASKQ_DUTY_CYCLE), proc != &p0);
1766*35a5a358SJonathan Adams 
1767*35a5a358SJonathan Adams 	/* Cannot have DC_BATCH without DUTY_CYCLE */
1768*35a5a358SJonathan Adams 	ASSERT((flags & (TASKQ_DUTY_CYCLE|TASKQ_DC_BATCH)) != TASKQ_DC_BATCH);
1769*35a5a358SJonathan Adams 
1770*35a5a358SJonathan Adams 	ASSERT(proc != NULL);
17717c478bd9Sstevel@tonic-gate 
17727c478bd9Sstevel@tonic-gate 	bsize = 1 << (highbit(ncpus) - 1);
17737c478bd9Sstevel@tonic-gate 	ASSERT(bsize >= 1);
17747c478bd9Sstevel@tonic-gate 	bsize = MIN(bsize, taskq_maxbuckets);
17757c478bd9Sstevel@tonic-gate 
17762e0c549eSJonathan Adams 	if (flags & TASKQ_DYNAMIC) {
17772e0c549eSJonathan Adams 		ASSERT3S(nthreads, >=, 1);
17787c478bd9Sstevel@tonic-gate 		tq->tq_maxsize = nthreads;
17797c478bd9Sstevel@tonic-gate 
17802e0c549eSJonathan Adams 		/* For dynamic task queues use just one backup thread */
17812e0c549eSJonathan Adams 		nthreads = max_nthreads = 1;
17827c478bd9Sstevel@tonic-gate 
1783*35a5a358SJonathan Adams 	} else if (flags & TASKQ_THREADS_CPU_PCT) {
17842e0c549eSJonathan Adams 		uint_t pct;
17852e0c549eSJonathan Adams 		ASSERT3S(nthreads, >=, 0);
17862e0c549eSJonathan Adams 		pct = nthreads;
17872e0c549eSJonathan Adams 
17882e0c549eSJonathan Adams 		if (pct > taskq_cpupct_max_percent)
17892e0c549eSJonathan Adams 			pct = taskq_cpupct_max_percent;
17902e0c549eSJonathan Adams 
1791*35a5a358SJonathan Adams 		/*
1792*35a5a358SJonathan Adams 		 * If you're using THREADS_CPU_PCT, the process for the
1793*35a5a358SJonathan Adams 		 * taskq threads must be curproc.  This allows any pset
1794*35a5a358SJonathan Adams 		 * binding to be inherited correctly.  If proc is &p0,
1795*35a5a358SJonathan Adams 		 * we won't be creating LWPs, so new threads will be assigned
1796*35a5a358SJonathan Adams 		 * to the default processor set.
1797*35a5a358SJonathan Adams 		 */
1798*35a5a358SJonathan Adams 		ASSERT(curproc == proc || proc == &p0);
17992e0c549eSJonathan Adams 		tq->tq_threads_ncpus_pct = pct;
1800*35a5a358SJonathan Adams 		nthreads = 1;		/* corrected in taskq_thread_create() */
18012e0c549eSJonathan Adams 		max_nthreads = TASKQ_THREADS_PCT(max_ncpus, pct);
1802*35a5a358SJonathan Adams 
1803*35a5a358SJonathan Adams 	} else {
1804*35a5a358SJonathan Adams 		ASSERT3S(nthreads, >=, 1);
1805*35a5a358SJonathan Adams 		max_nthreads = nthreads;
18062e0c549eSJonathan Adams 	}
18072e0c549eSJonathan Adams 
18082e0c549eSJonathan Adams 	if (max_nthreads < taskq_minimum_nthreads_max)
18092e0c549eSJonathan Adams 		max_nthreads = taskq_minimum_nthreads_max;
18102e0c549eSJonathan Adams 
18112e0c549eSJonathan Adams 	/*
18122e0c549eSJonathan Adams 	 * Make sure the name is 0-terminated and conforms to the rules for
18132e0c549eSJonathan Adams 	 * C identifiers.
18142e0c549eSJonathan Adams 	 */
18157c478bd9Sstevel@tonic-gate 	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
18162e0c549eSJonathan Adams 	strident_canon(tq->tq_name, TASKQ_NAMELEN + 1);
18177c478bd9Sstevel@tonic-gate 
18182e0c549eSJonathan Adams 	tq->tq_flags = flags | TASKQ_CHANGING;
18192e0c549eSJonathan Adams 	tq->tq_active = 0;
18207c478bd9Sstevel@tonic-gate 	tq->tq_instance = instance;
18212e0c549eSJonathan Adams 	tq->tq_nthreads_target = nthreads;
18222e0c549eSJonathan Adams 	tq->tq_nthreads_max = max_nthreads;
18237c478bd9Sstevel@tonic-gate 	tq->tq_minalloc = minalloc;
18247c478bd9Sstevel@tonic-gate 	tq->tq_maxalloc = maxalloc;
18257c478bd9Sstevel@tonic-gate 	tq->tq_nbuckets = bsize;
1826*35a5a358SJonathan Adams 	tq->tq_proc = proc;
18277c478bd9Sstevel@tonic-gate 	tq->tq_pri = pri;
1828*35a5a358SJonathan Adams 	tq->tq_DC = dc;
1829*35a5a358SJonathan Adams 	list_link_init(&tq->tq_cpupct_link);
18307c478bd9Sstevel@tonic-gate 
18312e0c549eSJonathan Adams 	if (max_nthreads > 1)
18322e0c549eSJonathan Adams 		tq->tq_threadlist = kmem_alloc(
18332e0c549eSJonathan Adams 		    sizeof (kthread_t *) * max_nthreads, KM_SLEEP);
18342e0c549eSJonathan Adams 
18357c478bd9Sstevel@tonic-gate 	mutex_enter(&tq->tq_lock);
18362e0c549eSJonathan Adams 	if (flags & TASKQ_PREPOPULATE) {
18377c478bd9Sstevel@tonic-gate 		while (minalloc-- > 0)
18387c478bd9Sstevel@tonic-gate 			taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
18392e0c549eSJonathan Adams 	}
18402e0c549eSJonathan Adams 
1841*35a5a358SJonathan Adams 	/*
1842*35a5a358SJonathan Adams 	 * Create the first thread, which will create any other threads
1843*35a5a358SJonathan Adams 	 * necessary.  taskq_thread_create will not return until we have
1844*35a5a358SJonathan Adams 	 * enough threads to be able to process requests.
1845*35a5a358SJonathan Adams 	 */
18462e0c549eSJonathan Adams 	taskq_thread_create(tq);
18477c478bd9Sstevel@tonic-gate 	mutex_exit(&tq->tq_lock);
18487c478bd9Sstevel@tonic-gate 
18497c478bd9Sstevel@tonic-gate 	if (flags & TASKQ_DYNAMIC) {
18507c478bd9Sstevel@tonic-gate 		taskq_bucket_t *bucket = kmem_zalloc(sizeof (taskq_bucket_t) *
18517c478bd9Sstevel@tonic-gate 		    bsize, KM_SLEEP);
18527c478bd9Sstevel@tonic-gate 		int b_id;
18537c478bd9Sstevel@tonic-gate 
18547c478bd9Sstevel@tonic-gate 		tq->tq_buckets = bucket;
18557c478bd9Sstevel@tonic-gate 
18567c478bd9Sstevel@tonic-gate 		/* Initialize each bucket */
18577c478bd9Sstevel@tonic-gate 		for (b_id = 0; b_id < bsize; b_id++, bucket++) {
18587c478bd9Sstevel@tonic-gate 			mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
18597c478bd9Sstevel@tonic-gate 			    NULL);
18607c478bd9Sstevel@tonic-gate 			cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
18617c478bd9Sstevel@tonic-gate 			bucket->tqbucket_taskq = tq;
18627c478bd9Sstevel@tonic-gate 			bucket->tqbucket_freelist.tqent_next =
18637c478bd9Sstevel@tonic-gate 			    bucket->tqbucket_freelist.tqent_prev =
18647c478bd9Sstevel@tonic-gate 			    &bucket->tqbucket_freelist;
18657c478bd9Sstevel@tonic-gate 			if (flags & TASKQ_PREPOPULATE)
18667c478bd9Sstevel@tonic-gate 				taskq_bucket_extend(bucket);
18677c478bd9Sstevel@tonic-gate 		}
18687c478bd9Sstevel@tonic-gate 	}
18697c478bd9Sstevel@tonic-gate 
18707c478bd9Sstevel@tonic-gate 	/*
18717c478bd9Sstevel@tonic-gate 	 * Install kstats.
18727c478bd9Sstevel@tonic-gate 	 * We have two cases:
18737c478bd9Sstevel@tonic-gate 	 *   1) Instance is provided to taskq_create_instance(). In this case it
18747c478bd9Sstevel@tonic-gate 	 *	should be >= 0 and we use it.
18757c478bd9Sstevel@tonic-gate 	 *
18767c478bd9Sstevel@tonic-gate 	 *   2) Instance is not provided and is automatically generated
18777c478bd9Sstevel@tonic-gate 	 *   2) Instance is not provided and is automatically generated.
18787c478bd9Sstevel@tonic-gate 	if (flags & TASKQ_NOINSTANCE) {
18797c478bd9Sstevel@tonic-gate 		instance = tq->tq_instance =
18807c478bd9Sstevel@tonic-gate 		    (int)(uintptr_t)vmem_alloc(taskq_id_arena, 1, VM_SLEEP);
18817c478bd9Sstevel@tonic-gate 	}
18827c478bd9Sstevel@tonic-gate 
18837c478bd9Sstevel@tonic-gate 	if (flags & TASKQ_DYNAMIC) {
18847c478bd9Sstevel@tonic-gate 		if ((tq->tq_kstat = kstat_create("unix", instance,
18857c478bd9Sstevel@tonic-gate 		    tq->tq_name, "taskq_d", KSTAT_TYPE_NAMED,
18867c478bd9Sstevel@tonic-gate 		    sizeof (taskq_d_kstat) / sizeof (kstat_named_t),
18877c478bd9Sstevel@tonic-gate 		    KSTAT_FLAG_VIRTUAL)) != NULL) {
18887c478bd9Sstevel@tonic-gate 			tq->tq_kstat->ks_lock = &taskq_d_kstat_lock;
18897c478bd9Sstevel@tonic-gate 			tq->tq_kstat->ks_data = &taskq_d_kstat;
18907c478bd9Sstevel@tonic-gate 			tq->tq_kstat->ks_update = taskq_d_kstat_update;
18917c478bd9Sstevel@tonic-gate 			tq->tq_kstat->ks_private = tq;
18927c478bd9Sstevel@tonic-gate 			kstat_install(tq->tq_kstat);
18937c478bd9Sstevel@tonic-gate 		}
18947c478bd9Sstevel@tonic-gate 	} else {
18957c478bd9Sstevel@tonic-gate 		if ((tq->tq_kstat = kstat_create("unix", instance, tq->tq_name,
18967c478bd9Sstevel@tonic-gate 		    "taskq", KSTAT_TYPE_NAMED,
18977c478bd9Sstevel@tonic-gate 		    sizeof (taskq_kstat) / sizeof (kstat_named_t),
18987c478bd9Sstevel@tonic-gate 		    KSTAT_FLAG_VIRTUAL)) != NULL) {
18997c478bd9Sstevel@tonic-gate 			tq->tq_kstat->ks_lock = &taskq_kstat_lock;
19007c478bd9Sstevel@tonic-gate 			tq->tq_kstat->ks_data = &taskq_kstat;
19017c478bd9Sstevel@tonic-gate 			tq->tq_kstat->ks_update = taskq_kstat_update;
19027c478bd9Sstevel@tonic-gate 			tq->tq_kstat->ks_private = tq;
19037c478bd9Sstevel@tonic-gate 			kstat_install(tq->tq_kstat);
19047c478bd9Sstevel@tonic-gate 		}
19057c478bd9Sstevel@tonic-gate 	}
19067c478bd9Sstevel@tonic-gate 
19077c478bd9Sstevel@tonic-gate 	return (tq);
19087c478bd9Sstevel@tonic-gate }
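
/*
 * Usage sketch (never compiled into the kernel; the TASKQ_EXAMPLE guard
 * and the example_* names are hypothetical): the common lifecycle of a
 * fixed-size queue created above, drained and then destroyed as
 * taskq_destroy() below requires.
 */
#ifdef	TASKQ_EXAMPLE
static void
example_task(void *arg)
{
	/* Runs asynchronously in one of the queue's service threads. */
}

static void
example_lifecycle(void)
{
	taskq_t *tq = taskq_create("example_tq", 4, minclsyspri,
	    2, INT_MAX, TASKQ_PREPOPULATE);

	(void) taskq_dispatch(tq, example_task, NULL, TQ_SLEEP);
	taskq_wait(tq);		/* drain all pending and active entries */
	taskq_destroy(tq);	/* no dispatches may race with this */
}
#endif	/* TASKQ_EXAMPLE */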
19097c478bd9Sstevel@tonic-gate 
19107c478bd9Sstevel@tonic-gate /*
19117c478bd9Sstevel@tonic-gate  * taskq_destroy().
19127c478bd9Sstevel@tonic-gate  *
19137c478bd9Sstevel@tonic-gate  * Assumes: by the time taskq_destroy() is called no one will use this
19147c478bd9Sstevel@tonic-gate  * task queue in any way and no one will try to dispatch entries to it.
19157c478bd9Sstevel@tonic-gate  */
19167c478bd9Sstevel@tonic-gate void
19177c478bd9Sstevel@tonic-gate taskq_destroy(taskq_t *tq)
19187c478bd9Sstevel@tonic-gate {
19197c478bd9Sstevel@tonic-gate 	taskq_bucket_t *b = tq->tq_buckets;
19207c478bd9Sstevel@tonic-gate 	int bid = 0;
19217c478bd9Sstevel@tonic-gate 
19227c478bd9Sstevel@tonic-gate 	ASSERT(! (tq->tq_flags & TASKQ_CPR_SAFE));
19237c478bd9Sstevel@tonic-gate 
19247c478bd9Sstevel@tonic-gate 	/*
19257c478bd9Sstevel@tonic-gate 	 * Destroy kstats.
19267c478bd9Sstevel@tonic-gate 	 */
19277c478bd9Sstevel@tonic-gate 	if (tq->tq_kstat != NULL) {
19287c478bd9Sstevel@tonic-gate 		kstat_delete(tq->tq_kstat);
19297c478bd9Sstevel@tonic-gate 		tq->tq_kstat = NULL;
19307c478bd9Sstevel@tonic-gate 	}
19317c478bd9Sstevel@tonic-gate 
19327c478bd9Sstevel@tonic-gate 	/*
19337c478bd9Sstevel@tonic-gate 	 * Destroy instance if needed.
19347c478bd9Sstevel@tonic-gate 	 */
19357c478bd9Sstevel@tonic-gate 	if (tq->tq_flags & TASKQ_NOINSTANCE) {
19367c478bd9Sstevel@tonic-gate 		vmem_free(taskq_id_arena, (void *)(uintptr_t)(tq->tq_instance),
19377c478bd9Sstevel@tonic-gate 		    1);
19387c478bd9Sstevel@tonic-gate 		tq->tq_instance = 0;
19397c478bd9Sstevel@tonic-gate 	}
19407c478bd9Sstevel@tonic-gate 
19417c478bd9Sstevel@tonic-gate 	/*
19422e0c549eSJonathan Adams 	 * Unregister from the cpupct list.
19432e0c549eSJonathan Adams 	 */
19442e0c549eSJonathan Adams 	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
1945*35a5a358SJonathan Adams 		taskq_cpupct_remove(tq);
19462e0c549eSJonathan Adams 	}
19472e0c549eSJonathan Adams 
19482e0c549eSJonathan Adams 	/*
19497c478bd9Sstevel@tonic-gate 	 * Wait for any pending entries to complete.
19507c478bd9Sstevel@tonic-gate 	 */
19517c478bd9Sstevel@tonic-gate 	taskq_wait(tq);
19527c478bd9Sstevel@tonic-gate 
19537c478bd9Sstevel@tonic-gate 	mutex_enter(&tq->tq_lock);
19547c478bd9Sstevel@tonic-gate 	ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
19557c478bd9Sstevel@tonic-gate 	    (tq->tq_active == 0));
19567c478bd9Sstevel@tonic-gate 
19572e0c549eSJonathan Adams 	/* notify all the threads that they need to exit */
19582e0c549eSJonathan Adams 	tq->tq_nthreads_target = 0;
19597c478bd9Sstevel@tonic-gate 
19602e0c549eSJonathan Adams 	tq->tq_flags |= TASKQ_CHANGING;
19617c478bd9Sstevel@tonic-gate 	cv_broadcast(&tq->tq_dispatch_cv);
19622e0c549eSJonathan Adams 	cv_broadcast(&tq->tq_exit_cv);
19632e0c549eSJonathan Adams 
19647c478bd9Sstevel@tonic-gate 	while (tq->tq_nthreads != 0)
19657c478bd9Sstevel@tonic-gate 		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
19667c478bd9Sstevel@tonic-gate 
19672e0c549eSJonathan Adams 	if (tq->tq_nthreads_max != 1)
19682e0c549eSJonathan Adams 		kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
19692e0c549eSJonathan Adams 		    tq->tq_nthreads_max);
19702e0c549eSJonathan Adams 
19717c478bd9Sstevel@tonic-gate 	tq->tq_minalloc = 0;
19727c478bd9Sstevel@tonic-gate 	while (tq->tq_nalloc != 0)
19737c478bd9Sstevel@tonic-gate 		taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
19747c478bd9Sstevel@tonic-gate 
19757c478bd9Sstevel@tonic-gate 	mutex_exit(&tq->tq_lock);
19767c478bd9Sstevel@tonic-gate 
19777c478bd9Sstevel@tonic-gate 	/*
19787c478bd9Sstevel@tonic-gate 	 * Mark each bucket as closing and wake up all sleeping threads.
19797c478bd9Sstevel@tonic-gate 	 */
19807c478bd9Sstevel@tonic-gate 	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
19817c478bd9Sstevel@tonic-gate 		taskq_ent_t *tqe;
19827c478bd9Sstevel@tonic-gate 
19837c478bd9Sstevel@tonic-gate 		mutex_enter(&b->tqbucket_lock);
19847c478bd9Sstevel@tonic-gate 
19857c478bd9Sstevel@tonic-gate 		b->tqbucket_flags |= TQBUCKET_CLOSE;
19867c478bd9Sstevel@tonic-gate 		/* Wake up all sleeping threads. */
19887c478bd9Sstevel@tonic-gate 		for (tqe = b->tqbucket_freelist.tqent_next;
19897c478bd9Sstevel@tonic-gate 		    tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
19907c478bd9Sstevel@tonic-gate 			cv_signal(&tqe->tqent_cv);
19917c478bd9Sstevel@tonic-gate 
19927c478bd9Sstevel@tonic-gate 		ASSERT(b->tqbucket_nalloc == 0);
19937c478bd9Sstevel@tonic-gate 
19947c478bd9Sstevel@tonic-gate 		/*
19957c478bd9Sstevel@tonic-gate 		 * At this point we have waited for all pending jobs to
19967c478bd9Sstevel@tonic-gate 		 * complete (in both the task queue and the bucket) and no
19977c478bd9Sstevel@tonic-gate 		 * new jobs should arrive. Wait for all threads to die.
19987c478bd9Sstevel@tonic-gate 		 */
19997c478bd9Sstevel@tonic-gate 		while (b->tqbucket_nfree > 0)
20007c478bd9Sstevel@tonic-gate 			cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
20017c478bd9Sstevel@tonic-gate 		mutex_exit(&b->tqbucket_lock);
20027c478bd9Sstevel@tonic-gate 		mutex_destroy(&b->tqbucket_lock);
20037c478bd9Sstevel@tonic-gate 		cv_destroy(&b->tqbucket_cv);
20047c478bd9Sstevel@tonic-gate 	}
20057c478bd9Sstevel@tonic-gate 
20067c478bd9Sstevel@tonic-gate 	if (tq->tq_buckets != NULL) {
20077c478bd9Sstevel@tonic-gate 		ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
20087c478bd9Sstevel@tonic-gate 		kmem_free(tq->tq_buckets,
20097c478bd9Sstevel@tonic-gate 		    sizeof (taskq_bucket_t) * tq->tq_nbuckets);
20107c478bd9Sstevel@tonic-gate 
20117c478bd9Sstevel@tonic-gate 		/* Clean up fields before returning tq to the cache */
20127c478bd9Sstevel@tonic-gate 		tq->tq_buckets = NULL;
20137c478bd9Sstevel@tonic-gate 		tq->tq_tcreates = 0;
20147c478bd9Sstevel@tonic-gate 		tq->tq_tdeaths = 0;
20157c478bd9Sstevel@tonic-gate 	} else {
20167c478bd9Sstevel@tonic-gate 		ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
20177c478bd9Sstevel@tonic-gate 	}
20187c478bd9Sstevel@tonic-gate 
20192e0c549eSJonathan Adams 	tq->tq_threads_ncpus_pct = 0;
20207c478bd9Sstevel@tonic-gate 	tq->tq_totaltime = 0;
20217c478bd9Sstevel@tonic-gate 	tq->tq_tasks = 0;
20227c478bd9Sstevel@tonic-gate 	tq->tq_maxtasks = 0;
20237c478bd9Sstevel@tonic-gate 	tq->tq_executed = 0;
20247c478bd9Sstevel@tonic-gate 	kmem_cache_free(taskq_cache, tq);
20257c478bd9Sstevel@tonic-gate }
20267c478bd9Sstevel@tonic-gate 
20277c478bd9Sstevel@tonic-gate /*
20287c478bd9Sstevel@tonic-gate  * Extend a bucket with a new entry on the free list and attach a worker thread
20297c478bd9Sstevel@tonic-gate  * to it.
20307c478bd9Sstevel@tonic-gate  *
20317c478bd9Sstevel@tonic-gate  * Argument: pointer to the bucket.
20327c478bd9Sstevel@tonic-gate  *
20337c478bd9Sstevel@tonic-gate  * This function may quietly fail. It is only used by taskq_dispatch() which
20347c478bd9Sstevel@tonic-gate  * handles such failures properly.
20357c478bd9Sstevel@tonic-gate  */
20367c478bd9Sstevel@tonic-gate static void
20377c478bd9Sstevel@tonic-gate taskq_bucket_extend(void *arg)
20387c478bd9Sstevel@tonic-gate {
20397c478bd9Sstevel@tonic-gate 	taskq_ent_t *tqe;
20407c478bd9Sstevel@tonic-gate 	taskq_bucket_t *b = (taskq_bucket_t *)arg;
20417c478bd9Sstevel@tonic-gate 	taskq_t *tq = b->tqbucket_taskq;
20427c478bd9Sstevel@tonic-gate 	int nthreads;
20437c478bd9Sstevel@tonic-gate 
20447c478bd9Sstevel@tonic-gate 	if (! ENOUGH_MEMORY()) {
20457c478bd9Sstevel@tonic-gate 		TQ_STAT(b, tqs_nomem);
20467c478bd9Sstevel@tonic-gate 		return;
20477c478bd9Sstevel@tonic-gate 	}
20487c478bd9Sstevel@tonic-gate 
20497c478bd9Sstevel@tonic-gate 	mutex_enter(&tq->tq_lock);
20507c478bd9Sstevel@tonic-gate 
20517c478bd9Sstevel@tonic-gate 	/*
20527c478bd9Sstevel@tonic-gate 	 * Observe global taskq limits on the number of threads.
20537c478bd9Sstevel@tonic-gate 	 */
20547c478bd9Sstevel@tonic-gate 	if (tq->tq_tcreates++ - tq->tq_tdeaths > tq->tq_maxsize) {
20557c478bd9Sstevel@tonic-gate 		tq->tq_tcreates--;
20567c478bd9Sstevel@tonic-gate 		mutex_exit(&tq->tq_lock);
20577c478bd9Sstevel@tonic-gate 		return;
20587c478bd9Sstevel@tonic-gate 	}
20597c478bd9Sstevel@tonic-gate 	mutex_exit(&tq->tq_lock);
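
	/*
	 * Note that tq_tcreates stays incremented across the lock drop:
	 * this reserves the slot so that concurrent extenders see the
	 * pending thread and cannot collectively exceed the limit. The
	 * failure paths (above and after the allocation below) roll the
	 * reservation back.
	 */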
20607c478bd9Sstevel@tonic-gate 
20617c478bd9Sstevel@tonic-gate 	tqe = kmem_cache_alloc(taskq_ent_cache, KM_NOSLEEP);
20627c478bd9Sstevel@tonic-gate 
20637c478bd9Sstevel@tonic-gate 	if (tqe == NULL) {
20647c478bd9Sstevel@tonic-gate 		mutex_enter(&tq->tq_lock);
20657c478bd9Sstevel@tonic-gate 		TQ_STAT(b, tqs_nomem);
20667c478bd9Sstevel@tonic-gate 		tq->tq_tcreates--;
20677c478bd9Sstevel@tonic-gate 		mutex_exit(&tq->tq_lock);
20687c478bd9Sstevel@tonic-gate 		return;
20697c478bd9Sstevel@tonic-gate 	}
20707c478bd9Sstevel@tonic-gate 
20717c478bd9Sstevel@tonic-gate 	ASSERT(tqe->tqent_thread == NULL);
20727c478bd9Sstevel@tonic-gate 
20737c478bd9Sstevel@tonic-gate 	tqe->tqent_bucket = b;
20747c478bd9Sstevel@tonic-gate 
20757c478bd9Sstevel@tonic-gate 	/*
20767c478bd9Sstevel@tonic-gate 	 * Create a thread in a TS_STOPPED state first. If it is successfully
20777c478bd9Sstevel@tonic-gate 	 * created, place the entry on the free list and start the thread.
20787c478bd9Sstevel@tonic-gate 	 */
20797c478bd9Sstevel@tonic-gate 	tqe->tqent_thread = thread_create(NULL, 0, taskq_d_thread, tqe,
20807c478bd9Sstevel@tonic-gate 	    0, &p0, TS_STOPPED, tq->tq_pri);
20817c478bd9Sstevel@tonic-gate 
20827c478bd9Sstevel@tonic-gate 	/*
20837c478bd9Sstevel@tonic-gate 	 * Once the entry is ready, link it to the bucket free list.
20847c478bd9Sstevel@tonic-gate 	 */
20857c478bd9Sstevel@tonic-gate 	mutex_enter(&b->tqbucket_lock);
20867c478bd9Sstevel@tonic-gate 	tqe->tqent_func = NULL;
20877c478bd9Sstevel@tonic-gate 	TQ_APPEND(b->tqbucket_freelist, tqe);
20887c478bd9Sstevel@tonic-gate 	b->tqbucket_nfree++;
20897c478bd9Sstevel@tonic-gate 	TQ_STAT(b, tqs_tcreates);
20907c478bd9Sstevel@tonic-gate 
20917c478bd9Sstevel@tonic-gate #if TASKQ_STATISTIC
20927c478bd9Sstevel@tonic-gate 	nthreads = b->tqbucket_stat.tqs_tcreates -
20937c478bd9Sstevel@tonic-gate 	    b->tqbucket_stat.tqs_tdeaths;
20947c478bd9Sstevel@tonic-gate 	b->tqbucket_stat.tqs_maxthreads = MAX(nthreads,
20957c478bd9Sstevel@tonic-gate 	    b->tqbucket_stat.tqs_maxthreads);
20967c478bd9Sstevel@tonic-gate #endif
20977c478bd9Sstevel@tonic-gate 
20987c478bd9Sstevel@tonic-gate 	mutex_exit(&b->tqbucket_lock);
20997c478bd9Sstevel@tonic-gate 	/*
21007c478bd9Sstevel@tonic-gate 	 * Start the stopped thread.
21017c478bd9Sstevel@tonic-gate 	 */
21027c478bd9Sstevel@tonic-gate 	thread_lock(tqe->tqent_thread);
21037c478bd9Sstevel@tonic-gate 	tqe->tqent_thread->t_taskq = tq;
21047c478bd9Sstevel@tonic-gate 	tqe->tqent_thread->t_schedflag |= TS_ALLSTART;
21057c478bd9Sstevel@tonic-gate 	setrun_locked(tqe->tqent_thread);
21067c478bd9Sstevel@tonic-gate 	thread_unlock(tqe->tqent_thread);
21077c478bd9Sstevel@tonic-gate }
21087c478bd9Sstevel@tonic-gate 
21097c478bd9Sstevel@tonic-gate static int
21107c478bd9Sstevel@tonic-gate taskq_kstat_update(kstat_t *ksp, int rw)
21117c478bd9Sstevel@tonic-gate {
21127c478bd9Sstevel@tonic-gate 	struct taskq_kstat *tqsp = &taskq_kstat;
21137c478bd9Sstevel@tonic-gate 	taskq_t *tq = ksp->ks_private;
21147c478bd9Sstevel@tonic-gate 
21157c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
21167c478bd9Sstevel@tonic-gate 		return (EACCES);
21177c478bd9Sstevel@tonic-gate 
2118*35a5a358SJonathan Adams 	tqsp->tq_pid.value.ui64 = tq->tq_proc->p_pid;
21197c478bd9Sstevel@tonic-gate 	tqsp->tq_tasks.value.ui64 = tq->tq_tasks;
21207c478bd9Sstevel@tonic-gate 	tqsp->tq_executed.value.ui64 = tq->tq_executed;
21217c478bd9Sstevel@tonic-gate 	tqsp->tq_maxtasks.value.ui64 = tq->tq_maxtasks;
21227c478bd9Sstevel@tonic-gate 	tqsp->tq_totaltime.value.ui64 = tq->tq_totaltime;
21237c478bd9Sstevel@tonic-gate 	tqsp->tq_nactive.value.ui64 = tq->tq_active;
21247c478bd9Sstevel@tonic-gate 	tqsp->tq_nalloc.value.ui64 = tq->tq_nalloc;
21257c478bd9Sstevel@tonic-gate 	tqsp->tq_pri.value.ui64 = tq->tq_pri;
21267c478bd9Sstevel@tonic-gate 	tqsp->tq_nthreads.value.ui64 = tq->tq_nthreads;
21277c478bd9Sstevel@tonic-gate 	return (0);
21287c478bd9Sstevel@tonic-gate }
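
/*
 * Observability sketch (user level, not compiled here): these counters
 * can be read through libkstat, using module "unix", the taskq's
 * instance, and the canonicalized queue name; link with -lkstat. The
 * stat name "executed" is assumed to match the taskq_kstat definition
 * earlier in this file.
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "unix", instance, "example_tq");
 *	kstat_named_t *kn;
 *
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1 &&
 *	    (kn = kstat_data_lookup(ksp, "executed")) != NULL)
 *		(void) printf("executed: %llu\n",
 *		    (u_longlong_t)kn->value.ui64);
 *	(void) kstat_close(kc);
 */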
21297c478bd9Sstevel@tonic-gate 
21307c478bd9Sstevel@tonic-gate static int
21317c478bd9Sstevel@tonic-gate taskq_d_kstat_update(kstat_t *ksp, int rw)
21327c478bd9Sstevel@tonic-gate {
21337c478bd9Sstevel@tonic-gate 	struct taskq_d_kstat *tqsp = &taskq_d_kstat;
21347c478bd9Sstevel@tonic-gate 	taskq_t *tq = ksp->ks_private;
21357c478bd9Sstevel@tonic-gate 	taskq_bucket_t *b = tq->tq_buckets;
21367c478bd9Sstevel@tonic-gate 	int bid = 0;
21377c478bd9Sstevel@tonic-gate 
21387c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
21397c478bd9Sstevel@tonic-gate 		return (EACCES);
21407c478bd9Sstevel@tonic-gate 
21417c478bd9Sstevel@tonic-gate 	ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
21427c478bd9Sstevel@tonic-gate 
21437c478bd9Sstevel@tonic-gate 	tqsp->tqd_btasks.value.ui64 = tq->tq_tasks;
21447c478bd9Sstevel@tonic-gate 	tqsp->tqd_bexecuted.value.ui64 = tq->tq_executed;
21457c478bd9Sstevel@tonic-gate 	tqsp->tqd_bmaxtasks.value.ui64 = tq->tq_maxtasks;
21467c478bd9Sstevel@tonic-gate 	tqsp->tqd_bnalloc.value.ui64 = tq->tq_nalloc;
21477c478bd9Sstevel@tonic-gate 	tqsp->tqd_bnactive.value.ui64 = tq->tq_active;
21487c478bd9Sstevel@tonic-gate 	tqsp->tqd_btotaltime.value.ui64 = tq->tq_totaltime;
21497c478bd9Sstevel@tonic-gate 	tqsp->tqd_pri.value.ui64 = tq->tq_pri;
21507c478bd9Sstevel@tonic-gate 
21517c478bd9Sstevel@tonic-gate 	tqsp->tqd_hits.value.ui64 = 0;
21527c478bd9Sstevel@tonic-gate 	tqsp->tqd_misses.value.ui64 = 0;
21537c478bd9Sstevel@tonic-gate 	tqsp->tqd_overflows.value.ui64 = 0;
21547c478bd9Sstevel@tonic-gate 	tqsp->tqd_tcreates.value.ui64 = 0;
21557c478bd9Sstevel@tonic-gate 	tqsp->tqd_tdeaths.value.ui64 = 0;
21567c478bd9Sstevel@tonic-gate 	tqsp->tqd_maxthreads.value.ui64 = 0;
21577c478bd9Sstevel@tonic-gate 	tqsp->tqd_nomem.value.ui64 = 0;
21587c478bd9Sstevel@tonic-gate 	tqsp->tqd_disptcreates.value.ui64 = 0;
21597c478bd9Sstevel@tonic-gate 	tqsp->tqd_totaltime.value.ui64 = 0;
21607c478bd9Sstevel@tonic-gate 	tqsp->tqd_nalloc.value.ui64 = 0;
21617c478bd9Sstevel@tonic-gate 	tqsp->tqd_nfree.value.ui64 = 0;
21627c478bd9Sstevel@tonic-gate 
21637c478bd9Sstevel@tonic-gate 	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
21647c478bd9Sstevel@tonic-gate 		tqsp->tqd_hits.value.ui64 += b->tqbucket_stat.tqs_hits;
21657c478bd9Sstevel@tonic-gate 		tqsp->tqd_misses.value.ui64 += b->tqbucket_stat.tqs_misses;
21667c478bd9Sstevel@tonic-gate 		tqsp->tqd_overflows.value.ui64 += b->tqbucket_stat.tqs_overflow;
21677c478bd9Sstevel@tonic-gate 		tqsp->tqd_tcreates.value.ui64 += b->tqbucket_stat.tqs_tcreates;
21687c478bd9Sstevel@tonic-gate 		tqsp->tqd_tdeaths.value.ui64 += b->tqbucket_stat.tqs_tdeaths;
21697c478bd9Sstevel@tonic-gate 		tqsp->tqd_maxthreads.value.ui64 +=
21707c478bd9Sstevel@tonic-gate 		    b->tqbucket_stat.tqs_maxthreads;
21717c478bd9Sstevel@tonic-gate 		tqsp->tqd_nomem.value.ui64 += b->tqbucket_stat.tqs_nomem;
21727c478bd9Sstevel@tonic-gate 		tqsp->tqd_disptcreates.value.ui64 +=
21737c478bd9Sstevel@tonic-gate 		    b->tqbucket_stat.tqs_disptcreates;
21747c478bd9Sstevel@tonic-gate 		tqsp->tqd_totaltime.value.ui64 += b->tqbucket_totaltime;
21757c478bd9Sstevel@tonic-gate 		tqsp->tqd_nalloc.value.ui64 += b->tqbucket_nalloc;
21767c478bd9Sstevel@tonic-gate 		tqsp->tqd_nfree.value.ui64 += b->tqbucket_nfree;
21777c478bd9Sstevel@tonic-gate 	}
21787c478bd9Sstevel@tonic-gate 	return (0);
21797c478bd9Sstevel@tonic-gate }
2180