1*8ac904ceSMartin Matuska // SPDX-License-Identifier: CDDL-1.0
2*8ac904ceSMartin Matuska /*
3*8ac904ceSMartin Matuska * CDDL HEADER START
4*8ac904ceSMartin Matuska *
5*8ac904ceSMartin Matuska * The contents of this file are subject to the terms of the
6*8ac904ceSMartin Matuska * Common Development and Distribution License (the "License").
7*8ac904ceSMartin Matuska * You may not use this file except in compliance with the License.
8*8ac904ceSMartin Matuska *
9*8ac904ceSMartin Matuska * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*8ac904ceSMartin Matuska * or https://opensource.org/licenses/CDDL-1.0.
11*8ac904ceSMartin Matuska * See the License for the specific language governing permissions
12*8ac904ceSMartin Matuska * and limitations under the License.
13*8ac904ceSMartin Matuska *
14*8ac904ceSMartin Matuska * When distributing Covered Code, include this CDDL HEADER in each
15*8ac904ceSMartin Matuska * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*8ac904ceSMartin Matuska * If applicable, add the following below this CDDL HEADER, with the
17*8ac904ceSMartin Matuska * fields enclosed by brackets "[]" replaced with your own identifying
18*8ac904ceSMartin Matuska * information: Portions Copyright [yyyy] [name of copyright owner]
19*8ac904ceSMartin Matuska *
20*8ac904ceSMartin Matuska * CDDL HEADER END
21*8ac904ceSMartin Matuska */
22*8ac904ceSMartin Matuska /*
23*8ac904ceSMartin Matuska * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24*8ac904ceSMartin Matuska * Use is subject to license terms.
25*8ac904ceSMartin Matuska */
26*8ac904ceSMartin Matuska /*
27*8ac904ceSMartin Matuska * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
28*8ac904ceSMartin Matuska * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
29*8ac904ceSMartin Matuska * Copyright (c) 2014 by Delphix. All rights reserved.
30*8ac904ceSMartin Matuska */
31*8ac904ceSMartin Matuska
32*8ac904ceSMartin Matuska #include <sys/sysmacros.h>
33*8ac904ceSMartin Matuska #include <sys/timer.h>
34*8ac904ceSMartin Matuska #include <sys/types.h>
35*8ac904ceSMartin Matuska #include <sys/thread.h>
36*8ac904ceSMartin Matuska #include <sys/taskq.h>
37*8ac904ceSMartin Matuska #include <sys/kmem.h>
38*8ac904ceSMartin Matuska
39*8ac904ceSMartin Matuska static taskq_t *__system_taskq = NULL;
40*8ac904ceSMartin Matuska static taskq_t *__system_delay_taskq = NULL;
41*8ac904ceSMartin Matuska
/*
 * Accessor for the global system taskq created by system_taskq_init().
 * Returns NULL before init or after system_taskq_fini().
 */
taskq_t *
_system_taskq(void)
{
	return (__system_taskq);
}
47*8ac904ceSMartin Matuska
/*
 * Accessor for the global delay taskq created by system_taskq_init().
 * Returns NULL before init or after system_taskq_fini().
 */
taskq_t *
_system_delay_taskq(void)
{
	return (__system_delay_taskq);
}
53*8ac904ceSMartin Matuska
54*8ac904ceSMartin Matuska static pthread_key_t taskq_tsd;
55*8ac904ceSMartin Matuska
56*8ac904ceSMartin Matuska #define TASKQ_ACTIVE 0x00010000
57*8ac904ceSMartin Matuska
/*
 * Allocate a taskq entry, preferring the queue's freelist over kmem.
 * Called with tq->tq_lock held; the lock may be dropped and reacquired
 * around kmem_alloc() and while throttling in cv_timedwait().
 * Returns NULL only for non-KM_SLEEP callers when tq_maxalloc is hit
 * or kmem_alloc() fails.
 */
static taskq_ent_t *
task_alloc(taskq_t *tq, int tqflags)
{
	taskq_ent_t *t;
	int rv;

again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
		/* Freelist entries must never carry the prealloc flag. */
		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		tq->tq_freelist = t->tqent_next;
	} else {
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (!(tqflags & KM_SLEEP))
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller.  So, we just delay for one second
			 * to throttle the allocation rate. If we have tasks
			 * complete before one second timeout expires then
			 * taskq_ent_free will signal us and we will
			 * immediately retry the allocation.
			 */
			tq->tq_maxalloc_wait++;
			rv = cv_timedwait(&tq->tq_maxalloc_cv,
			    &tq->tq_lock, ddi_get_lbolt() + hz);
			tq->tq_maxalloc_wait--;
			if (rv > 0)
				goto again;		/* signaled */
		}
		/* Drop the lock: kmem_alloc() may sleep. */
		mutex_exit(&tq->tq_lock);

		t = kmem_alloc(sizeof (taskq_ent_t), tqflags);

		mutex_enter(&tq->tq_lock);
		if (t != NULL) {
			/* Make sure we start without any flags */
			t->tqent_flags = 0;
			tq->tq_nalloc++;
		}
	}
	return (t);
}
102*8ac904ceSMartin Matuska
/*
 * Return a taskq entry to the freelist, or release it to kmem when the
 * queue already holds at least tq_minalloc cached entries.  Called with
 * tq->tq_lock held; the lock is dropped around kmem_free().  Wakes any
 * task_alloc() callers throttled on tq_maxalloc_cv.
 */
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	if (tq->tq_nalloc <= tq->tq_minalloc) {
		/* Cache the entry on the freelist for reuse. */
		t->tqent_next = tq->tq_freelist;
		tq->tq_freelist = t;
	} else {
		tq->tq_nalloc--;
		/* Drop the lock while freeing to kmem. */
		mutex_exit(&tq->tq_lock);
		kmem_free(t, sizeof (taskq_ent_t));
		mutex_enter(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}
119*8ac904ceSMartin Matuska
/*
 * Dispatch func(arg) onto the taskq.  Returns a nonzero dummy taskqid (1)
 * on success, or 0 if a task entry could not be allocated (only possible
 * for non-KM_SLEEP tqflags).  TQ_FRONT inserts at the head of the queue;
 * otherwise tasks are appended at the tail.
 */
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
{
	taskq_ent_t *t;

	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_ACTIVE);
	if ((t = task_alloc(tq, tqflags)) == NULL) {
		mutex_exit(&tq->tq_lock);
		return (0);
	}
	/* Splice into the circular doubly-linked list rooted at tq_task. */
	if (tqflags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_flags = 0;
	/* Wake one worker to pick up the new task. */
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
	return (1);
}
147*8ac904ceSMartin Matuska
/*
 * Delayed dispatch is not implemented in this userland taskq; all
 * arguments are ignored and 0 (dispatch failure) is always returned.
 */
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags,
    clock_t expire_time)
{
	(void) tq, (void) func, (void) arg, (void) tqflags, (void) expire_time;
	return (0);
}
155*8ac904ceSMartin Matuska
/*
 * Return nonzero if the entry is not currently linked into any queue
 * (workers NULL tqent_next when they dequeue; taskq_init_ent() starts
 * entries this way).
 */
int
taskq_empty_ent(taskq_ent_t *t)
{
	return (t->tqent_next == NULL);
}
161*8ac904ceSMartin Matuska
162*8ac904ceSMartin Matuska void
taskq_init_ent(taskq_ent_t * t)163*8ac904ceSMartin Matuska taskq_init_ent(taskq_ent_t *t)
164*8ac904ceSMartin Matuska {
165*8ac904ceSMartin Matuska t->tqent_next = NULL;
166*8ac904ceSMartin Matuska t->tqent_prev = NULL;
167*8ac904ceSMartin Matuska t->tqent_func = NULL;
168*8ac904ceSMartin Matuska t->tqent_arg = NULL;
169*8ac904ceSMartin Matuska t->tqent_flags = 0;
170*8ac904ceSMartin Matuska }
171*8ac904ceSMartin Matuska
/*
 * Dispatch func(arg) using a caller-preallocated entry.  The entry is
 * flagged TQENT_FLAG_PREALLOC so the worker thread will not return it
 * to the taskq's allocator after execution.  Cannot fail: no allocation
 * is performed here.
 */
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
{
	ASSERT(func != NULL);

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;
	/*
	 * Enqueue the task to the underlying queue.
	 */
	mutex_enter(&tq->tq_lock);

	/* TQ_FRONT inserts at the head; default appends at the tail. */
	if (flags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	/* Wake one worker to pick up the new task. */
	cv_signal(&tq->tq_dispatch_cv);
	mutex_exit(&tq->tq_lock);
}
202*8ac904ceSMartin Matuska
/*
 * Block until the queue is empty and no worker is executing a task
 * (tq_active drops to 0).  Workers broadcast tq_wait_cv when they go
 * idle or exit.
 */
void
taskq_wait(taskq_t *tq)
{
	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);
}
211*8ac904ceSMartin Matuska
/*
 * Task ids are not tracked in this implementation, so waiting for a
 * specific id degrades to draining the whole queue.
 */
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
	(void) id;
	taskq_wait(tq);
}
218*8ac904ceSMartin Matuska
/*
 * As with taskq_wait_id(), ids are not tracked; drain the whole queue
 * instead of waiting only for tasks dispatched before `id`.
 */
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
	(void) id;
	taskq_wait(tq);
}
225*8ac904ceSMartin Matuska
/*
 * Worker thread main loop: dequeue tasks and run them until the taskq
 * is deactivated by taskq_destroy().  Records the owning taskq in
 * thread-specific data so taskq_of_curthread() works.  Does not return.
 */
static __attribute__((noreturn)) void
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	taskq_ent_t *t;
	boolean_t prealloc;

	VERIFY0(pthread_setspecific(taskq_tsd, tq));

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((t = tq->tq_task.tqent_next) == &tq->tq_task) {
			/* Queue empty: go idle, waking waiters if last. */
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			tq->tq_active++;
			continue;
		}
		/* Unlink the task; NULL links mark it as dequeued. */
		t->tqent_prev->tqent_next = t->tqent_next;
		t->tqent_next->tqent_prev = t->tqent_prev;
		t->tqent_next = NULL;
		t->tqent_prev = NULL;
		/* Latch the flag now: t may be caller-owned after func runs. */
		prealloc = t->tqent_flags & TQENT_FLAG_PREALLOC;
		mutex_exit(&tq->tq_lock);

		/* Run the task outside tq_lock, under the thread rwlock. */
		rw_enter(&tq->tq_threadlock, RW_READER);
		t->tqent_func(t->tqent_arg);
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		if (!prealloc)
			task_free(tq, t);
	}
	/* Shutting down: report our exit to taskq_destroy(). */
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	mutex_exit(&tq->tq_lock);
	thread_exit();
}
264*8ac904ceSMartin Matuska
265*8ac904ceSMartin Matuska taskq_t *
taskq_create(const char * name,int nthreads,pri_t pri,int minalloc,int maxalloc,uint_t flags)266*8ac904ceSMartin Matuska taskq_create(const char *name, int nthreads, pri_t pri,
267*8ac904ceSMartin Matuska int minalloc, int maxalloc, uint_t flags)
268*8ac904ceSMartin Matuska {
269*8ac904ceSMartin Matuska (void) pri;
270*8ac904ceSMartin Matuska taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
271*8ac904ceSMartin Matuska int t;
272*8ac904ceSMartin Matuska
273*8ac904ceSMartin Matuska if (flags & TASKQ_THREADS_CPU_PCT) {
274*8ac904ceSMartin Matuska int pct;
275*8ac904ceSMartin Matuska ASSERT3S(nthreads, >=, 0);
276*8ac904ceSMartin Matuska ASSERT3S(nthreads, <=, 100);
277*8ac904ceSMartin Matuska pct = MIN(nthreads, 100);
278*8ac904ceSMartin Matuska pct = MAX(pct, 0);
279*8ac904ceSMartin Matuska
280*8ac904ceSMartin Matuska nthreads = (sysconf(_SC_NPROCESSORS_ONLN) * pct) / 100;
281*8ac904ceSMartin Matuska nthreads = MAX(nthreads, 1); /* need at least 1 thread */
282*8ac904ceSMartin Matuska } else {
283*8ac904ceSMartin Matuska ASSERT3S(nthreads, >=, 1);
284*8ac904ceSMartin Matuska }
285*8ac904ceSMartin Matuska
286*8ac904ceSMartin Matuska rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
287*8ac904ceSMartin Matuska mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
288*8ac904ceSMartin Matuska cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
289*8ac904ceSMartin Matuska cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
290*8ac904ceSMartin Matuska cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
291*8ac904ceSMartin Matuska (void) strlcpy(tq->tq_name, name, sizeof (tq->tq_name));
292*8ac904ceSMartin Matuska tq->tq_flags = flags | TASKQ_ACTIVE;
293*8ac904ceSMartin Matuska tq->tq_active = nthreads;
294*8ac904ceSMartin Matuska tq->tq_nthreads = nthreads;
295*8ac904ceSMartin Matuska tq->tq_minalloc = minalloc;
296*8ac904ceSMartin Matuska tq->tq_maxalloc = maxalloc;
297*8ac904ceSMartin Matuska tq->tq_task.tqent_next = &tq->tq_task;
298*8ac904ceSMartin Matuska tq->tq_task.tqent_prev = &tq->tq_task;
299*8ac904ceSMartin Matuska tq->tq_threadlist = kmem_alloc(nthreads * sizeof (kthread_t *),
300*8ac904ceSMartin Matuska KM_SLEEP);
301*8ac904ceSMartin Matuska
302*8ac904ceSMartin Matuska if (flags & TASKQ_PREPOPULATE) {
303*8ac904ceSMartin Matuska mutex_enter(&tq->tq_lock);
304*8ac904ceSMartin Matuska while (minalloc-- > 0)
305*8ac904ceSMartin Matuska task_free(tq, task_alloc(tq, KM_SLEEP));
306*8ac904ceSMartin Matuska mutex_exit(&tq->tq_lock);
307*8ac904ceSMartin Matuska }
308*8ac904ceSMartin Matuska
309*8ac904ceSMartin Matuska for (t = 0; t < nthreads; t++)
310*8ac904ceSMartin Matuska VERIFY((tq->tq_threadlist[t] = thread_create_named(tq->tq_name,
311*8ac904ceSMartin Matuska NULL, 0, taskq_thread, tq, 0, &p0, TS_RUN, pri)) != NULL);
312*8ac904ceSMartin Matuska
313*8ac904ceSMartin Matuska return (tq);
314*8ac904ceSMartin Matuska }
315*8ac904ceSMartin Matuska
/*
 * Drain, stop, and free a taskq.  Waits for all queued tasks to finish,
 * deactivates the queue so workers exit, reaps the entry freelist, and
 * destroys all synchronization primitives before freeing the taskq.
 */
void
taskq_destroy(taskq_t *tq)
{
	/* Snapshot: tq_nthreads reaches 0 during shutdown below. */
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);

	/* Clearing TASKQ_ACTIVE makes the worker loops terminate. */
	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);

	/* Each exiting worker decrements tq_nthreads and broadcasts. */
	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	/* Force task_free() to release entries to kmem, not the freelist. */
	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0) {
		ASSERT(tq->tq_freelist != NULL);
		taskq_ent_t *tqent_nexttq = tq->tq_freelist->tqent_next;
		task_free(tq, tq->tq_freelist);
		tq->tq_freelist = tqent_nexttq;
	}

	mutex_exit(&tq->tq_lock);

	kmem_free(tq->tq_threadlist, nthreads * sizeof (kthread_t *));

	rw_destroy(&tq->tq_threadlock);
	mutex_destroy(&tq->tq_lock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_wait_cv);
	cv_destroy(&tq->tq_maxalloc_cv);

	kmem_free(tq, sizeof (taskq_t));
}
351*8ac904ceSMartin Matuska
/*
 * Create a taskq with a specified number of pool threads. Allocate
 * and return an array of nthreads kthread_t pointers, one for each
 * thread in the pool. The array is not ordered and must be freed
 * by the caller.
 */
taskq_t *
taskq_create_synced(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags, kthread_t ***ktpp)
{
	taskq_t *tq;
	kthread_t **kthreads = kmem_zalloc(sizeof (*kthreads) * nthreads,
	    KM_SLEEP);

	/* pri/minalloc/maxalloc are ignored; fixed values are used below. */
	(void) pri; (void) minalloc; (void) maxalloc;

	/* These flags are incompatible with an exact thread count. */
	flags &= ~(TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT | TASKQ_DC_BATCH);

	tq = taskq_create(name, nthreads, minclsyspri, nthreads, INT_MAX,
	    flags | TASKQ_PREPOPULATE);
	VERIFY(tq != NULL);
	VERIFY(tq->tq_nthreads == nthreads);

	/* Copy thread pointers so the caller owns an independent array. */
	for (int i = 0; i < nthreads; i++) {
		kthreads[i] = tq->tq_threadlist[i];
	}
	*ktpp = kthreads;
	return (tq);
}
381*8ac904ceSMartin Matuska
382*8ac904ceSMartin Matuska int
taskq_member(taskq_t * tq,kthread_t * t)383*8ac904ceSMartin Matuska taskq_member(taskq_t *tq, kthread_t *t)
384*8ac904ceSMartin Matuska {
385*8ac904ceSMartin Matuska int i;
386*8ac904ceSMartin Matuska
387*8ac904ceSMartin Matuska for (i = 0; i < tq->tq_nthreads; i++)
388*8ac904ceSMartin Matuska if (tq->tq_threadlist[i] == t)
389*8ac904ceSMartin Matuska return (1);
390*8ac904ceSMartin Matuska
391*8ac904ceSMartin Matuska return (0);
392*8ac904ceSMartin Matuska }
393*8ac904ceSMartin Matuska
/*
 * Return the taskq the calling thread belongs to (set via thread-specific
 * data in taskq_thread()), or NULL if the caller is not a taskq worker.
 */
taskq_t *
taskq_of_curthread(void)
{
	return (pthread_getspecific(taskq_tsd));
}
399*8ac904ceSMartin Matuska
/*
 * Cancellation by id is not supported in this implementation (ids are
 * not tracked); always reports the task as not found.
 */
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
	(void) tq, (void) id;
	return (ENOENT);
}
406*8ac904ceSMartin Matuska
/*
 * Create the per-thread taskq key and the two global taskqs returned by
 * _system_taskq() and _system_delay_taskq().  Must be called before any
 * taskq use; paired with system_taskq_fini().
 */
void
system_taskq_init(void)
{
	VERIFY0(pthread_key_create(&taskq_tsd, NULL));
	__system_taskq = taskq_create("system_taskq", 64, maxclsyspri, 4, 512,
	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
	__system_delay_taskq = taskq_create("delay_taskq", 4, maxclsyspri, 4,
	    512, TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}
416*8ac904ceSMartin Matuska
/*
 * Tear down the global taskqs and the thread-specific-data key created
 * by system_taskq_init().
 */
void
system_taskq_fini(void)
{
	taskq_destroy(__system_taskq);
	__system_taskq = NULL; /* defensive */
	taskq_destroy(__system_delay_taskq);
	__system_delay_taskq = NULL;
	VERIFY0(pthread_key_delete(taskq_tsd));
}
426