xref: /freebsd/sys/contrib/openzfs/module/os/freebsd/spl/spl_taskq.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2009 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Copyright (c) 2012 Spectra Logic Corporation.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/taskq.h>
#include <sys/taskqueue.h>
#include <sys/zfs_context.h>

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

#include <vm/uma.h>

static uint_t taskq_tsd;
static uma_zone_t taskq_zone;

/*
 * Global system-wide dynamic task queue available for all consumers. This
 * taskq is not intended for long-running tasks; instead, a dedicated taskq
 * should be created.
 */
taskq_t *system_taskq = NULL;
taskq_t *system_delay_taskq = NULL;
taskq_t *dynamic_taskq = NULL;

proc_t *system_proc;

static MALLOC_DEFINE(M_TASKQ, "taskq", "taskq structures");

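/*
 * Hash table mapping dispatch IDs (taskqid_t) back to their taskq_ent_t,
 * so taskq_cancel_id() and taskq_wait_id() can find an entry by ID.
 * Each bucket group is protected by one of the sx locks in
 * tqenthashtbl_lock, selected by TQIDHASHLOCK().
 */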
static LIST_HEAD(tqenthashhead, taskq_ent) *tqenthashtbl;
static unsigned long tqenthash;
static unsigned long tqenthashlock;
static struct sx *tqenthashtbl_lock;

static taskqid_t tqidnext;

#define	TQIDHASH(tqid) (&tqenthashtbl[(tqid) & tqenthash])
#define	TQIDHASHLOCK(tqid) (&tqenthashtbl_lock[((tqid) & tqenthashlock)])

#define	NORMAL_TASK 0
#define	TIMEOUT_TASK 1

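/*
 * Set up the taskq compatibility layer at boot: the per-thread TSD key,
 * the tqid hash table and its per-bucket locks, the UMA zone for task
 * entries, and the global system and delay taskqs.
 */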
static void
system_taskq_init(void *arg)
{
	int i;

	tsd_create(&taskq_tsd, NULL);
	tqenthashtbl = hashinit(mp_ncpus * 8, M_TASKQ, &tqenthash);
	tqenthashlock = (tqenthash + 1) / 8;
	if (tqenthashlock > 0)
		tqenthashlock--;
	tqenthashtbl_lock =
	    malloc(sizeof (*tqenthashtbl_lock) * (tqenthashlock + 1),
	    M_TASKQ, M_WAITOK | M_ZERO);
	for (i = 0; i < tqenthashlock + 1; i++)
		sx_init_flags(&tqenthashtbl_lock[i], "tqenthash", SX_DUPOK);
	taskq_zone = uma_zcreate("taskq_zone", sizeof (taskq_ent_t),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	system_taskq = taskq_create("system_taskq", mp_ncpus, minclsyspri,
	    0, 0, 0);
	system_delay_taskq = taskq_create("system_delay_taskq", mp_ncpus,
	    minclsyspri, 0, 0, 0);
}
SYSINIT(system_taskq_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_init,
    NULL);

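/*
 * Tear down the taskq layer: destroy the global taskqs and the UMA zone,
 * then verify every hash bucket is empty before freeing the tables.
 */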
static void
system_taskq_fini(void *arg)
{
	int i;

	taskq_destroy(system_delay_taskq);
	taskq_destroy(system_taskq);
	uma_zdestroy(taskq_zone);
	tsd_destroy(&taskq_tsd);
	for (i = 0; i < tqenthashlock + 1; i++)
		sx_destroy(&tqenthashtbl_lock[i]);
	for (i = 0; i < tqenthash + 1; i++)
		VERIFY(LIST_EMPTY(&tqenthashtbl[i]));
	free(tqenthashtbl_lock, M_TASKQ);
	free(tqenthashtbl, M_TASKQ);
}
SYSUNINIT(system_taskq_fini, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_fini,
    NULL);

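/*
 * Generate a non-zero dispatch ID. On 64-bit platforms the counter is
 * assumed never to wrap; on 32-bit platforms a wrap to zero is skipped
 * so that 0 can always mean "no task".
 */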
#ifdef __LP64__
static taskqid_t
__taskq_genid(void)
{
	taskqid_t tqid;

	/*
	 * Assume a 64-bit counter will not wrap in practice.
	 */
	tqid = atomic_add_64_nv(&tqidnext, 1);
	VERIFY(tqid);
	return (tqid);
}
#else
static taskqid_t
__taskq_genid(void)
{
	taskqid_t tqid;

	for (;;) {
		tqid = atomic_add_32_nv(&tqidnext, 1);
		if (__predict_true(tqid != 0))
			break;
	}
	VERIFY(tqid);
	return (tqid);
}
#endif

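/*
 * Look up a task entry by dispatch ID and take a reference on it while
 * holding the bucket lock shared. The caller must drop the reference
 * with taskq_free().
 */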
static taskq_ent_t *
taskq_lookup(taskqid_t tqid)
{
	taskq_ent_t *ent = NULL;

	if (tqid == 0)
		return (NULL);
	sx_slock(TQIDHASHLOCK(tqid));
	LIST_FOREACH(ent, TQIDHASH(tqid), tqent_hash) {
		if (ent->tqent_id == tqid)
			break;
	}
	if (ent != NULL)
		refcount_acquire(&ent->tqent_rc);
	sx_sunlock(TQIDHASHLOCK(tqid));
	return (ent);
}

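/*
 * Assign a fresh dispatch ID to an entry and link it into the hash table
 * so it can later be found by taskq_lookup().
 */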
static taskqid_t
taskq_insert(taskq_ent_t *ent)
{
	taskqid_t tqid = __taskq_genid();

	ent->tqent_id = tqid;
	sx_xlock(TQIDHASHLOCK(tqid));
	LIST_INSERT_HEAD(TQIDHASH(tqid), ent, tqent_hash);
	sx_xunlock(TQIDHASHLOCK(tqid));
	return (tqid);
}

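/*
 * Unlink an entry from the hash table and clear its ID. Safe to call on
 * an entry that has already been removed.
 */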
static void
taskq_remove(taskq_ent_t *ent)
{
	taskqid_t tqid = ent->tqent_id;

	if (tqid == 0)
		return;
	sx_xlock(TQIDHASHLOCK(tqid));
	if (ent->tqent_id != 0) {
		LIST_REMOVE(ent, tqent_hash);
		ent->tqent_id = 0;
	}
	sx_xunlock(TQIDHASHLOCK(tqid));
}

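/*
 * Taskqueue thread init/shutdown callback: remember which taskq the
 * current pool thread belongs to via TSD. On amd64 and arm64 the thread
 * is also marked as allowed to use FPU/SIMD state in the kernel.
 */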
static void
taskq_tsd_set(void *context)
{
	taskq_t *tq = context;

#if defined(__amd64__) || defined(__aarch64__)
	if (context != NULL && tsd_get(taskq_tsd) == NULL)
		fpu_kern_thread(FPU_KERN_NORMAL);
#endif
	tsd_set(taskq_tsd, tq);
}

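/*
 * Back an illumos-style taskq with a FreeBSD taskqueue(9). With
 * TASKQ_THREADS_CPU_PCT, nthreads is interpreted as a percentage of
 * the available CPUs (at least one thread).
 */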
static taskq_t *
taskq_create_impl(const char *name, int nthreads, pri_t pri,
    proc_t *proc __maybe_unused, uint_t flags)
{
	taskq_t *tq;

	if ((flags & TASKQ_THREADS_CPU_PCT) != 0)
		nthreads = MAX((mp_ncpus * nthreads) / 100, 1);

	tq = kmem_alloc(sizeof (*tq), KM_SLEEP);
	tq->tq_nthreads = nthreads;
	tq->tq_queue = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue, &tq->tq_queue);
	taskqueue_set_callback(tq->tq_queue, TASKQUEUE_CALLBACK_TYPE_INIT,
	    taskq_tsd_set, tq);
	taskqueue_set_callback(tq->tq_queue, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN,
	    taskq_tsd_set, NULL);
	(void) taskqueue_start_threads_in_proc(&tq->tq_queue, nthreads, pri,
	    proc, "%s", name);

	return ((taskq_t *)tq);
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc __unused,
    int maxalloc __unused, uint_t flags)
{
	return (taskq_create_impl(name, nthreads, pri, system_proc, flags));
}

taskq_t *
taskq_create_proc(const char *name, int nthreads, pri_t pri,
    int minalloc __unused, int maxalloc __unused, proc_t *proc, uint_t flags)
{
	return (taskq_create_impl(name, nthreads, pri, proc, flags));
}

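/*
 * Destroy a taskq: free the underlying taskqueue (which stops its
 * threads) and then the wrapper structure itself.
 */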
void
taskq_destroy(taskq_t *tq)
{

	taskqueue_free(tq->tq_queue);
	kmem_free(tq, sizeof (*tq));
}

static void taskq_sync_assign(void *arg);

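/*
 * Handshake state used by taskq_create_synced(): each pool thread runs
 * taskq_sync_assign(), records its kthread_t, signals readiness, and
 * then blocks until the creating thread releases it.
 */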
typedef struct taskq_sync_arg {
	kthread_t	*tqa_thread;
	kcondvar_t	tqa_cv;
	kmutex_t	tqa_lock;
	int		tqa_ready;
} taskq_sync_arg_t;

static void
taskq_sync_assign(void *arg)
{
	taskq_sync_arg_t *tqa = arg;

	mutex_enter(&tqa->tqa_lock);
	tqa->tqa_thread = curthread;
	tqa->tqa_ready = 1;
	cv_signal(&tqa->tqa_cv);
	while (tqa->tqa_ready == 1)
		cv_wait(&tqa->tqa_cv, &tqa->tqa_lock);
	mutex_exit(&tqa->tqa_lock);
}

/*
 * Create a taskq with a specified number of pool threads. Allocate
 * and return an array of nthreads kthread_t pointers, one for each
 * thread in the pool. The array is not ordered and must be freed
 * by the caller.
 */
taskq_t *
taskq_create_synced(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags, kthread_t ***ktpp)
{
	taskq_t *tq;
	taskq_sync_arg_t *tqs = kmem_zalloc(sizeof (*tqs) * nthreads, KM_SLEEP);
	kthread_t **kthreads = kmem_zalloc(sizeof (*kthreads) * nthreads,
	    KM_SLEEP);

	flags &= ~(TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT | TASKQ_DC_BATCH);

	tq = taskq_create(name, nthreads, minclsyspri, nthreads, INT_MAX,
	    flags | TASKQ_PREPOPULATE);
	VERIFY(tq != NULL);
	VERIFY(tq->tq_nthreads == nthreads);

	/* spawn all syncthreads */
	for (int i = 0; i < nthreads; i++) {
		cv_init(&tqs[i].tqa_cv, NULL, CV_DEFAULT, NULL);
		mutex_init(&tqs[i].tqa_lock, NULL, MUTEX_DEFAULT, NULL);
		(void) taskq_dispatch(tq, taskq_sync_assign,
		    &tqs[i], TQ_FRONT);
	}

	/* wait on all syncthreads to start */
	for (int i = 0; i < nthreads; i++) {
		mutex_enter(&tqs[i].tqa_lock);
		while (tqs[i].tqa_ready == 0)
			cv_wait(&tqs[i].tqa_cv, &tqs[i].tqa_lock);
		mutex_exit(&tqs[i].tqa_lock);
	}

	/* let all syncthreads resume, finish */
	for (int i = 0; i < nthreads; i++) {
		mutex_enter(&tqs[i].tqa_lock);
		tqs[i].tqa_ready = 2;
		cv_broadcast(&tqs[i].tqa_cv);
		mutex_exit(&tqs[i].tqa_lock);
	}
	taskq_wait(tq);

	for (int i = 0; i < nthreads; i++) {
		kthreads[i] = tqs[i].tqa_thread;
		mutex_destroy(&tqs[i].tqa_lock);
		cv_destroy(&tqs[i].tqa_cv);
	}
	kmem_free(tqs, sizeof (*tqs) * nthreads);

	*ktpp = kthreads;
	return (tq);
}

int
taskq_member(taskq_t *tq, kthread_t *thread)
{

	return (taskqueue_member(tq->tq_queue, thread));
}

taskq_t *
taskq_of_curthread(void)
{
	return (tsd_get(taskq_tsd));
}

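/*
 * Drop the reference taken at dispatch or lookup time; the entry is
 * unhashed and returned to the UMA zone when the last reference goes.
 */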
static void
taskq_free(taskq_ent_t *task)
{
	taskq_remove(task);
	if (refcount_release(&task->tqent_rc))
		uma_zfree(taskq_zone, task);
}

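/*
 * Cancel a task by dispatch ID. If the task is already running it is
 * drained instead. Returns 0 if a pending task was cancelled, ENOENT if
 * no pending task was found (including when it had already run).
 */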
int
taskq_cancel_id(taskq_t *tq, taskqid_t tid)
{
	uint32_t pend;
	int rc;
	taskq_ent_t *ent;

	if ((ent = taskq_lookup(tid)) == NULL)
		return (ENOENT);

	if (ent->tqent_type == NORMAL_TASK) {
		rc = taskqueue_cancel(tq->tq_queue, &ent->tqent_task, &pend);
		if (rc == EBUSY)
			taskqueue_drain(tq->tq_queue, &ent->tqent_task);
	} else {
		rc = taskqueue_cancel_timeout(tq->tq_queue,
		    &ent->tqent_timeout_task, &pend);
		if (rc == EBUSY) {
			taskqueue_drain_timeout(tq->tq_queue,
			    &ent->tqent_timeout_task);
		}
	}
	if (pend) {
		/*
		 * Tasks normally free themselves when run, but here the task
		 * was cancelled so it did not free itself.
		 */
		taskq_free(ent);
	}
	/* Free the extra reference we added with taskq_lookup. */
	taskq_free(ent);
	return (pend ? 0 : ENOENT);
}

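/*
 * taskqueue(9) callback for dynamically allocated entries: run the
 * dispatched function and drop the dispatch-time reference.
 */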
static void
taskq_run(void *arg, int pending)
{
	taskq_ent_t *task = arg;

	if (pending == 0)
		return;
	task->tqent_func(task->tqent_arg);
	taskq_free(task);
}

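/*
 * Dispatch a task to run at (or after) an absolute lbolt time. If the
 * deadline has already passed, the task is dispatched immediately;
 * otherwise it is queued as a FreeBSD timeout task.
 */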
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskq_ent_t *task;
	taskqid_t tqid;
	clock_t timo;
	int mflag;

	timo = expire_time - ddi_get_lbolt();
	if (timo <= 0)
		return (taskq_dispatch(tq, func, arg, flags));

	if ((flags & (TQ_SLEEP | TQ_NOQUEUE)) == TQ_SLEEP)
		mflag = M_WAITOK;
	else
		mflag = M_NOWAIT;

	task = uma_zalloc(taskq_zone, mflag);
	if (task == NULL)
		return (0);
	task->tqent_func = func;
	task->tqent_arg = arg;
	task->tqent_type = TIMEOUT_TASK;
	refcount_init(&task->tqent_rc, 1);
	tqid = taskq_insert(task);
	TIMEOUT_TASK_INIT(tq->tq_queue, &task->tqent_timeout_task, 0,
	    taskq_run, task);

	taskqueue_enqueue_timeout(tq->tq_queue, &task->tqent_timeout_task,
	    timo);
	return (tqid);
}

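/*
 * Dispatch a task for asynchronous execution, returning a non-zero ID on
 * success or 0 if entry allocation failed. A hypothetical caller might
 * look like:
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != 0)
 *		taskq_wait_id(tq, id);	(or taskq_cancel_id(tq, id))
 *
 * where my_func and my_arg stand in for the caller's own function and
 * argument.
 */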
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *task;
	int mflag, prio;
	taskqid_t tqid;

	if ((flags & (TQ_SLEEP | TQ_NOQUEUE)) == TQ_SLEEP)
		mflag = M_WAITOK;
	else
		mflag = M_NOWAIT;
	/*
	 * If TQ_FRONT is given, we want higher priority for this task, so it
	 * can go at the front of the queue.
	 */
	prio = !!(flags & TQ_FRONT);

	task = uma_zalloc(taskq_zone, mflag);
	if (task == NULL)
		return (0);
	refcount_init(&task->tqent_rc, 1);
	task->tqent_func = func;
	task->tqent_arg = arg;
	task->tqent_type = NORMAL_TASK;
	tqid = taskq_insert(task);
	TASK_INIT(&task->tqent_task, prio, taskq_run, task);
	taskqueue_enqueue(tq->tq_queue, &task->tqent_task);
	return (tqid);
}

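/*
 * taskqueue(9) callback for caller-owned (preallocated) entries; unlike
 * taskq_run() it does not free the entry.
 */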
static void
taskq_run_ent(void *arg, int pending)
{
	taskq_ent_t *task = arg;

	if (pending == 0)
		return;
	task->tqent_func(task->tqent_arg);
}

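/*
 * Dispatch using a caller-provided taskq_ent_t (no allocation, no
 * dispatch ID); such entries cannot be cancelled or waited on by ID.
 */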
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint32_t flags,
    taskq_ent_t *task)
{
	/*
	 * If TQ_FRONT is given, we want higher priority for this task, so it
	 * can go at the front of the queue.
	 */
	task->tqent_task.ta_priority = !!(flags & TQ_FRONT);
	task->tqent_func = func;
	task->tqent_arg = arg;
	taskqueue_enqueue(tq->tq_queue, &task->tqent_task);
}

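/*
 * Initialize a caller-owned entry for later use with
 * taskq_dispatch_ent().
 */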
void
taskq_init_ent(taskq_ent_t *task)
{
	TASK_INIT(&task->tqent_task, 0, taskq_run_ent, task);
	task->tqent_func = NULL;
	task->tqent_arg = NULL;
	task->tqent_id = 0;
	task->tqent_type = NORMAL_TASK;
	task->tqent_rc = 0;
}

int
taskq_empty_ent(taskq_ent_t *task)
{
	return (task->tqent_task.ta_pending == 0);
}

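/*
 * Wait primitives: taskq_wait() quiesces the whole queue,
 * taskq_wait_id() drains a single entry by dispatch ID, and
 * taskq_wait_outstanding() drains everything currently queued.
 */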
void
taskq_wait(taskq_t *tq)
{
	taskqueue_quiesce(tq->tq_queue);
}

void
taskq_wait_id(taskq_t *tq, taskqid_t tid)
{
	taskq_ent_t *ent;

	if ((ent = taskq_lookup(tid)) == NULL)
		return;

	if (ent->tqent_type == NORMAL_TASK)
		taskqueue_drain(tq->tq_queue, &ent->tqent_task);
	else
		taskqueue_drain_timeout(tq->tq_queue, &ent->tqent_timeout_task);
	taskq_free(ent);
}

void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id __unused)
{
	taskqueue_drain_all(tq->tq_queue);
}
529