1 /*
2 * Copyright (C) 2004-2012 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 /* $Id$ */
19
20 /*! \file
21 * \author Principal Author: Bob Halley
22 */
23
24 /*
25 * XXXRTH Need to document the states a task can be in, and the rules
26 * for changing states.
27 */
28
29 #include <config.h>
30
31 #include <isc/condition.h>
32 #include <isc/event.h>
33 #include <isc/magic.h>
34 #include <isc/mem.h>
35 #include <isc/msgs.h>
36 #include <isc/platform.h>
37 #include <isc/string.h>
38 #include <isc/task.h>
39 #include <isc/thread.h>
40 #include <isc/util.h>
41 #include <isc/xml.h>
42
43 #ifdef OPENSSL_LEAKS
44 #include <openssl/err.h>
45 #endif
46
47 /*%
48 * For BIND9 internal applications:
49 * when built with threads, we use multiple worker threads shared by the whole
50 * application;
51 * when built without threads, we share a single global task manager and use
52 * an integrated event loop for socket, timer, and other generic task events.
53 * For the generic library:
54 * we use neither of these: an application can have multiple task managers
55 * whether or not it is threaded, and if the application is threaded, each thread
56 * is expected to have a separate manager; no "worker threads" are shared by
57 * the application threads.
58 */
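/*
 * Whichever of the above models is compiled in, an application drives
 * tasks through the public isc_taskmgr/isc_task interface.  A minimal
 * usage sketch (illustrative only: error checking is omitted, and the
 * event type value 1 and the action name 'greet' are placeholders):
 *
 *	static void
 *	greet(isc_task_t *task, isc_event_t *event) {
 *		UNUSED(task);
 *		printf("got event type %u\n", event->ev_type);
 *		isc_event_free(&event);		// receiver frees the event
 *	}
 *
 *	isc_mem_t *mctx = NULL;
 *	isc_taskmgr_t *taskmgr = NULL;
 *	isc_task_t *task = NULL;
 *	isc_event_t *event;
 *
 *	isc_mem_create(0, 0, &mctx);
 *	isc_taskmgr_create(mctx, 4, 0, &taskmgr);	// 4 workers, default quantum
 *	isc_task_create(taskmgr, 0, &task);
 *	event = isc_event_allocate(mctx, NULL, 1, greet, NULL, sizeof(*event));
 *	isc_task_send(task, &event);			// task now owns the event
 *	isc_task_detach(&task);
 *	isc_taskmgr_destroy(&taskmgr);			// waits for tasks to finish
 *	isc_mem_destroy(&mctx);
 */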
59 #ifdef BIND9
60 #ifdef ISC_PLATFORM_USETHREADS
61 #define USE_WORKER_THREADS
62 #else
63 #define USE_SHARED_MANAGER
64 #endif /* ISC_PLATFORM_USETHREADS */
65 #endif /* BIND9 */
66
67 #include "task_p.h"
68
69 #ifdef ISC_TASK_TRACE
70 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
71 task, isc_thread_self(), (m))
72 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
73 (t), isc_thread_self(), (m))
74 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
75 isc_thread_self(), (m))
76 #else
77 #define XTRACE(m)
78 #define XTTRACE(t, m)
79 #define XTHREADTRACE(m)
80 #endif
81
82 /***
83 *** Types.
84 ***/
85
86 typedef enum {
87 task_state_idle, task_state_ready, task_state_running,
88 task_state_done
89 } task_state_t;
90
91 #if defined(HAVE_LIBXML2) && defined(BIND9)
92 static const char *statenames[] = {
93 "idle", "ready", "running", "done",
94 };
95 #endif
96
97 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
98 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
99
100 typedef struct isc__task isc__task_t;
101 typedef struct isc__taskmgr isc__taskmgr_t;
102
103 struct isc__task {
104 /* Not locked. */
105 isc_task_t common;
106 isc__taskmgr_t * manager;
107 isc_mutex_t lock;
108 /* Locked by task lock. */
109 task_state_t state;
110 unsigned int references;
111 isc_eventlist_t events;
112 isc_eventlist_t on_shutdown;
113 unsigned int quantum;
114 unsigned int flags;
115 isc_stdtime_t now;
116 char name[16];
117 void * tag;
118 /* Locked by task manager lock. */
119 LINK(isc__task_t) link;
120 LINK(isc__task_t) ready_link;
121 LINK(isc__task_t) ready_priority_link;
122 };
123
124 #define TASK_F_SHUTTINGDOWN 0x01
125 #define TASK_F_PRIVILEGED 0x02
126
127 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
128 != 0)
129
130 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
131 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
132
133 typedef ISC_LIST(isc__task_t) isc__tasklist_t;
134
135 struct isc__taskmgr {
136 /* Not locked. */
137 isc_taskmgr_t common;
138 isc_mem_t * mctx;
139 isc_mutex_t lock;
140 #ifdef ISC_PLATFORM_USETHREADS
141 unsigned int workers;
142 isc_thread_t * threads;
143 #endif /* ISC_PLATFORM_USETHREADS */
144 /* Locked by task manager lock. */
145 unsigned int default_quantum;
146 LIST(isc__task_t) tasks;
147 isc__tasklist_t ready_tasks;
148 isc__tasklist_t ready_priority_tasks;
149 isc_taskmgrmode_t mode;
150 #ifdef ISC_PLATFORM_USETHREADS
151 isc_condition_t work_available;
152 isc_condition_t exclusive_granted;
153 isc_condition_t paused;
154 #endif /* ISC_PLATFORM_USETHREADS */
155 unsigned int tasks_running;
156 isc_boolean_t pause_requested;
157 isc_boolean_t exclusive_requested;
158 isc_boolean_t exiting;
159 #ifdef USE_SHARED_MANAGER
160 unsigned int refs;
161 #endif /* USE_SHARED_MANAGER */
162 };
163
164 #define DEFAULT_TASKMGR_QUANTUM 10
165 #define DEFAULT_DEFAULT_QUANTUM 5
166 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
167
168 #ifdef USE_SHARED_MANAGER
169 static isc__taskmgr_t *taskmgr = NULL;
170 #endif /* USE_SHARED_MANAGER */
171
172 /*%
173 * The following can be either static or public, depending on the build environment.
174 */
175
176 #ifdef BIND9
177 #define ISC_TASKFUNC_SCOPE
178 #else
179 #define ISC_TASKFUNC_SCOPE static
180 #endif
181
182 ISC_TASKFUNC_SCOPE isc_result_t
183 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
184 isc_task_t **taskp);
185 ISC_TASKFUNC_SCOPE void
186 isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
187 ISC_TASKFUNC_SCOPE void
188 isc__task_detach(isc_task_t **taskp);
189 ISC_TASKFUNC_SCOPE void
190 isc__task_send(isc_task_t *task0, isc_event_t **eventp);
191 ISC_TASKFUNC_SCOPE void
192 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
193 ISC_TASKFUNC_SCOPE unsigned int
194 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
195 isc_eventtype_t last, void *tag);
196 ISC_TASKFUNC_SCOPE unsigned int
197 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
198 void *tag);
199 ISC_TASKFUNC_SCOPE isc_boolean_t
200 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event);
201 ISC_TASKFUNC_SCOPE unsigned int
202 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
203 isc_eventtype_t last, void *tag,
204 isc_eventlist_t *events);
205 ISC_TASKFUNC_SCOPE unsigned int
206 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
207 void *tag, isc_eventlist_t *events);
208 ISC_TASKFUNC_SCOPE isc_result_t
209 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
210 const void *arg);
211 ISC_TASKFUNC_SCOPE void
212 isc__task_shutdown(isc_task_t *task0);
213 ISC_TASKFUNC_SCOPE void
214 isc__task_destroy(isc_task_t **taskp);
215 ISC_TASKFUNC_SCOPE void
216 isc__task_setname(isc_task_t *task0, const char *name, void *tag);
217 ISC_TASKFUNC_SCOPE const char *
218 isc__task_getname(isc_task_t *task0);
219 ISC_TASKFUNC_SCOPE void *
220 isc__task_gettag(isc_task_t *task0);
221 ISC_TASKFUNC_SCOPE void
222 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
223 ISC_TASKFUNC_SCOPE isc_result_t
224 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
225 unsigned int default_quantum, isc_taskmgr_t **managerp);
226 ISC_TASKFUNC_SCOPE void
227 isc__taskmgr_destroy(isc_taskmgr_t **managerp);
228 ISC_TASKFUNC_SCOPE isc_result_t
229 isc__task_beginexclusive(isc_task_t *task);
230 ISC_TASKFUNC_SCOPE void
231 isc__task_endexclusive(isc_task_t *task0);
232 ISC_TASKFUNC_SCOPE void
233 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv);
234 ISC_TASKFUNC_SCOPE isc_boolean_t
235 isc__task_privilege(isc_task_t *task0);
236 ISC_TASKFUNC_SCOPE void
237 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode);
238 ISC_TASKFUNC_SCOPE isc_taskmgrmode_t
239 isc__taskmgr_mode(isc_taskmgr_t *manager0);
240
241 static inline isc_boolean_t
242 empty_readyq(isc__taskmgr_t *manager);
243
244 static inline isc__task_t *
245 pop_readyq(isc__taskmgr_t *manager);
246
247 static inline void
248 push_readyq(isc__taskmgr_t *manager, isc__task_t *task);
249
250 static struct isc__taskmethods {
251 isc_taskmethods_t methods;
252
253 /*%
254 * The following are defined just to avoid warnings about unused static functions.
255 */
256 #ifndef BIND9
257 void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
258 #endif
259 } taskmethods = {
260 {
261 isc__task_attach,
262 isc__task_detach,
263 isc__task_destroy,
264 isc__task_send,
265 isc__task_sendanddetach,
266 isc__task_unsend,
267 isc__task_onshutdown,
268 isc__task_shutdown,
269 isc__task_setname,
270 isc__task_purge,
271 isc__task_purgerange,
272 isc__task_beginexclusive,
273 isc__task_endexclusive,
274 isc__task_setprivilege,
275 isc__task_privilege
276 }
277 #ifndef BIND9
278 ,
279 (void *)isc__task_purgeevent, (void *)isc__task_unsendrange,
280 (void *)isc__task_getname, (void *)isc__task_gettag,
281 (void *)isc__task_getcurrenttime
282 #endif
283 };
284
285 static isc_taskmgrmethods_t taskmgrmethods = {
286 isc__taskmgr_destroy,
287 isc__taskmgr_setmode,
288 isc__taskmgr_mode,
289 isc__task_create
290 };
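/*
 * The public isc_task_*() and isc_taskmgr_*() entry points can reach this
 * implementation through the method pointers stored in each object via the
 * tables above.  A rough sketch of that indirection (the real wrappers
 * live in task_api.c; this is only an illustration of the mechanism):
 *
 *	void
 *	isc_task_send(isc_task_t *task, isc_event_t **eventp) {
 *		task->methods->send(task, eventp);
 *	}
 */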
291
292 /***
293 *** Tasks.
294 ***/
295
296 static void
297 task_finished(isc__task_t *task) {
298 isc__taskmgr_t *manager = task->manager;
299
300 REQUIRE(EMPTY(task->events));
301 REQUIRE(EMPTY(task->on_shutdown));
302 REQUIRE(task->references == 0);
303 REQUIRE(task->state == task_state_done);
304
305 XTRACE("task_finished");
306
307 LOCK(&manager->lock);
308 UNLINK(manager->tasks, task, link);
309 #ifdef USE_WORKER_THREADS
310 if (FINISHED(manager)) {
311 /*
312 * All tasks have completed and the
313 * task manager is exiting. Wake up
314 * any idle worker threads so they
315 * can exit.
316 */
317 BROADCAST(&manager->work_available);
318 }
319 #endif /* USE_WORKER_THREADS */
320 UNLOCK(&manager->lock);
321
322 DESTROYLOCK(&task->lock);
323 task->common.impmagic = 0;
324 task->common.magic = 0;
325 isc_mem_put(manager->mctx, task, sizeof(*task));
326 }
327
328 ISC_TASKFUNC_SCOPE isc_result_t
329 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
330 isc_task_t **taskp)
331 {
332 isc__taskmgr_t *manager = (void*)manager0;
333 isc__task_t *task;
334 isc_boolean_t exiting;
335 isc_result_t result;
336
337 REQUIRE(VALID_MANAGER(manager));
338 REQUIRE(taskp != NULL && *taskp == NULL);
339
340 task = isc_mem_get(manager->mctx, sizeof(*task));
341 if (task == NULL)
342 return (ISC_R_NOMEMORY);
343 XTRACE("isc_task_create");
344 result = isc_mutex_init(&task->lock);
345 if (result != ISC_R_SUCCESS) {
346 isc_mem_put(manager->mctx, task, sizeof(*task));
347 return (result);
348 }
349 LOCK(&manager->lock);
350 LOCK(&task->lock); /* helps coverity analysis noise ratio */
351 task->manager = manager;
352 task->state = task_state_idle;
353 task->references = 1;
354 INIT_LIST(task->events);
355 INIT_LIST(task->on_shutdown);
356 task->quantum = quantum;
357 task->flags = 0;
358 task->now = 0;
359 memset(task->name, 0, sizeof(task->name));
360 task->tag = NULL;
361 INIT_LINK(task, link);
362 INIT_LINK(task, ready_link);
363 INIT_LINK(task, ready_priority_link);
364 UNLOCK(&task->lock);
365 UNLOCK(&manager->lock);
366
367 exiting = ISC_FALSE;
368 LOCK(&manager->lock);
369 if (!manager->exiting) {
370 if (task->quantum == 0)
371 task->quantum = manager->default_quantum;
372 APPEND(manager->tasks, task, link);
373 } else
374 exiting = ISC_TRUE;
375 UNLOCK(&manager->lock);
376
377 if (exiting) {
378 DESTROYLOCK(&task->lock);
379 isc_mem_put(manager->mctx, task, sizeof(*task));
380 return (ISC_R_SHUTTINGDOWN);
381 }
382
383 task->common.methods = (isc_taskmethods_t *)&taskmethods;
384 task->common.magic = ISCAPI_TASK_MAGIC;
385 task->common.impmagic = TASK_MAGIC;
386 *taskp = (isc_task_t *)task;
387
388 return (ISC_R_SUCCESS);
389 }
390
391 ISC_TASKFUNC_SCOPE void
392 isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
393 isc__task_t *source = (isc__task_t *)source0;
394
395 /*
396 * Attach *targetp to source.
397 */
398
399 REQUIRE(VALID_TASK(source));
400 REQUIRE(targetp != NULL && *targetp == NULL);
401
402 XTTRACE(source, "isc_task_attach");
403
404 LOCK(&source->lock);
405 source->references++;
406 UNLOCK(&source->lock);
407
408 *targetp = (isc_task_t *)source;
409 }
410
411 static inline isc_boolean_t
412 task_shutdown(isc__task_t *task) {
413 isc_boolean_t was_idle = ISC_FALSE;
414 isc_event_t *event, *prev;
415
416 /*
417 * Caller must be holding the task's lock.
418 */
419
420 XTRACE("task_shutdown");
421
422 if (! TASK_SHUTTINGDOWN(task)) {
423 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
424 ISC_MSG_SHUTTINGDOWN, "shutting down"));
425 task->flags |= TASK_F_SHUTTINGDOWN;
426 if (task->state == task_state_idle) {
427 INSIST(EMPTY(task->events));
428 task->state = task_state_ready;
429 was_idle = ISC_TRUE;
430 }
431 INSIST(task->state == task_state_ready ||
432 task->state == task_state_running);
433
434 /*
435 * Note that we post shutdown events LIFO.
436 */
437 for (event = TAIL(task->on_shutdown);
438 event != NULL;
439 event = prev) {
440 prev = PREV(event, ev_link);
441 DEQUEUE(task->on_shutdown, event, ev_link);
442 ENQUEUE(task->events, event, ev_link);
443 }
444 }
445
446 return (was_idle);
447 }
448
449 /*
450 * Moves a task onto the appropriate run queue.
451 *
452 * Caller must NOT hold manager lock.
453 */
454 static inline void
455 task_ready(isc__task_t *task) {
456 isc__taskmgr_t *manager = task->manager;
457 #ifdef USE_WORKER_THREADS
458 isc_boolean_t has_privilege = isc__task_privilege((isc_task_t *) task);
459 #endif /* USE_WORKER_THREADS */
460
461 REQUIRE(VALID_MANAGER(manager));
462 REQUIRE(task->state == task_state_ready);
463
464 XTRACE("task_ready");
465
466 LOCK(&manager->lock);
467 push_readyq(manager, task);
468 #ifdef USE_WORKER_THREADS
469 if (manager->mode == isc_taskmgrmode_normal || has_privilege)
470 SIGNAL(&manager->work_available);
471 #endif /* USE_WORKER_THREADS */
472 UNLOCK(&manager->lock);
473 }
474
475 static inline isc_boolean_t
476 task_detach(isc__task_t *task) {
477
478 /*
479 * Caller must be holding the task lock.
480 */
481
482 REQUIRE(task->references > 0);
483
484 XTRACE("detach");
485
486 task->references--;
487 if (task->references == 0 && task->state == task_state_idle) {
488 INSIST(EMPTY(task->events));
489 /*
490 * There are no references to this task, and no
491 * pending events. We could try to optimize and
492 * either initiate shutdown or clean up the task,
493 * depending on its state, but it's easier to just
494 * make the task ready and allow run() or the event
495 * loop to deal with shutting down and termination.
496 */
497 task->state = task_state_ready;
498 return (ISC_TRUE);
499 }
500
501 return (ISC_FALSE);
502 }
503
504 ISC_TASKFUNC_SCOPE void
505 isc__task_detach(isc_task_t **taskp) {
506 isc__task_t *task;
507 isc_boolean_t was_idle;
508
509 /*
510 * Detach *taskp from its task.
511 */
512
513 REQUIRE(taskp != NULL);
514 task = (isc__task_t *)*taskp;
515 REQUIRE(VALID_TASK(task));
516
517 XTRACE("isc_task_detach");
518
519 LOCK(&task->lock);
520 was_idle = task_detach(task);
521 UNLOCK(&task->lock);
522
523 if (was_idle)
524 task_ready(task);
525
526 *taskp = NULL;
527 }
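/*
 * Attach/detach implement plain reference counting on a task.  A sketch
 * of the usual pattern (names are illustrative):
 *
 *	isc_task_t *mytask = NULL;
 *
 *	isc_task_attach(task, &mytask);	// take a reference for this module
 *	...
 *	isc_task_detach(&mytask);	// drop it; the last detach lets the
 *					// task shut down and be destroyed
 */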
528
529 static inline isc_boolean_t
530 task_send(isc__task_t *task, isc_event_t **eventp) {
531 isc_boolean_t was_idle = ISC_FALSE;
532 isc_event_t *event;
533
534 /*
535 * Caller must be holding the task lock.
536 */
537
538 REQUIRE(eventp != NULL);
539 event = *eventp;
540 REQUIRE(event != NULL);
541 REQUIRE(event->ev_type > 0);
542 REQUIRE(task->state != task_state_done);
543
544 XTRACE("task_send");
545
546 if (task->state == task_state_idle) {
547 was_idle = ISC_TRUE;
548 INSIST(EMPTY(task->events));
549 task->state = task_state_ready;
550 }
551 INSIST(task->state == task_state_ready ||
552 task->state == task_state_running);
553 ENQUEUE(task->events, event, ev_link);
554 *eventp = NULL;
555
556 return (was_idle);
557 }
558
559 ISC_TASKFUNC_SCOPE void
560 isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
561 isc__task_t *task = (isc__task_t *)task0;
562 isc_boolean_t was_idle;
563
564 /*
565 * Send '*event' to 'task'.
566 */
567
568 REQUIRE(VALID_TASK(task));
569
570 XTRACE("isc_task_send");
571
572 /*
573 * We're trying hard to hold locks for as short a time as possible.
574 * We're also trying to hold as few locks as possible. This is why
575 * some processing is deferred until after the lock is released.
576 */
577 LOCK(&task->lock);
578 was_idle = task_send(task, eventp);
579 UNLOCK(&task->lock);
580
581 if (was_idle) {
582 /*
583 * We need to add this task to the ready queue.
584 *
585 * We've waited until now to do it because making a task
586 * ready requires locking the manager. If we tried to do
587 * this while holding the task lock, we could deadlock.
588 *
589 * We've changed the state to ready, so no one else will
590 * be trying to add this task to the ready queue. The
591 * only way to leave the ready state is by executing the
592 * task. It thus doesn't matter if events are added,
593 * removed, or a shutdown is started in the interval
594 * between the time we released the task lock, and the time
595 * we add the task to the ready queue.
596 */
597 task_ready(task);
598 }
599 }
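/*
 * From the sender's point of view, isc_task_send() transfers ownership of
 * the event: on return *eventp is NULL, and the event will eventually be
 * handed to its action on the target task, which normally frees it with
 * isc_event_free().  A sketch (MYAPP_EVENT_DONE, done_cb and cb_arg are
 * placeholders):
 *
 *	isc_event_t *event;
 *
 *	event = isc_event_allocate(mctx, NULL, MYAPP_EVENT_DONE,
 *				   done_cb, cb_arg, sizeof(*event));
 *	if (event == NULL)
 *		return (ISC_R_NOMEMORY);
 *	isc_task_send(task, &event);
 *	INSIST(event == NULL);		// ownership has moved to the task
 */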
600
601 ISC_TASKFUNC_SCOPE void
602 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
603 isc_boolean_t idle1, idle2;
604 isc__task_t *task;
605
606 /*
607 * Send '*event' to '*taskp' and then detach '*taskp' from its
608 * task.
609 */
610
611 REQUIRE(taskp != NULL);
612 task = (isc__task_t *)*taskp;
613 REQUIRE(VALID_TASK(task));
614
615 XTRACE("isc_task_sendanddetach");
616
617 LOCK(&task->lock);
618 idle1 = task_send(task, eventp);
619 idle2 = task_detach(task);
620 UNLOCK(&task->lock);
621
622 /*
623 * If idle1, then idle2 shouldn't be true as well since we're holding
624 * the task lock, and thus the task cannot switch from ready back to
625 * idle.
626 */
627 INSIST(!(idle1 && idle2));
628
629 if (idle1 || idle2)
630 task_ready(task);
631
632 *taskp = NULL;
633 }
634
635 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
636
637 static unsigned int
638 dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
639 isc_eventtype_t last, void *tag,
640 isc_eventlist_t *events, isc_boolean_t purging)
641 {
642 isc_event_t *event, *next_event;
643 unsigned int count = 0;
644
645 REQUIRE(VALID_TASK(task));
646 REQUIRE(last >= first);
647
648 XTRACE("dequeue_events");
649
650 /*
651 * Events matching 'sender', whose type is >= first and <= last, and
652 * whose tag is 'tag' will be dequeued. If 'purging', matching events
653 * which are marked as unpurgable will not be dequeued.
654 *
655 * sender == NULL means "any sender", and tag == NULL means "any tag".
656 */
657
658 LOCK(&task->lock);
659
660 for (event = HEAD(task->events); event != NULL; event = next_event) {
661 next_event = NEXT(event, ev_link);
662 if (event->ev_type >= first && event->ev_type <= last &&
663 (sender == NULL || event->ev_sender == sender) &&
664 (tag == NULL || event->ev_tag == tag) &&
665 (!purging || PURGE_OK(event))) {
666 DEQUEUE(task->events, event, ev_link);
667 ENQUEUE(*events, event, ev_link);
668 count++;
669 }
670 }
671
672 UNLOCK(&task->lock);
673
674 return (count);
675 }
676
677 ISC_TASKFUNC_SCOPE unsigned int
678 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
679 isc_eventtype_t last, void *tag)
680 {
681 isc__task_t *task = (isc__task_t *)task0;
682 unsigned int count;
683 isc_eventlist_t events;
684 isc_event_t *event, *next_event;
685
686 /*
687 * Purge events from a task's event queue.
688 */
689
690 XTRACE("isc_task_purgerange");
691
692 ISC_LIST_INIT(events);
693
694 count = dequeue_events(task, sender, first, last, tag, &events,
695 ISC_TRUE);
696
697 for (event = HEAD(events); event != NULL; event = next_event) {
698 next_event = NEXT(event, ev_link);
699 isc_event_free(&event);
700 }
701
702 /*
703 * Note that purging never changes the state of the task.
704 */
705
706 return (count);
707 }
708
709 ISC_TASKFUNC_SCOPE unsigned int
710 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
711 void *tag)
712 {
713 /*
714 * Purge events from a task's event queue.
715 */
716
717 XTRACE("isc_task_purge");
718
719 return (isc__task_purgerange(task, sender, type, type, tag));
720 }
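/*
 * A purge sketch (illustrative; MYAPP_EVENT_FIRST, MYAPP_EVENT_LAST and
 * 'obj' are placeholders): before tearing down 'obj', drop every
 * still-undelivered event that was tagged with it, so no action runs
 * against freed state.  NULL acts as a wildcard for the sender argument
 * here, and would do the same for the tag:
 *
 *	unsigned int n;
 *
 *	n = isc_task_purgerange(task, NULL, MYAPP_EVENT_FIRST,
 *				MYAPP_EVENT_LAST, obj);
 *	// the n matching events were dequeued and freed; events marked
 *	// ISC_EVENTATTR_NOPURGE stay on the queue
 */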
721
722 ISC_TASKFUNC_SCOPE isc_boolean_t
723 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
724 isc__task_t *task = (isc__task_t *)task0;
725 isc_event_t *curr_event, *next_event;
726
727 /*
728 * Purge 'event' from a task's event queue.
729 *
730 * XXXRTH: WARNING: This method may be removed before beta.
731 */
732
733 REQUIRE(VALID_TASK(task));
734
735 /*
736 * If 'event' is on the task's event queue, it will be purged,
737 * unless it is marked as unpurgeable. 'event' does not have to be
738 * on the task's event queue; in fact, it can even be an invalid
739 * pointer. Purging only occurs if the event is actually on the task's
740 * event queue.
741 *
742 * Purging never changes the state of the task.
743 */
744
745 LOCK(&task->lock);
746 for (curr_event = HEAD(task->events);
747 curr_event != NULL;
748 curr_event = next_event) {
749 next_event = NEXT(curr_event, ev_link);
750 if (curr_event == event && PURGE_OK(event)) {
751 DEQUEUE(task->events, curr_event, ev_link);
752 break;
753 }
754 }
755 UNLOCK(&task->lock);
756
757 if (curr_event == NULL)
758 return (ISC_FALSE);
759
760 isc_event_free(&curr_event);
761
762 return (ISC_TRUE);
763 }
764
765 ISC_TASKFUNC_SCOPE unsigned int
766 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
767 isc_eventtype_t last, void *tag,
768 isc_eventlist_t *events)
769 {
770 /*
771 * Remove events from a task's event queue.
772 */
773
774 XTRACE("isc_task_unsendrange");
775
776 return (dequeue_events((isc__task_t *)task, sender, first,
777 last, tag, events, ISC_FALSE));
778 }
779
780 ISC_TASKFUNC_SCOPE unsigned int
781 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
782 void *tag, isc_eventlist_t *events)
783 {
784 /*
785 * Remove events from a task's event queue.
786 */
787
788 XTRACE("isc_task_unsend");
789
790 return (dequeue_events((isc__task_t *)task, sender, type,
791 type, tag, events, ISC_FALSE));
792 }
793
794 ISC_TASKFUNC_SCOPE isc_result_t
795 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
796 const void *arg)
797 {
798 isc__task_t *task = (isc__task_t *)task0;
799 isc_boolean_t disallowed = ISC_FALSE;
800 isc_result_t result = ISC_R_SUCCESS;
801 isc_event_t *event;
802
803 /*
804 * Send a shutdown event with action 'action' and argument 'arg' when
805 * 'task' is shut down.
806 */
807
808 REQUIRE(VALID_TASK(task));
809 REQUIRE(action != NULL);
810
811 event = isc_event_allocate(task->manager->mctx,
812 NULL,
813 ISC_TASKEVENT_SHUTDOWN,
814 action,
815 arg,
816 sizeof(*event));
817 if (event == NULL)
818 return (ISC_R_NOMEMORY);
819
820 LOCK(&task->lock);
821 if (TASK_SHUTTINGDOWN(task)) {
822 disallowed = ISC_TRUE;
823 result = ISC_R_SHUTTINGDOWN;
824 } else
825 ENQUEUE(task->on_shutdown, event, ev_link);
826 UNLOCK(&task->lock);
827
828 if (disallowed)
829 isc_mem_put(task->manager->mctx, event, sizeof(*event));
830
831 return (result);
832 }
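/*
 * A sketch of registering a shutdown action (shutdown_cb, myobj_t and
 * 'obj' are placeholders).  The action runs on the task itself once
 * shutdown begins and receives an ISC_TASKEVENT_SHUTDOWN event, making it
 * a natural place to release per-task state:
 *
 *	static void
 *	shutdown_cb(isc_task_t *task, isc_event_t *event) {
 *		myobj_t *obj = event->ev_arg;
 *
 *		UNUSED(task);
 *		isc_event_free(&event);
 *		myobj_detach(&obj);
 *	}
 *
 *	result = isc_task_onshutdown(task, shutdown_cb, obj);
 */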
833
834 ISC_TASKFUNC_SCOPE void
835 isc__task_shutdown(isc_task_t *task0) {
836 isc__task_t *task = (isc__task_t *)task0;
837 isc_boolean_t was_idle;
838
839 /*
840 * Shut down 'task'.
841 */
842
843 REQUIRE(VALID_TASK(task));
844
845 LOCK(&task->lock);
846 was_idle = task_shutdown(task);
847 UNLOCK(&task->lock);
848
849 if (was_idle)
850 task_ready(task);
851 }
852
853 ISC_TASKFUNC_SCOPE void
854 isc__task_destroy(isc_task_t **taskp) {
855
856 /*
857 * Destroy '*taskp'.
858 */
859
860 REQUIRE(taskp != NULL);
861
862 isc_task_shutdown(*taskp);
863 isc_task_detach(taskp);
864 }
865
866 ISC_TASKFUNC_SCOPE void
867 isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
868 isc__task_t *task = (isc__task_t *)task0;
869
870 /*
871 * Name 'task'.
872 */
873
874 REQUIRE(VALID_TASK(task));
875
876 LOCK(&task->lock);
877 memset(task->name, 0, sizeof(task->name));
878 strncpy(task->name, name, sizeof(task->name) - 1);
879 task->tag = tag;
880 UNLOCK(&task->lock);
881 }
882
883 ISC_TASKFUNC_SCOPE const char *
884 isc__task_getname(isc_task_t *task0) {
885 isc__task_t *task = (isc__task_t *)task0;
886
887 REQUIRE(VALID_TASK(task));
888
889 return (task->name);
890 }
891
892 ISC_TASKFUNC_SCOPE void *
893 isc__task_gettag(isc_task_t *task0) {
894 isc__task_t *task = (isc__task_t *)task0;
895
896 REQUIRE(VALID_TASK(task));
897
898 return (task->tag);
899 }
900
901 ISC_TASKFUNC_SCOPE void
902 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
903 isc__task_t *task = (isc__task_t *)task0;
904
905 REQUIRE(VALID_TASK(task));
906 REQUIRE(t != NULL);
907
908 LOCK(&task->lock);
909 *t = task->now;
910 UNLOCK(&task->lock);
911 }
912
913 /***
914 *** Task Manager.
915 ***/
916
917 /*
918 * Return ISC_TRUE if the current ready list for the manager is empty.
919 * The current list is either ready_tasks or ready_priority_tasks, depending
920 * on whether the manager is currently in normal or privileged execution mode.
921 *
922 * Caller must hold the task manager lock.
923 */
924 static inline isc_boolean_t
925 empty_readyq(isc__taskmgr_t *manager) {
926 isc__tasklist_t queue;
927
928 if (manager->mode == isc_taskmgrmode_normal)
929 queue = manager->ready_tasks;
930 else
931 queue = manager->ready_priority_tasks;
932
933 return (ISC_TF(EMPTY(queue)));
934 }
935
936 /*
937 * Dequeue and return a pointer to the first task on the current ready
938 * list for the manager.
939 * If the task is privileged, dequeue it from the other ready list
940 * as well.
941 *
942 * Caller must hold the task manager lock.
943 */
944 static inline isc__task_t *
945 pop_readyq(isc__taskmgr_t *manager) {
946 isc__task_t *task;
947
948 if (manager->mode == isc_taskmgrmode_normal)
949 task = HEAD(manager->ready_tasks);
950 else
951 task = HEAD(manager->ready_priority_tasks);
952
953 if (task != NULL) {
954 DEQUEUE(manager->ready_tasks, task, ready_link);
955 if (ISC_LINK_LINKED(task, ready_priority_link))
956 DEQUEUE(manager->ready_priority_tasks, task,
957 ready_priority_link);
958 }
959
960 return (task);
961 }
962
963 /*
964 * Push 'task' onto the ready_tasks queue. If 'task' has the privilege
965 * flag set, then also push it onto the ready_priority_tasks queue.
966 *
967 * Caller must hold the task manager lock.
968 */
969 static inline void
970 push_readyq(isc__taskmgr_t *manager, isc__task_t *task) {
971 ENQUEUE(manager->ready_tasks, task, ready_link);
972 if ((task->flags & TASK_F_PRIVILEGED) != 0)
973 ENQUEUE(manager->ready_priority_tasks, task,
974 ready_priority_link);
975 }
976
977 static void
978 dispatch(isc__taskmgr_t *manager) {
979 isc__task_t *task;
980 #ifndef USE_WORKER_THREADS
981 unsigned int total_dispatch_count = 0;
982 isc__tasklist_t new_ready_tasks;
983 isc__tasklist_t new_priority_tasks;
984 #endif /* USE_WORKER_THREADS */
985
986 REQUIRE(VALID_MANAGER(manager));
987
988 /*
989 * Again we're trying to hold the lock for as short a time as possible
990 * and to do as little locking and unlocking as possible.
991 *
992 * In both while loops, the appropriate lock must be held before the
993 * while body starts. Code which acquired the lock at the top of
994 * the loop would be more readable, but would result in a lot of
995 * extra locking. Compare:
996 *
997 * Straightforward:
998 *
999 * LOCK();
1000 * ...
1001 * UNLOCK();
1002 * while (expression) {
1003 * LOCK();
1004 * ...
1005 * UNLOCK();
1006 *
1007 * Unlocked part here...
1008 *
1009 * LOCK();
1010 * ...
1011 * UNLOCK();
1012 * }
1013 *
1014 * Note how if the loop continues we unlock and then immediately lock.
1015 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
1016 * unlocks. Also note that the lock is not held when the while
1017 * condition is tested, which may or may not be important, depending
1018 * on the expression.
1019 *
1020 * As written:
1021 *
1022 * LOCK();
1023 * while (expression) {
1024 * ...
1025 * UNLOCK();
1026 *
1027 * Unlocked part here...
1028 *
1029 * LOCK();
1030 * ...
1031 * }
1032 * UNLOCK();
1033 *
1034 * For N iterations of the loop, this code does N+1 locks and N+1
1035 * unlocks. The while expression is always protected by the lock.
1036 */
1037
1038 #ifndef USE_WORKER_THREADS
1039 ISC_LIST_INIT(new_ready_tasks);
1040 ISC_LIST_INIT(new_priority_tasks);
1041 #endif
1042 LOCK(&manager->lock);
1043
1044 while (!FINISHED(manager)) {
1045 #ifdef USE_WORKER_THREADS
1046 /*
1047 * For reasons similar to those given in the comment in
1048 * isc_task_send() above, it is safe for us to dequeue
1049 * the task while only holding the manager lock, and then
1050 * change the task to running state while only holding the
1051 * task lock.
1052 *
1053 * If a pause has been requested, don't do any work
1054 * until it's been released.
1055 */
1056 while ((empty_readyq(manager) || manager->pause_requested ||
1057 manager->exclusive_requested) && !FINISHED(manager))
1058 {
1059 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
1060 ISC_MSGSET_GENERAL,
1061 ISC_MSG_WAIT, "wait"));
1062 WAIT(&manager->work_available, &manager->lock);
1063 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
1064 ISC_MSGSET_TASK,
1065 ISC_MSG_AWAKE, "awake"));
1066 }
1067 #else /* USE_WORKER_THREADS */
1068 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
1069 empty_readyq(manager))
1070 break;
1071 #endif /* USE_WORKER_THREADS */
1072 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
1073 ISC_MSG_WORKING, "working"));
1074
1075 task = pop_readyq(manager);
1076 if (task != NULL) {
1077 unsigned int dispatch_count = 0;
1078 isc_boolean_t done = ISC_FALSE;
1079 isc_boolean_t requeue = ISC_FALSE;
1080 isc_boolean_t finished = ISC_FALSE;
1081 isc_event_t *event;
1082
1083 INSIST(VALID_TASK(task));
1084
1085 /*
1086 * Note we only unlock the manager lock if we actually
1087 * have a task to do. We must reacquire the manager
1088 * lock before exiting the 'if (task != NULL)' block.
1089 */
1090 manager->tasks_running++;
1091 UNLOCK(&manager->lock);
1092
1093 LOCK(&task->lock);
1094 INSIST(task->state == task_state_ready);
1095 task->state = task_state_running;
1096 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1097 ISC_MSG_RUNNING, "running"));
1098 isc_stdtime_get(&task->now);
1099 do {
1100 if (!EMPTY(task->events)) {
1101 event = HEAD(task->events);
1102 DEQUEUE(task->events, event, ev_link);
1103
1104 /*
1105 * Execute the event action.
1106 */
1107 XTRACE(isc_msgcat_get(isc_msgcat,
1108 ISC_MSGSET_TASK,
1109 ISC_MSG_EXECUTE,
1110 "execute action"));
1111 if (event->ev_action != NULL) {
1112 UNLOCK(&task->lock);
1113 (event->ev_action)(
1114 (isc_task_t *)task,
1115 event);
1116 LOCK(&task->lock);
1117 }
1118 dispatch_count++;
1119 #ifndef USE_WORKER_THREADS
1120 total_dispatch_count++;
1121 #endif /* USE_WORKER_THREADS */
1122 }
1123
1124 if (task->references == 0 &&
1125 EMPTY(task->events) &&
1126 !TASK_SHUTTINGDOWN(task)) {
1127 isc_boolean_t was_idle;
1128
1129 /*
1130 * There are no references and no
1131 * pending events for this task,
1132 * which means it will not become
1133 * runnable again via an external
1134 * action (such as sending an event
1135 * or detaching).
1136 *
1137 * We initiate shutdown to prevent
1138 * it from becoming a zombie.
1139 *
1140 * We do this here instead of in
1141 * the "if EMPTY(task->events)" block
1142 * below because:
1143 *
1144 * If we post no shutdown events,
1145 * we want the task to finish.
1146 *
1147 * If we did post shutdown events,
1148 * we still want the task's
1149 * quantum to be applied.
1150 */
1151 was_idle = task_shutdown(task);
1152 INSIST(!was_idle);
1153 }
1154
1155 if (EMPTY(task->events)) {
1156 /*
1157 * Nothing else to do for this task
1158 * right now.
1159 */
1160 XTRACE(isc_msgcat_get(isc_msgcat,
1161 ISC_MSGSET_TASK,
1162 ISC_MSG_EMPTY,
1163 "empty"));
1164 if (task->references == 0 &&
1165 TASK_SHUTTINGDOWN(task)) {
1166 /*
1167 * The task is done.
1168 */
1169 XTRACE(isc_msgcat_get(
1170 isc_msgcat,
1171 ISC_MSGSET_TASK,
1172 ISC_MSG_DONE,
1173 "done"));
1174 finished = ISC_TRUE;
1175 task->state = task_state_done;
1176 } else
1177 task->state = task_state_idle;
1178 done = ISC_TRUE;
1179 } else if (dispatch_count >= task->quantum) {
1180 /*
1181 * Our quantum has expired, but
1182 * there is more work to be done.
1183 * We'll requeue it to the ready
1184 * queue later.
1185 *
1186 * We don't check quantum until
1187 * dispatching at least one event,
1188 * so the minimum quantum is one.
1189 */
1190 XTRACE(isc_msgcat_get(isc_msgcat,
1191 ISC_MSGSET_TASK,
1192 ISC_MSG_QUANTUM,
1193 "quantum"));
1194 task->state = task_state_ready;
1195 requeue = ISC_TRUE;
1196 done = ISC_TRUE;
1197 }
1198 } while (!done);
1199 UNLOCK(&task->lock);
1200
1201 if (finished)
1202 task_finished(task);
1203
1204 LOCK(&manager->lock);
1205 manager->tasks_running--;
1206 #ifdef USE_WORKER_THREADS
1207 if (manager->exclusive_requested &&
1208 manager->tasks_running == 1) {
1209 SIGNAL(&manager->exclusive_granted);
1210 } else if (manager->pause_requested &&
1211 manager->tasks_running == 0) {
1212 SIGNAL(&manager->paused);
1213 }
1214 #endif /* USE_WORKER_THREADS */
1215 if (requeue) {
1216 /*
1217 * We know we're awake, so we don't have
1218 * to wakeup any sleeping threads if the
1219 * ready queue is empty before we requeue.
1220 *
1221 * A possible optimization if the queue is
1222 * empty is to 'goto' the 'if (task != NULL)'
1223 * block, avoiding the ENQUEUE of the task
1224 * and the subsequent immediate DEQUEUE
1225 * (since it is the only executable task).
1226 * We don't do this because then we'd be
1227 * skipping the exit_requested check. The
1228 * cost of ENQUEUE is low anyway, especially
1229 * when you consider that we'd have to do
1230 * an extra EMPTY check to see if we could
1231 * do the optimization. If the ready queue
1232 * were usually nonempty, the 'optimization'
1233 * might even hurt rather than help.
1234 */
1235 #ifdef USE_WORKER_THREADS
1236 push_readyq(manager, task);
1237 #else
1238 ENQUEUE(new_ready_tasks, task, ready_link);
1239 if ((task->flags & TASK_F_PRIVILEGED) != 0)
1240 ENQUEUE(new_priority_tasks, task,
1241 ready_priority_link);
1242 #endif
1243 }
1244 }
1245
1246 #ifdef USE_WORKER_THREADS
1247 /*
1248 * If we are in privileged execution mode and there are no
1249 * tasks remaining on the current ready queue, then
1250 * we're stuck. Automatically drop privileges at that
1251 * point and continue with the regular ready queue.
1252 */
1253 if (manager->tasks_running == 0 && empty_readyq(manager)) {
1254 manager->mode = isc_taskmgrmode_normal;
1255 if (!empty_readyq(manager))
1256 BROADCAST(&manager->work_available);
1257 }
1258 #endif
1259 }
1260
1261 #ifndef USE_WORKER_THREADS
1262 ISC_LIST_APPENDLIST(manager->ready_tasks, new_ready_tasks, ready_link);
1263 ISC_LIST_APPENDLIST(manager->ready_priority_tasks, new_priority_tasks,
1264 ready_priority_link);
1265 if (empty_readyq(manager))
1266 manager->mode = isc_taskmgrmode_normal;
1267 #endif
1268
1269 UNLOCK(&manager->lock);
1270 }
1271
1272 #ifdef USE_WORKER_THREADS
1273 static isc_threadresult_t
1274 #ifdef _WIN32
1275 WINAPI
1276 #endif
1277 run(void *uap) {
1278 isc__taskmgr_t *manager = uap;
1279
1280 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1281 ISC_MSG_STARTING, "starting"));
1282
1283 dispatch(manager);
1284
1285 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1286 ISC_MSG_EXITING, "exiting"));
1287
1288 #ifdef OPENSSL_LEAKS
1289 ERR_remove_state(0);
1290 #endif
1291
1292 return ((isc_threadresult_t)0);
1293 }
1294 #endif /* USE_WORKER_THREADS */
1295
1296 static void
1297 manager_free(isc__taskmgr_t *manager) {
1298 isc_mem_t *mctx;
1299
1300 LOCK(&manager->lock);
1301 #ifdef USE_WORKER_THREADS
1302 (void)isc_condition_destroy(&manager->exclusive_granted);
1303 (void)isc_condition_destroy(&manager->work_available);
1304 (void)isc_condition_destroy(&manager->paused);
1305 isc_mem_free(manager->mctx, manager->threads);
1306 #endif /* USE_WORKER_THREADS */
1307 manager->common.impmagic = 0;
1308 manager->common.magic = 0;
1309 mctx = manager->mctx;
1310 UNLOCK(&manager->lock);
1311 DESTROYLOCK(&manager->lock);
1312 isc_mem_put(mctx, manager, sizeof(*manager));
1313 isc_mem_detach(&mctx);
1314
1315 #ifdef USE_SHARED_MANAGER
1316 taskmgr = NULL;
1317 #endif /* USE_SHARED_MANAGER */
1318 }
1319
1320 ISC_TASKFUNC_SCOPE isc_result_t
1321 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1322 unsigned int default_quantum, isc_taskmgr_t **managerp)
1323 {
1324 isc_result_t result;
1325 unsigned int i, started = 0;
1326 isc__taskmgr_t *manager;
1327
1328 /*
1329 * Create a new task manager.
1330 */
1331
1332 REQUIRE(workers > 0);
1333 REQUIRE(managerp != NULL && *managerp == NULL);
1334
1335 #ifndef USE_WORKER_THREADS
1336 UNUSED(i);
1337 UNUSED(started);
1338 #endif
1339
1340 #ifdef USE_SHARED_MANAGER
1341 if (taskmgr != NULL) {
1342 if (taskmgr->refs == 0)
1343 return (ISC_R_SHUTTINGDOWN);
1344 taskmgr->refs++;
1345 *managerp = (isc_taskmgr_t *)taskmgr;
1346 return (ISC_R_SUCCESS);
1347 }
1348 #endif /* USE_SHARED_MANAGER */
1349
1350 manager = isc_mem_get(mctx, sizeof(*manager));
1351 if (manager == NULL)
1352 return (ISC_R_NOMEMORY);
1353 manager->common.methods = &taskmgrmethods;
1354 manager->common.impmagic = TASK_MANAGER_MAGIC;
1355 manager->common.magic = ISCAPI_TASKMGR_MAGIC;
1356 manager->mode = isc_taskmgrmode_normal;
1357 manager->mctx = NULL;
1358 result = isc_mutex_init(&manager->lock);
1359 if (result != ISC_R_SUCCESS)
1360 goto cleanup_mgr;
1361 LOCK(&manager->lock);
1362
1363 #ifdef USE_WORKER_THREADS
1364 manager->workers = 0;
1365 manager->threads = isc_mem_allocate(mctx,
1366 workers * sizeof(isc_thread_t));
1367 if (manager->threads == NULL) {
1368 result = ISC_R_NOMEMORY;
1369 goto cleanup_lock;
1370 }
1371 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1372 UNEXPECTED_ERROR(__FILE__, __LINE__,
1373 "isc_condition_init() %s",
1374 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1375 ISC_MSG_FAILED, "failed"));
1376 result = ISC_R_UNEXPECTED;
1377 goto cleanup_threads;
1378 }
1379 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1380 UNEXPECTED_ERROR(__FILE__, __LINE__,
1381 "isc_condition_init() %s",
1382 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1383 ISC_MSG_FAILED, "failed"));
1384 result = ISC_R_UNEXPECTED;
1385 goto cleanup_workavailable;
1386 }
1387 if (isc_condition_init(&manager->paused) != ISC_R_SUCCESS) {
1388 UNEXPECTED_ERROR(__FILE__, __LINE__,
1389 "isc_condition_init() %s",
1390 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1391 ISC_MSG_FAILED, "failed"));
1392 result = ISC_R_UNEXPECTED;
1393 goto cleanup_exclusivegranted;
1394 }
1395 #endif /* USE_WORKER_THREADS */
1396 if (default_quantum == 0)
1397 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1398 manager->default_quantum = default_quantum;
1399 INIT_LIST(manager->tasks);
1400 INIT_LIST(manager->ready_tasks);
1401 INIT_LIST(manager->ready_priority_tasks);
1402 manager->tasks_running = 0;
1403 manager->exclusive_requested = ISC_FALSE;
1404 manager->pause_requested = ISC_FALSE;
1405 manager->exiting = ISC_FALSE;
1406
1407 isc_mem_attach(mctx, &manager->mctx);
1408
1409 #ifdef USE_WORKER_THREADS
1410 /*
1411 * Start workers.
1412 */
1413 for (i = 0; i < workers; i++) {
1414 if (isc_thread_create(run, manager,
1415 &manager->threads[manager->workers]) ==
1416 ISC_R_SUCCESS) {
1417 manager->workers++;
1418 started++;
1419 }
1420 }
1421 UNLOCK(&manager->lock);
1422
1423 if (started == 0) {
1424 manager_free(manager);
1425 return (ISC_R_NOTHREADS);
1426 }
1427 isc_thread_setconcurrency(workers);
1428 #endif /* USE_WORKER_THREADS */
1429 #ifdef USE_SHARED_MANAGER
1430 manager->refs = 1;
1431 UNLOCK(&manager->lock);
1432 taskmgr = manager;
1433 #endif /* USE_SHARED_MANAGER */
1434
1435 *managerp = (isc_taskmgr_t *)manager;
1436
1437 return (ISC_R_SUCCESS);
1438
1439 #ifdef USE_WORKER_THREADS
1440 cleanup_exclusivegranted:
1441 (void)isc_condition_destroy(&manager->exclusive_granted);
1442 cleanup_workavailable:
1443 (void)isc_condition_destroy(&manager->work_available);
1444 cleanup_threads:
1445 isc_mem_free(mctx, manager->threads);
1446 cleanup_lock:
1447 UNLOCK(&manager->lock);
1448 DESTROYLOCK(&manager->lock);
1449 #endif
1450 cleanup_mgr:
1451 isc_mem_put(mctx, manager, sizeof(*manager));
1452 return (result);
1453 }
1454
1455 ISC_TASKFUNC_SCOPE void
1456 isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
1457 isc__taskmgr_t *manager;
1458 isc__task_t *task;
1459 unsigned int i;
1460
1461 /*
1462 * Destroy '*managerp'.
1463 */
1464
1465 REQUIRE(managerp != NULL);
1466 manager = (void*)(*managerp);
1467 REQUIRE(VALID_MANAGER(manager));
1468
1469 #ifndef USE_WORKER_THREADS
1470 UNUSED(i);
1471 #endif /* USE_WORKER_THREADS */
1472
1473 #ifdef USE_SHARED_MANAGER
1474 manager->refs--;
1475 if (manager->refs > 0) {
1476 *managerp = NULL;
1477 return;
1478 }
1479 #endif
1480
1481 XTHREADTRACE("isc_taskmgr_destroy");
1482 /*
1483 * Only one non-worker thread may ever call this routine.
1484 * If a worker thread wants to initiate shutdown of the
1485 * task manager, it should ask some non-worker thread to call
1486 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1487 * that the startup thread is sleeping on.
1488 */
1489
1490 /*
1491 * Unlike elsewhere, we're going to hold this lock a long time.
1492 * We need to do so, because otherwise the list of tasks could
1493 * change while we were traversing it.
1494 *
1495 * This is also the only function where we will hold both the
1496 * task manager lock and a task lock at the same time.
1497 */
1498
1499 LOCK(&manager->lock);
1500
1501 /*
1502 * Make sure we only get called once.
1503 */
1504 INSIST(!manager->exiting);
1505 manager->exiting = ISC_TRUE;
1506
1507 /*
1508 * If privileged mode was on, turn it off.
1509 */
1510 manager->mode = isc_taskmgrmode_normal;
1511
1512 /*
1513 * Post shutdown event(s) to every task (if they haven't already been
1514 * posted).
1515 */
1516 for (task = HEAD(manager->tasks);
1517 task != NULL;
1518 task = NEXT(task, link)) {
1519 LOCK(&task->lock);
1520 if (task_shutdown(task))
1521 push_readyq(manager, task);
1522 UNLOCK(&task->lock);
1523 }
1524 #ifdef USE_WORKER_THREADS
1525 /*
1526 * Wake up any sleeping workers. This ensures we get work done if
1527 * there's work left to do, and if there are already no tasks left
1528 * it will cause the workers to see manager->exiting.
1529 */
1530 BROADCAST(&manager->work_available);
1531 UNLOCK(&manager->lock);
1532
1533 /*
1534 * Wait for all the worker threads to exit.
1535 */
1536 for (i = 0; i < manager->workers; i++)
1537 (void)isc_thread_join(manager->threads[i], NULL);
1538 #else /* USE_WORKER_THREADS */
1539 /*
1540 * Dispatch the shutdown events.
1541 */
1542 UNLOCK(&manager->lock);
1543 while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
1544 (void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
1545 #ifdef BIND9
1546 if (!ISC_LIST_EMPTY(manager->tasks))
1547 isc_mem_printallactive(stderr);
1548 #endif
1549 INSIST(ISC_LIST_EMPTY(manager->tasks));
1550 #ifdef USE_SHARED_MANAGER
1551 taskmgr = NULL;
1552 #endif
1553 #endif /* USE_WORKER_THREADS */
1554
1555 manager_free(manager);
1556
1557 *managerp = NULL;
1558 }
1559
1560 ISC_TASKFUNC_SCOPE void
1561 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode) {
1562 isc__taskmgr_t *manager = (void*)manager0;
1563
1564 LOCK(&manager->lock);
1565 manager->mode = mode;
1566 UNLOCK(&manager->lock);
1567 }
1568
1569 ISC_TASKFUNC_SCOPE isc_taskmgrmode_t
1570 isc__taskmgr_mode(isc_taskmgr_t *manager0) {
1571 isc__taskmgr_t *manager = (void*)manager0;
1572 isc_taskmgrmode_t mode;
1573 LOCK(&manager->lock);
1574 mode = manager->mode;
1575 UNLOCK(&manager->lock);
1576 return (mode);
1577 }
1578
1579 #ifndef USE_WORKER_THREADS
1580 isc_boolean_t
1581 isc__taskmgr_ready(isc_taskmgr_t *manager0) {
1582 isc__taskmgr_t *manager = (void*)manager0;
1583 isc_boolean_t is_ready;
1584
1585 #ifdef USE_SHARED_MANAGER
1586 if (manager == NULL)
1587 manager = taskmgr;
1588 #endif
1589 if (manager == NULL)
1590 return (ISC_FALSE);
1591
1592 LOCK(&manager->lock);
1593 is_ready = !empty_readyq(manager);
1594 UNLOCK(&manager->lock);
1595
1596 return (is_ready);
1597 }
1598
1599 isc_result_t
1600 isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
1601 isc__taskmgr_t *manager = (void*)manager0;
1602
1603 #ifdef USE_SHARED_MANAGER
1604 if (manager == NULL)
1605 manager = taskmgr;
1606 #endif
1607 if (manager == NULL)
1608 return (ISC_R_NOTFOUND);
1609
1610 dispatch(manager);
1611
1612 return (ISC_R_SUCCESS);
1613 }
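/*
 * A sketch of how a non-threaded application's event loop might drive the
 * shared manager with the two functions above (wait_for_io() and the
 * 'done' flag are placeholders for the application's own socket/timer
 * wait and exit condition):
 *
 *	while (!done) {
 *		wait_for_io();
 *		while (isc__taskmgr_ready(NULL))
 *			(void)isc__taskmgr_dispatch(NULL);
 *	}
 *
 * Passing NULL selects the shared global manager (USE_SHARED_MANAGER).
 */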
1614
1615 #else
1616 ISC_TASKFUNC_SCOPE void
1617 isc__taskmgr_pause(isc_taskmgr_t *manager0) {
1618 isc__taskmgr_t *manager = (void*)manager0;
1619 LOCK(&manager->lock);
1620 while (manager->tasks_running > 0) {
1621 WAIT(&manager->paused, &manager->lock);
1622 }
1623 manager->pause_requested = ISC_TRUE;
1624 UNLOCK(&manager->lock);
1625 }
1626
1627 ISC_TASKFUNC_SCOPE void
1628 isc__taskmgr_resume(isc_taskmgr_t *manager0) {
1629 isc__taskmgr_t *manager = (void*)manager0;
1630
1631 LOCK(&manager->lock);
1632 if (manager->pause_requested) {
1633 manager->pause_requested = ISC_FALSE;
1634 BROADCAST(&manager->work_available);
1635 }
1636 UNLOCK(&manager->lock);
1637 }
1638 #endif /* USE_WORKER_THREADS */
1639
1640 ISC_TASKFUNC_SCOPE isc_result_t
1641 isc__task_beginexclusive(isc_task_t *task0) {
1642 #ifdef USE_WORKER_THREADS
1643 isc__task_t *task = (isc__task_t *)task0;
1644 isc__taskmgr_t *manager = task->manager;
1645 REQUIRE(task->state == task_state_running);
1646 LOCK(&manager->lock);
1647 if (manager->exclusive_requested) {
1648 UNLOCK(&manager->lock);
1649 return (ISC_R_LOCKBUSY);
1650 }
1651 manager->exclusive_requested = ISC_TRUE;
1652 while (manager->tasks_running > 1) {
1653 WAIT(&manager->exclusive_granted, &manager->lock);
1654 }
1655 UNLOCK(&manager->lock);
1656 #else
1657 UNUSED(task0);
1658 #endif
1659 return (ISC_R_SUCCESS);
1660 }
1661
1662 ISC_TASKFUNC_SCOPE void
1663 isc__task_endexclusive(isc_task_t *task0) {
1664 #ifdef USE_WORKER_THREADS
1665 isc__task_t *task = (isc__task_t *)task0;
1666 isc__taskmgr_t *manager = task->manager;
1667
1668 REQUIRE(task->state == task_state_running);
1669 LOCK(&manager->lock);
1670 REQUIRE(manager->exclusive_requested);
1671 manager->exclusive_requested = ISC_FALSE;
1672 BROADCAST(&manager->work_available);
1673 UNLOCK(&manager->lock);
1674 #else
1675 UNUSED(task0);
1676 #endif
1677 }
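/*
 * A sketch of exclusive mode (reconfigure_shared_state() is a
 * placeholder): an event action that must not run concurrently with any
 * other task, for example while changing shared configuration, brackets
 * the critical section like this.  'task' must be the task whose action
 * is currently running:
 *
 *	result = isc_task_beginexclusive(task);
 *	if (result == ISC_R_SUCCESS) {
 *		reconfigure_shared_state();
 *		isc_task_endexclusive(task);
 *	}
 */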
1678
1679 ISC_TASKFUNC_SCOPE void
1680 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv) {
1681 isc__task_t *task = (isc__task_t *)task0;
1682 isc__taskmgr_t *manager = task->manager;
1683 isc_boolean_t oldpriv;
1684
1685 LOCK(&task->lock);
1686 oldpriv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1687 if (priv)
1688 task->flags |= TASK_F_PRIVILEGED;
1689 else
1690 task->flags &= ~TASK_F_PRIVILEGED;
1691 UNLOCK(&task->lock);
1692
1693 if (priv == oldpriv)
1694 return;
1695
1696 LOCK(&manager->lock);
1697 if (priv && ISC_LINK_LINKED(task, ready_link))
1698 ENQUEUE(manager->ready_priority_tasks, task,
1699 ready_priority_link);
1700 else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
1701 DEQUEUE(manager->ready_priority_tasks, task,
1702 ready_priority_link);
1703 UNLOCK(&manager->lock);
1704 }
1705
1706 ISC_TASKFUNC_SCOPE isc_boolean_t
1707 isc__task_privilege(isc_task_t *task0) {
1708 isc__task_t *task = (isc__task_t *)task0;
1709 isc_boolean_t priv;
1710
1711 LOCK(&task->lock);
1712 priv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1713 UNLOCK(&task->lock);
1714 return (priv);
1715 }
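/*
 * A sketch of privileged execution ('loadtask' and the surrounding setup
 * are illustrative): mark the tasks that must run first as privileged and
 * switch the manager to privileged mode; the dispatcher then runs only
 * those tasks until the privileged queue drains, after which it drops
 * back to normal mode automatically (see dispatch() above):
 *
 *	isc_task_setprivilege(loadtask, ISC_TRUE);
 *	isc_taskmgr_setmode(taskmgr, isc_taskmgrmode_privileged);
 *	// ... send the high-priority events to loadtask ...
 */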
1716
1717 #ifdef USE_TASKIMPREGISTER
1718 isc_result_t
1719 isc__task_register() {
1720 return (isc_task_register(isc__taskmgr_create));
1721 }
1722 #endif
1723
1724 isc_boolean_t
1725 isc_task_exiting(isc_task_t *t) {
1726 isc__task_t *task = (isc__task_t *)t;
1727
1728 REQUIRE(VALID_TASK(task));
1729 return (TASK_SHUTTINGDOWN(task));
1730 }
1731
1732
1733 #if defined(HAVE_LIBXML2) && defined(BIND9)
1734 void
1735 isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
1736 isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
1737 isc__task_t *task;
1738
1739 LOCK(&mgr->lock);
1740
1741 /*
1742 * Write out the thread-model, and some details about each depending
1743 * on which type is enabled.
1744 */
1745 xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
1746 #ifdef ISC_PLATFORM_USETHREADS
1747 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1748 xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
1749 xmlTextWriterEndElement(writer); /* type */
1750
1751 xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
1752 xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
1753 xmlTextWriterEndElement(writer); /* worker-threads */
1754 #else /* ISC_PLATFORM_USETHREADS */
1755 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1756 xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
1757 xmlTextWriterEndElement(writer); /* type */
1758
1759 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1760 xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
1761 xmlTextWriterEndElement(writer); /* references */
1762 #endif /* ISC_PLATFORM_USETHREADS */
1763
1764 xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
1765 xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
1766 xmlTextWriterEndElement(writer); /* default-quantum */
1767
1768 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
1769 xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
1770 xmlTextWriterEndElement(writer); /* tasks-running */
1771
1772 xmlTextWriterEndElement(writer); /* thread-model */
1773
1774 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
1775 task = ISC_LIST_HEAD(mgr->tasks);
1776 while (task != NULL) {
1777 LOCK(&task->lock);
1778 xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");
1779
1780 if (task->name[0] != 0) {
1781 xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
1782 xmlTextWriterWriteFormatString(writer, "%s",
1783 task->name);
1784 xmlTextWriterEndElement(writer); /* name */
1785 }
1786
1787 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1788 xmlTextWriterWriteFormatString(writer, "%d", task->references);
1789 xmlTextWriterEndElement(writer); /* references */
1790
1791 xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
1792 xmlTextWriterWriteFormatString(writer, "%p", task);
1793 xmlTextWriterEndElement(writer); /* id */
1794
1795 xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
1796 xmlTextWriterWriteFormatString(writer, "%s",
1797 statenames[task->state]);
1798 xmlTextWriterEndElement(writer); /* state */
1799
1800 xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
1801 xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
1802 xmlTextWriterEndElement(writer); /* quantum */
1803
1804 xmlTextWriterEndElement(writer);
1805
1806 UNLOCK(&task->lock);
1807 task = ISC_LIST_NEXT(task, link);
1808 }
1809 xmlTextWriterEndElement(writer); /* tasks */
1810
1811 UNLOCK(&mgr->lock);
1812 }
1813 #endif /* HAVE_LIBXML2 && BIND9 */
1814