/*
 * libunbound/libworker.c - worker thread or process that resolves
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the worker process or thread that performs the
 * DNS resolving and validation. The worker is started by a procedure
 * call: in the background it keeps running until told to exit, in the
 * foreground it returns from the call when the work is done.
 */
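
/*
 * A minimal illustrative sketch, not part of this file, of how an
 * application reaches these workers through the public API in unbound.h:
 * roughly, ub_resolve() resolves in the foreground (libworker_fg) and
 * ub_resolve_async() hands the query to the background worker started
 * by libworker_bg().  Error handling is abbreviated.
 *
 *	#include <stdio.h>
 *	#include <unbound.h>
 *
 *	static void done(void* mydata, int err, struct ub_result* result)
 *	{
 *		(void)mydata;
 *		if(err == 0 && result->havedata)
 *			printf("async answer for %s\n", result->qname);
 *		ub_resolve_free(result);
 *	}
 *
 *	int example(void)
 *	{
 *		struct ub_ctx* ctx = ub_ctx_create();
 *		struct ub_result* result = NULL;
 *		int async_id = 0;
 *		if(!ctx) return 1;
 *		// foreground: blocks until the answer is complete
 *		if(ub_resolve(ctx, "www.example.com", 1, 1, &result) == 0)
 *			ub_resolve_free(result);
 *		// background: returns at once, done() is called later
 *		if(ub_resolve_async(ctx, "www.example.com", 1, 1,
 *			NULL, done, &async_id) == 0)
 *			(void)ub_wait(ctx); // wait for outstanding queries
 *		ub_ctx_delete(ctx);
 *		return 0;
 *	}
 */
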
#include "config.h"
#include <ldns/dname.h>
#include <ldns/wire2host.h>
#include <openssl/ssl.h>
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/unbound.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/rrset.h"
#include "services/outbound_list.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/random.h"
#include "util/config_file.h"
#include "util/netevent.h"
#include "util/storage/lookup3.h"
#include "util/storage/slabhash.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"

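/*
 * Background operation uses the two tubes (pipes) owned by the context:
 * commands (UB_LIBCMD_NEWQUERY, UB_LIBCMD_CANCEL, UB_LIBCMD_QUIT) are read
 * from ctx->qq_pipe, and answers, serialized with context_serialize_answer(),
 * are queued back on ctx->rr_pipe.
 */
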
/** handle new query command for bg worker */
static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);

/** delete libworker struct */
static void
libworker_delete(struct libworker* w)
{
	if(!w) return;
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		ldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		forwards_delete(w->env->fwds);
		hints_delete(w->env->hints);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
	SSL_CTX_free(w->sslctx);
	outside_network_delete(w->back);
	comm_base_delete(w->base);
	free(w);
}

/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg)
{
	unsigned int seed;
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
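	/* The cfglock is taken only when this worker shares memory with
	 * the context: a foreground worker or a background thread. A
	 * forked background process has its own copy of the state and
	 * reads the config without locking. */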
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = ldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
	if(cfg->ssl_upstream) {
		w->sslctx = connect_sslctx_create(NULL, NULL, NULL);
		if(!w->sslctx) {
			/* to make the setup fail after unlock */
			hints_delete(w->env->hints);
			w->env->hints = NULL;
		}
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
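	/* seed the worker's RNG from the time, the process id, the thread
	 * number and the allocator id sequence, so that concurrent workers
	 * do not start with identical streams */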
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
		(((unsigned int)w->thread_num)<<17);
	seed ^= (unsigned int)w->env->alloc->next_id;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(seed, ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		seed = 0;
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}
	seed = 0;

	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		&libworker_alloc_cleanup, w, cfg->do_udp, w->sslctx);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	return w;
}

/** handle cancel command for bg worker */
static void
handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
{
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_deserialize_cancel(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_cancel(w->ctx, buf, len);
	}
	if(!q) {
		/* probably simply lookup failed, i.e. the message had been
		 * processed and answered before the cancel arrived */
		return;
	}
	q->cancelled = 1;
	free(buf);
}

/** do control command coming into bg server */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
		case UB_LIBCMD_QUIT:
			free(msg);
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}

/** handle control command coming into server */
void
libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* msg, size_t len, int err, void* arg)
{
	struct libworker* w = (struct libworker*)arg;

	if(err != 0) {
		free(msg);
		/* it is of no use to go on, exit */
		comm_base_exit(w->base);
		return;
	}
	libworker_do_cmd(w, msg, len); /* also frees the buf */
}

/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}

int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1);
		if(!w) return UB_NOMEM;
		w->is_bg_thread = 1;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				w = libworker_setup(ctx, 1);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}

/** get msg reply struct (in temp region) */
static struct reply_info*
parse_reply(ldns_buffer* pkt, struct regional* region, struct query_info* qi)
{
	struct reply_info* rep;
	struct msg_parse* msg;
	if(!(msg = regional_alloc(region, sizeof(*msg)))) {
		return NULL;
	}
	memset(msg, 0, sizeof(*msg));
	ldns_buffer_set_position(pkt, 0);
	if(parse_packet(pkt, msg, region) != 0)
		return 0;
	if(!parse_create_msg(pkt, msg, NULL, qi, &rep, region)) {
		return 0;
	}
	return rep;
}

/** insert canonname */
static int
fill_canon(struct ub_result* res, uint8_t* s)
{
	char buf[255+2];
	dname_str(s, buf);
	res->canonname = strdup(buf);
	return res->canonname != 0;
}

/** fill data into result */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq)
{
	size_t i;
	struct packed_rrset_data* data;
	if(!answer) {
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		res->data = (char**)calloc(1, sizeof(char*));
		res->len = (int*)calloc(1, sizeof(int));
		return (res->data && res->len);
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->data || !res->len)
		return 0; /* out of memory */
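	/* each rr_data[i] holds the rdata in wire format, prefixed by its
	 * 2-byte rdlength; only the rdata itself is copied out below */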
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i])
			return 0; /* out of memory */
	}
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}

/** fill result from parsed message, on error fills servfail */
void
libworker_enter_result(struct ub_result* res, ldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus)
		res->bogus = 1;
}

/** fillup fg results */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, ldns_buffer* buf,
	enum sec_status s, char* why_bogus)
{
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = 0;
	q->msg = memdup(ldns_buffer_begin(buf), ldns_buffer_limit(buf));
	q->msg_len = ldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}

void
libworker_fg_done_cb(void* arg, int rcode, ldns_buffer* buf, enum sec_status s,
	char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* fg query is done; exit comm base */
	comm_base_exit(q->w->base);

	libworker_fillup_fg(q, rcode, buf, s, why_bogus);
}

/** setup qinfo and edns */
static int
setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
	struct query_info* qinfo, struct edns_data* edns)
{
	ldns_rdf* rdf;
	qinfo->qtype = (uint16_t)q->res->qtype;
	qinfo->qclass = (uint16_t)q->res->qclass;
	rdf = ldns_dname_new_frm_str(q->res->qname);
	if(!rdf) {
		return 0;
	}
#ifdef UNBOUND_ALLOC_LITE
	qinfo->qname = memdup(ldns_rdf_data(rdf), ldns_rdf_size(rdf));
	qinfo->qname_len = ldns_rdf_size(rdf);
	ldns_rdf_deep_free(rdf);
	rdf = 0;
#else
	qinfo->qname = ldns_rdf_data(rdf);
	qinfo->qname_len = ldns_rdf_size(rdf);
#endif
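	/* attach EDNS: advertise our receive buffer size and set the DO
	 * bit so that DNSSEC records are returned for validation */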
	edns->edns_present = 1;
	edns->ext_rcode = 0;
	edns->edns_version = 0;
	edns->bits = EDNS_DO;
	if(ldns_buffer_capacity(w->back->udp_buff) < 65535)
		edns->udp_size = (uint16_t)ldns_buffer_capacity(
			w->back->udp_buff);
	else	edns->udp_size = 65535;
	ldns_rdf_free(rdf);
	return 1;
}

int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
{
	struct libworker* w = libworker_setup(ctx, 0);
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		libworker_delete(w);
		return UB_SYNTAX;
	}
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	ldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	ldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);

	/* wait for reply */
	comm_base_dispatch(w->base);

	libworker_delete(w);
	return UB_NOERROR;
}

/** add result to the bg worker result queue */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, ldns_buffer* pkt,
	int err, char* reason)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

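	/* In thread mode the context structures are shared: the answer is
	 * serialized under the cfglock and the query entry stays in the
	 * shared tree for the receiving side to clean up.  In fork mode
	 * this process owns a private copy, so the reply packet is
	 * serialized into the message and the query is deleted here. */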
	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		if(pkt) {
			q->msg_len = ldns_buffer_remaining(pkt);
			q->msg = memdup(ldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg)
				msg = context_serialize_answer(q, UB_NOMEM,
				NULL, &len);
			else	msg = context_serialize_answer(q, err,
				NULL, &len);
		} else msg = context_serialize_answer(q, err, NULL, &len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}

void
libworker_bg_done_cb(void* arg, int rcode, ldns_buffer* buf, enum sec_status s,
	char* why_bogus)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus);
}


/** handle new query command for bg worker */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	ldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	ldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL);
	}
	free(qinfo.qname);
}

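/* registered with alloc_set_id_cleanup(): when the allocator runs out of
 * fresh id numbers the caches are cleared, so that entries carrying old
 * ids cannot be confused with newly issued ones */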
void libworker_alloc_cleanup(void* arg)
{
	struct libworker* w = (struct libworker*)arg;
	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
}

/** compare outbound entry qstates */
static int
outbound_entry_compare(void* a, void* b)
{
	struct outbound_entry* e1 = (struct outbound_entry*)a;
	struct outbound_entry* e2 = (struct outbound_entry*)b;
	if(e1->qstate == e2->qstate)
		return 1;
	return 0;
}

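/* send a query for the resolver modules; the reply is delivered back
 * through libworker_handle_service_reply() via the outbound entry */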
struct outbound_entry* libworker_send_query(uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, uint16_t flags, int dnssec,
	int want_dnssec, struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, struct module_qstate* q)
{
	struct libworker* w = (struct libworker*)q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(w->back, qname,
		qnamelen, qtype, qclass, flags, dnssec, want_dnssec,
		q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
		addrlen, zone, zonelen, libworker_handle_service_reply, e,
		w->back->udp_buff, &outbound_entry_compare);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}

int
libworker_handle_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct module_qstate* q = (struct module_qstate*)arg;
	struct libworker* lw = (struct libworker*)q->env->worker;
	struct outbound_entry e;
	e.qstate = q;
	e.qsent = NULL;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, &e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(ldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(ldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, &e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
	return 0;
}

int
libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct libworker* lw = (struct libworker*)e->qstate->env->worker;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(ldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(ldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
	return 0;
}

/* --- fake callbacks for fptr_wlist to work --- */
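/* The function pointer whitelist (fptr_wlist) requires these daemon
 * callbacks to exist at link time; libunbound never calls them, so each
 * stub only asserts if it is ever reached. */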
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

struct outbound_entry* worker_send_query(uint8_t* ATTR_UNUSED(qname),
	size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
	uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
	int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
	struct sockaddr_storage* ATTR_UNUSED(addr),
	socklen_t ATTR_UNUSED(addrlen), struct module_qstate* ATTR_UNUSED(q))
{
	log_assert(0);
	return 0;
}

void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

#ifdef UB_ON_WINDOWS
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
	ATTR_UNUSED(arg))
{
	log_assert(0);
}

void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif /* UB_ON_WINDOWS */