/*
 * libunbound/libworker.c - worker thread or process that resolves
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the worker process or thread that performs
 * the DNS resolving and validation. The worker is started by a procedure
 * call; in the background it keeps running until told to exit, in the
 * foreground it returns from the call when the work is done.
 */
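/*
 * Caller-side sketch (not part of this file, shown for orientation): the
 * public libunbound API from unbound.h drives these workers. A synchronous
 * ub_resolve() is serviced by libworker_fg(), which runs its own comm_base
 * until the done callback fires; ub_resolve_async() hands the query to the
 * background worker started by libworker_bg() over the qq/rr pipes. A
 * minimal synchronous caller, using rrtype 1 (A) and rrclass 1 (IN), could
 * look roughly like:
 *
 *	struct ub_ctx* ctx = ub_ctx_create();
 *	struct ub_result* result = NULL;
 *	if(ctx && ub_resolve(ctx, "www.example.com", 1, 1, &result) == 0
 *		&& result) {
 *		... use result->havedata, result->data, result->secure ...
 *		ub_resolve_free(result);
 *	}
 *	ub_ctx_delete(ctx);
 */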
#include "config.h"
#ifdef HAVE_SSL
#include <openssl/ssl.h>
#endif
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/unbound.h"
#include "libunbound/worker.h"
#include "libunbound/unbound-event.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/rrset.h"
#include "services/outbound_list.h"
#include "services/authzone.h"
#include "util/fptr_wlist.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/random.h"
#include "util/config_file.h"
#include "util/netevent.h"
#include "util/storage/lookup3.h"
#include "util/storage/slabhash.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"

/** handle new query command for bg worker */
static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);

/** delete libworker env */
static void
libworker_delete_env(struct libworker* w)
{
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		sldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		forwards_delete(w->env->fwds);
		hints_delete(w->env->hints);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
#ifdef HAVE_SSL
	SSL_CTX_free(w->sslctx);
#endif
	outside_network_delete(w->back);
}

/** delete libworker struct */
static void
libworker_delete(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete(w->base);
	free(w);
}

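/** delete libworker struct for the event-based API; the application owns
 * the ub_event_base, so only the comm_base wrapper is freed here and the
 * underlying event base is left alone */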
void
libworker_delete_event(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete_no_base(w->base);
	free(w);
}

/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
{
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
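	/* The cfglock is only taken when this worker shares the context
	 * memory with the application: in the foreground case and for a
	 * background thread. A forked background process works on its own
	 * copy of the context, so it needs no locking. */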
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
	if(cfg->ssl_upstream || (cfg->tls_cert_bundle && cfg->tls_cert_bundle[0]) || cfg->tls_win_cert) {
		w->sslctx = connect_sslctx_create(NULL, NULL,
			cfg->tls_cert_bundle, cfg->tls_win_cert);
		if(!w->sslctx) {
			/* to make the setup fail after unlock */
			hints_delete(w->env->hints);
			w->env->hints = NULL;
		}
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}

	if(eb)
		w->base = comm_base_create_event(eb);
	else	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker_base = w->base;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
		cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
		cfg->delay_close, NULL);
	w->env->outnet = w->back;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->add_sub = &mesh_add_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	return w;
}

struct libworker* libworker_create_event(struct ub_ctx* ctx,
	struct ub_event_base* eb)
{
	return libworker_setup(ctx, 0, eb);
}

/** handle cancel command for bg worker */
static void
handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
{
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_deserialize_cancel(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_cancel(w->ctx, buf, len);
	}
	if(!q) {
		/* probably simply lookup failed, i.e. the message had been
		 * processed and answered before the cancel arrived */
		return;
	}
	q->cancelled = 1;
	free(buf);
}

/** do control command coming into bg server */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
			/* fallthrough */
		case UB_LIBCMD_QUIT:
			free(msg);
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}

/** handle control command coming into server */
void
libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* msg, size_t len, int err, void* arg)
{
	struct libworker* w = (struct libworker*)arg;

	if(err != 0) {
		free(msg);
		/* it is of no use to go on, exit */
		comm_base_exit(w->base);
		return;
	}
	libworker_do_cmd(w, msg, len); /* also frees the buf */
}

/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	w->want_quit = 1;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
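	/* tell the frontend that this bg worker has quit by sending the
	 * QUIT command back over the result pipe */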
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}

int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1, NULL);
		if(!w) return UB_NOMEM;
		w->is_bg_thread = 1;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				w = libworker_setup(ctx, 1, NULL);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				/* close non-used parts, so that the worker
				 * bgprocess gets 'pipe closed' when the
				 * main process exits */
				tube_close_read(ctx->qq_pipe);
				tube_close_write(ctx->rr_pipe);
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}

/** insert canonname */
static int
fill_canon(struct ub_result* res, uint8_t* s)
{
	char buf[255+2];
	dname_str(s, buf);
	res->canonname = strdup(buf);
	return res->canonname != 0;
}

/** fill data into result */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* data;
	res->ttl = 0;
	if(!answer) {
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		if(rep->rrset_count != 0)
			res->ttl = (int)rep->ttl;
		res->data = (char**)calloc(1, sizeof(char*));
		res->len = (int*)calloc(1, sizeof(int));
		return (res->data && res->len);
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->data || !res->len)
		return 0; /* out of memory */
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i])
			return 0; /* out of memory */
	}
	/* ttl for positive answers, from CNAME and answer RRs */
	if(data->count != 0) {
		size_t j;
		res->ttl = (int)data->ttl;
		for(j=0; j<rep->an_numrrsets; j++) {
			struct packed_rrset_data* d =
				(struct packed_rrset_data*)rep->rrsets[j]->
				entry.data;
			if((int)d->ttl < res->ttl)
				res->ttl = (int)d->ttl;
		}
	}
	/* ttl for negative answers */
	if(data->count == 0 && rep->rrset_count != 0)
		res->ttl = (int)rep->ttl;
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}

/** fill result from parsed message, on error fills servfail */
void
libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply_in_temp_region(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq, rep))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus ||
		msg_security == sec_status_secure_sentinel_fail)
		res->bogus = 1;
}

/** fillup fg results */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	q->res->was_ratelimited = was_ratelimited;
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

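	/* start from SERVFAIL with unchecked security; a successfully
	 * parsed reply overwrites both below */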
	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = sec_status_unchecked;
	q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
	q->msg_len = sldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}

void
libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* fg query is done; exit comm base */
	comm_base_exit(q->w->base);

	libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
}

/** setup qinfo and edns */
static int
setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
	struct query_info* qinfo, struct edns_data* edns)
{
	qinfo->qtype = (uint16_t)q->res->qtype;
	qinfo->qclass = (uint16_t)q->res->qclass;
	qinfo->local_alias = NULL;
	qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
	if(!qinfo->qname) {
		return 0;
	}
	qinfo->local_alias = NULL;
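	/* request EDNS with the DO bit, so DNSSEC material is returned and
	 * the answer can be validated */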
	edns->edns_present = 1;
	edns->ext_rcode = 0;
	edns->edns_version = 0;
	edns->bits = EDNS_DO;
	edns->opt_list = NULL;
	if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
		edns->udp_size = (uint16_t)sldns_buffer_capacity(
			w->back->udp_buff);
	else	edns->udp_size = 65535;
	return 1;
}

int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
{
	struct libworker* w = libworker_setup(ctx, 0, NULL);
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		libworker_delete(w);
		return UB_SYNTAX;
	}
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
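	/* the query id and flags are pre-written into the shared buffer so
	 * that a local zone or auth zone answer can be encoded directly
	 * into it */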
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);

	/* wait for reply */
	comm_base_dispatch(w->base);

	libworker_delete(w);
	return UB_NOERROR;
}

void
libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	ub_event_callback_type cb = q->cb_event;
	void* cb_arg = q->cb_arg;
	int cancelled = q->cancelled;

	/* delete it now */
	struct ub_ctx* ctx = q->w->ctx;
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(!cancelled) {
		/* call callback */
		int sec = 0;
		if(s == sec_status_bogus)
			sec = 1;
		else if(s == sec_status_secure)
			sec = 2;
		(*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL),
			(buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited);
	}
}

int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
	int* async_id)
{
	struct libworker* w = ctx->event_worker;
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns))
		return UB_SYNTAX;
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	/* process new query */
	if(async_id)
		*async_id = q->querynum;
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_event_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);
	return UB_NOERROR;
}

/** add result to the bg worker result queue */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
	int err, char* reason, int was_ratelimited)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

	if(w->want_quit) {
		context_query_delete(q);
		return;
	}
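	/* In the threaded case the query stays shared with the frontend:
	 * the answer is copied into q under the cfglock and the frontend
	 * removes q when it processes the result from the pipe. In the
	 * forked case this process has its own copy of q and deletes it
	 * here after serializing the answer. */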
	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		if(pkt) {
			q->msg_len = sldns_buffer_remaining(pkt);
			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg) {
				msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
			} else {
				msg = context_serialize_answer(q, err, NULL, &len);
			}
		} else {
			msg = context_serialize_answer(q, err, NULL, &len);
		}
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}

void
libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled || q->w->back->want_to_quit) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	if(!buf) {
		buf = q->w->env->scratch_buffer;
	}
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
}


/** handle new query command for bg worker */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
	}
	free(qinfo.qname);
}

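/** alloc cleanup callback: clears the rrset and message caches; it is
 * registered as the alloc id cleanup (alloc_set_id_cleanup) and also
 * passed to the outside network in libworker_setup */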
void libworker_alloc_cleanup(void* arg)
{
	struct libworker* w = (struct libworker*)arg;
	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
}

struct outbound_entry* libworker_send_query(struct query_info* qinfo,
	uint16_t flags, int dnssec, int want_dnssec, int nocaps,
	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
	size_t zonelen, int ssl_upstream, char* tls_auth_name,
	struct module_qstate* q)
{
	struct libworker* w = (struct libworker*)q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
		want_dnssec, nocaps, q->env->cfg->tcp_upstream, ssl_upstream,
		tls_auth_name, addr, addrlen, zone, zonelen, q,
		libworker_handle_service_reply, e, w->back->udp_buff, q->env);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}

int
libworker_handle_reply(struct comm_point* c, void* arg, int error,
        struct comm_reply* reply_info)
{
	struct module_qstate* q = (struct module_qstate*)arg;
	struct libworker* lw = (struct libworker*)q->env->worker;
	struct outbound_entry e;
	e.qstate = q;
	e.qsent = NULL;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, &e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, &e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
	return 0;
}

int
libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
        struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct libworker* lw = (struct libworker*)e->qstate->env->worker;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
	return 0;
}

/* --- fake callbacks for fptr_wlist to work --- */
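/* These symbols are referenced from the function pointer whitelist
 * (util/fptr_wlist.c), which is shared with the daemon code, so they must
 * exist for libunbound to link; they are never called here and abort if
 * they ever are. */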
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
        struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
	uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
	int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
	struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen),
	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen),
	int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
	struct module_qstate* ATTR_UNUSED(q))
{
	log_assert(0);
	return 0;
}

void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
        log_assert(0);
        return 0;
}

void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
{
        log_assert(0);
}

#ifdef UB_ON_WINDOWS
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
        ATTR_UNUSED(arg)) {
        log_assert(0);
}

void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
        log_assert(0);
}
#endif /* UB_ON_WINDOWS */