xref: /freebsd/contrib/unbound/daemon/worker.c (revision 63d1fd5970ec814904aa0f4580b10a0d302d08b2)
1 /*
2  * daemon/worker.c - worker that handles a pending list of requests.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file implements the worker that handles callbacks on events, for
40  * pending requests.
41  */
42 #include "config.h"
43 #include "util/log.h"
44 #include "util/net_help.h"
45 #include "util/random.h"
46 #include "daemon/worker.h"
47 #include "daemon/daemon.h"
48 #include "daemon/remote.h"
49 #include "daemon/acl_list.h"
50 #include "util/netevent.h"
51 #include "util/config_file.h"
52 #include "util/module.h"
53 #include "util/regional.h"
54 #include "util/storage/slabhash.h"
55 #include "services/listen_dnsport.h"
56 #include "services/outside_network.h"
57 #include "services/outbound_list.h"
58 #include "services/cache/rrset.h"
59 #include "services/cache/infra.h"
60 #include "services/cache/dns.h"
61 #include "services/mesh.h"
62 #include "services/localzone.h"
63 #include "util/data/msgparse.h"
64 #include "util/data/msgencode.h"
65 #include "util/data/dname.h"
66 #include "util/fptr_wlist.h"
67 #include "util/tube.h"
68 #include "iterator/iter_fwd.h"
69 #include "iterator/iter_hints.h"
70 #include "validator/autotrust.h"
71 #include "validator/val_anchor.h"
72 #include "libunbound/context.h"
73 #include "libunbound/libworker.h"
74 #include "sldns/sbuffer.h"
75 
76 #ifdef HAVE_SYS_TYPES_H
77 #  include <sys/types.h>
78 #endif
79 #ifdef HAVE_NETDB_H
80 #include <netdb.h>
81 #endif
82 #include <signal.h>
83 #ifdef UB_ON_WINDOWS
84 #include "winrc/win_svc.h"
85 #endif
86 
87 /** Size of a UDP datagram */
88 #define NORMAL_UDP_SIZE	512 /* bytes */
89 /** ratelimit for error responses */
90 #define ERROR_RATELIMIT 100 /* qps */
91 
92 /**
93  * Seconds to add to the prefetch leeway.  This extra TTL expires old
94  * rrsets a little earlier than strictly necessary, so that the new update
95  * can be put into the cache.  It makes sure that if not all TTLs are equal
96  * in the message to be updated (and replaced), rrsets with up to this much
97  * extra TTL are also replaced.  As a result the new message will (most
98  * likely) have at least this TTL, avoiding very small 'split second' TTLs
99  * that occur when operators choose TTLs that are relative primes.
100  * Also has to be at least one to break ties (and overwrite cached entry).
101  */
102 #define PREFETCH_EXPIRY_ADD 60
103 
104 #ifdef UNBOUND_ALLOC_STATS
105 /** measure memory leakage */
106 static void
107 debug_memleak(size_t accounted, size_t heap,
108 	size_t total_alloc, size_t total_free)
109 {
110 	static int init = 0;
111 	static size_t base_heap, base_accounted, base_alloc, base_free;
112 	size_t base_af, cur_af, grow_af, grow_acc;
113 	if(!init) {
114 		init = 1;
115 		base_heap = heap;
116 		base_accounted = accounted;
117 		base_alloc = total_alloc;
118 		base_free = total_free;
119 	}
120 	base_af = base_alloc - base_free;
121 	cur_af = total_alloc - total_free;
122 	grow_af = cur_af - base_af;
123 	grow_acc = accounted - base_accounted;
124 	log_info("Leakage: %d leaked. growth: %u use, %u acc, %u heap",
125 		(int)(grow_af - grow_acc), (unsigned)grow_af,
126 		(unsigned)grow_acc, (unsigned)(heap - base_heap));
127 }
128 
129 /** give debug heap size indication */
130 static void
131 debug_total_mem(size_t calctotal)
132 {
133 #ifdef HAVE_SBRK
134 	extern void* unbound_start_brk;
135 	extern size_t unbound_mem_alloc, unbound_mem_freed;
136 	void* cur = sbrk(0);
137 	int total = cur-unbound_start_brk;
138 	log_info("Total heap memory estimate: %u  total-alloc: %u  "
139 		"total-free: %u", (unsigned)total,
140 		(unsigned)unbound_mem_alloc, (unsigned)unbound_mem_freed);
141 	debug_memleak(calctotal, (size_t)total,
142 		unbound_mem_alloc, unbound_mem_freed);
143 #else
144 	(void)calctotal;
145 #endif /* HAVE_SBRK */
146 }
147 #endif /* UNBOUND_ALLOC_STATS */
148 
149 /** Report on memory usage by this thread and globally */
150 static void
151 worker_mem_report(struct worker* ATTR_UNUSED(worker),
152 	struct serviced_query* ATTR_UNUSED(cur_serv))
153 {
154 #ifdef UNBOUND_ALLOC_STATS
155 	/* debug func in validator module */
156 	size_t total, front, back, mesh, msg, rrset, infra, ac, superac;
157 	size_t me, iter, val, anch;
158 	int i;
159 	if(verbosity < VERB_ALGO)
160 		return;
161 	front = listen_get_mem(worker->front);
162 	back = outnet_get_mem(worker->back);
163 	msg = slabhash_get_mem(worker->env.msg_cache);
164 	rrset = slabhash_get_mem(&worker->env.rrset_cache->table);
165 	infra = infra_get_mem(worker->env.infra_cache);
166 	mesh = mesh_get_mem(worker->env.mesh);
167 	ac = alloc_get_mem(&worker->alloc);
168 	superac = alloc_get_mem(&worker->daemon->superalloc);
169 	anch = anchors_get_mem(worker->env.anchors);
170 	iter = 0;
171 	val = 0;
172 	for(i=0; i<worker->env.mesh->mods.num; i++) {
173 		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
174 			mods.mod[i]->get_mem));
175 		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
176 			val += (*worker->env.mesh->mods.mod[i]->get_mem)
177 				(&worker->env, i);
178 		else	iter += (*worker->env.mesh->mods.mod[i]->get_mem)
179 				(&worker->env, i);
180 	}
181 	me = sizeof(*worker) + sizeof(*worker->base) + sizeof(*worker->comsig)
182 		+ comm_point_get_mem(worker->cmd_com)
183 		+ sizeof(worker->rndstate)
184 		+ regional_get_mem(worker->scratchpad)
185 		+ sizeof(*worker->env.scratch_buffer)
186 		+ sldns_buffer_capacity(worker->env.scratch_buffer)
187 		+ forwards_get_mem(worker->env.fwds)
188 		+ hints_get_mem(worker->env.hints);
189 	if(worker->thread_num == 0)
190 		me += acl_list_get_mem(worker->daemon->acl);
191 	if(cur_serv) {
192 		me += serviced_get_mem(cur_serv);
193 	}
194 	total = front+back+mesh+msg+rrset+infra+iter+val+ac+superac+me;
195 	log_info("Memory conditions: %u front=%u back=%u mesh=%u msg=%u "
196 		"rrset=%u infra=%u iter=%u val=%u anchors=%u "
197 		"alloccache=%u globalalloccache=%u me=%u",
198 		(unsigned)total, (unsigned)front, (unsigned)back,
199 		(unsigned)mesh, (unsigned)msg, (unsigned)rrset,
200 		(unsigned)infra, (unsigned)iter, (unsigned)val, (unsigned)anch,
201 		(unsigned)ac, (unsigned)superac, (unsigned)me);
202 	debug_total_mem(total);
203 #else /* no UNBOUND_ALLOC_STATS */
204 	size_t val = 0;
205 	int i;
206 	if(verbosity < VERB_QUERY)
207 		return;
208 	for(i=0; i<worker->env.mesh->mods.num; i++) {
209 		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
210 			mods.mod[i]->get_mem));
211 		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
212 			val += (*worker->env.mesh->mods.mod[i]->get_mem)
213 				(&worker->env, i);
214 	}
215 	verbose(VERB_QUERY, "cache memory msg=%u rrset=%u infra=%u val=%u",
216 		(unsigned)slabhash_get_mem(worker->env.msg_cache),
217 		(unsigned)slabhash_get_mem(&worker->env.rrset_cache->table),
218 		(unsigned)infra_get_mem(worker->env.infra_cache),
219 		(unsigned)val);
220 #endif /* UNBOUND_ALLOC_STATS */
221 }
222 
223 void
224 worker_send_cmd(struct worker* worker, enum worker_commands cmd)
225 {
226 	uint32_t c = (uint32_t)htonl(cmd);
227 	if(!tube_write_msg(worker->cmd, (uint8_t*)&c, sizeof(c), 0)) {
228 		log_err("worker send cmd %d failed", (int)cmd);
229 	}
230 }
231 
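/** handle a reply to an outgoing query made for a module; arg is the
 * module_qstate.  A malformed reply is reported to the mesh as a
 * timeout, as if it never arrived. */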
232 int
233 worker_handle_reply(struct comm_point* c, void* arg, int error,
234 	struct comm_reply* reply_info)
235 {
236 	struct module_qstate* q = (struct module_qstate*)arg;
237 	struct worker* worker = q->env->worker;
238 	struct outbound_entry e;
239 	e.qstate = q;
240 	e.qsent = NULL;
241 
242 	if(error != 0) {
243 		mesh_report_reply(worker->env.mesh, &e, reply_info, error);
244 		worker_mem_report(worker, NULL);
245 		return 0;
246 	}
247 	/* sanity check. */
248 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
249 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
250 			LDNS_PACKET_QUERY
251 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
252 		/* error becomes timeout for the module as if this reply
253 		 * never arrived. */
254 		mesh_report_reply(worker->env.mesh, &e, reply_info,
255 			NETEVENT_TIMEOUT);
256 		worker_mem_report(worker, NULL);
257 		return 0;
258 	}
259 	mesh_report_reply(worker->env.mesh, &e, reply_info, NETEVENT_NOERROR);
260 	worker_mem_report(worker, NULL);
261 	return 0;
262 }
263 
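/** handle a reply to an outgoing serviced query; arg is the
 * outbound_entry created by worker_send_query.  Malformed replies are
 * reported to the mesh as timeouts. */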
264 int
265 worker_handle_service_reply(struct comm_point* c, void* arg, int error,
266 	struct comm_reply* reply_info)
267 {
268 	struct outbound_entry* e = (struct outbound_entry*)arg;
269 	struct worker* worker = e->qstate->env->worker;
270 	struct serviced_query *sq = e->qsent;
271 
272 	verbose(VERB_ALGO, "worker svcd callback for qstate %p", e->qstate);
273 	if(error != 0) {
274 		mesh_report_reply(worker->env.mesh, e, reply_info, error);
275 		worker_mem_report(worker, sq);
276 		return 0;
277 	}
278 	/* sanity check. */
279 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
280 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
281 			LDNS_PACKET_QUERY
282 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
283 		/* error becomes timeout for the module as if this reply
284 		 * never arrived. */
285 		verbose(VERB_ALGO, "worker: bad reply handled as timeout");
286 		mesh_report_reply(worker->env.mesh, e, reply_info,
287 			NETEVENT_TIMEOUT);
288 		worker_mem_report(worker, sq);
289 		return 0;
290 	}
291 	mesh_report_reply(worker->env.mesh, e, reply_info, NETEVENT_NOERROR);
292 	worker_mem_report(worker, sq);
293 	return 0;
294 }
295 
296 /** ratelimit error replies
297  * @param worker: the worker struct with ratelimit counter
298  * @param err: the error code that is wanted for the reply.
299  * @return value of err if okay, or -1 if the reply should be discarded instead.
300  */
301 static int
302 worker_err_ratelimit(struct worker* worker, int err)
303 {
304 	if(worker->err_limit_time == *worker->env.now) {
305 		/* see if limit is exceeded for this second */
306 		if(worker->err_limit_count++ > ERROR_RATELIMIT)
307 			return -1;
308 	} else {
309 		/* new second, new limits */
310 		worker->err_limit_time = *worker->env.now;
311 		worker->err_limit_count = 1;
312 	}
313 	return err;
314 }
315 
316 /** check request sanity.
317  * @param pkt: the wire packet to examine for sanity.
318  * @param worker: parameters for checking.
319  * @return error code, 0 OK, or -1 discard.
320 */
321 static int
322 worker_check_request(sldns_buffer* pkt, struct worker* worker)
323 {
324 	if(sldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) {
325 		verbose(VERB_QUERY, "request too short, discarded");
326 		return -1;
327 	}
328 	if(sldns_buffer_limit(pkt) > NORMAL_UDP_SIZE &&
329 		worker->daemon->cfg->harden_large_queries) {
330 		verbose(VERB_QUERY, "request too large, discarded");
331 		return -1;
332 	}
333 	if(LDNS_QR_WIRE(sldns_buffer_begin(pkt))) {
334 		verbose(VERB_QUERY, "request has QR bit on, discarded");
335 		return -1;
336 	}
337 	if(LDNS_TC_WIRE(sldns_buffer_begin(pkt))) {
338 		LDNS_TC_CLR(sldns_buffer_begin(pkt));
339 		verbose(VERB_QUERY, "request bad, has TC bit on");
340 		return worker_err_ratelimit(worker, LDNS_RCODE_FORMERR);
341 	}
342 	if(LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)) != LDNS_PACKET_QUERY) {
343 		verbose(VERB_QUERY, "request unknown opcode %d",
344 			LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)));
345 		return worker_err_ratelimit(worker, LDNS_RCODE_NOTIMPL);
346 	}
347 	if(LDNS_QDCOUNT(sldns_buffer_begin(pkt)) != 1) {
348 		verbose(VERB_QUERY, "request wrong nr qd=%d",
349 			LDNS_QDCOUNT(sldns_buffer_begin(pkt)));
350 		return worker_err_ratelimit(worker, LDNS_RCODE_FORMERR);
351 	}
352 	if(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) != 0) {
353 		verbose(VERB_QUERY, "request wrong nr an=%d",
354 			LDNS_ANCOUNT(sldns_buffer_begin(pkt)));
355 		return worker_err_ratelimit(worker, LDNS_RCODE_FORMERR);
356 	}
357 	if(LDNS_NSCOUNT(sldns_buffer_begin(pkt)) != 0) {
358 		verbose(VERB_QUERY, "request wrong nr ns=%d",
359 			LDNS_NSCOUNT(sldns_buffer_begin(pkt)));
360 		return worker_err_ratelimit(worker, LDNS_RCODE_FORMERR);
361 	}
362 	if(LDNS_ARCOUNT(sldns_buffer_begin(pkt)) > 1) {
363 		verbose(VERB_QUERY, "request wrong nr ar=%d",
364 			LDNS_ARCOUNT(sldns_buffer_begin(pkt)));
365 		return worker_err_ratelimit(worker, LDNS_RCODE_FORMERR);
366 	}
367 	return 0;
368 }
369 
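/** handle a command arriving on the worker cmd tube; the message is a
 * 4 byte worker_commands value (quit, stats, stats_noreset or remote). */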
370 void
371 worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), uint8_t* msg,
372 	size_t len, int error, void* arg)
373 {
374 	struct worker* worker = (struct worker*)arg;
375 	enum worker_commands cmd;
376 	if(error != NETEVENT_NOERROR) {
377 		free(msg);
378 		if(error == NETEVENT_CLOSED)
379 			comm_base_exit(worker->base);
380 		else	log_info("control event: %d", error);
381 		return;
382 	}
383 	if(len != sizeof(uint32_t)) {
384 		fatal_exit("bad control msg length %d", (int)len);
385 	}
386 	cmd = sldns_read_uint32(msg);
387 	free(msg);
388 	switch(cmd) {
389 	case worker_cmd_quit:
390 		verbose(VERB_ALGO, "got control cmd quit");
391 		comm_base_exit(worker->base);
392 		break;
393 	case worker_cmd_stats:
394 		verbose(VERB_ALGO, "got control cmd stats");
395 		server_stats_reply(worker, 1);
396 		break;
397 	case worker_cmd_stats_noreset:
398 		verbose(VERB_ALGO, "got control cmd stats_noreset");
399 		server_stats_reply(worker, 0);
400 		break;
401 	case worker_cmd_remote:
402 		verbose(VERB_ALGO, "got control cmd remote");
403 		daemon_remote_exec(worker);
404 		break;
405 	default:
406 		log_err("bad command %d", (int)cmd);
407 		break;
408 	}
409 }
410 
411 /** check if a delegation is secure */
412 static enum sec_status
413 check_delegation_secure(struct reply_info *rep)
414 {
415 	/* return smallest security status */
416 	size_t i;
417 	enum sec_status sec = sec_status_secure;
418 	enum sec_status s;
419 	size_t num = rep->an_numrrsets + rep->ns_numrrsets;
420 	/* check if answer and authority are OK */
421 	for(i=0; i<num; i++) {
422 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
423 			->security;
424 		if(s < sec)
425 			sec = s;
426 	}
427 	/* in the additional section, only unchecked triggers revalidation */
428 	for(i=num; i<rep->rrset_count; i++) {
429 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
430 			->security;
431 		if(s == sec_status_unchecked)
432 			return s;
433 	}
434 	return sec;
435 }
436 
437 /** remove nonsecure rrsets from a delegation referral's additional section */
438 static void
439 deleg_remove_nonsecure_additional(struct reply_info* rep)
440 {
441 	/* we can simply edit it, since we are working in the scratch region */
442 	size_t i;
443 	enum sec_status s;
444 
445 	for(i = rep->an_numrrsets+rep->ns_numrrsets; i<rep->rrset_count; i++) {
446 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
447 			->security;
448 		if(s != sec_status_secure) {
449 			memmove(rep->rrsets+i, rep->rrsets+i+1,
450 				sizeof(struct ub_packed_rrset_key*)*
451 				(rep->rrset_count - i - 1));
452 			rep->ar_numrrsets--;
453 			rep->rrset_count--;
454 			i--;
455 		}
456 	}
457 }
458 
459 /** answer nonrecursive query from the cache */
460 static int
461 answer_norec_from_cache(struct worker* worker, struct query_info* qinfo,
462 	uint16_t id, uint16_t flags, struct comm_reply* repinfo,
463 	struct edns_data* edns)
464 {
465 	/* for a nonrecursive query return either:
466 	 * 	o an error (servfail; we try to avoid this)
467 	 * 	o a delegation (closest we have; this routine tries that)
468 	 * 	o the answer (checked by answer_from_cache)
469 	 *
470 	 * So, grab a delegation from the rrset cache.
471 	 * Then check if it needs validation; if so, this routine fails,
472 	 * so that the iterator can prime and the validator can verify rrsets.
473 	 */
474 	uint16_t udpsize = edns->udp_size;
475 	int secure = 0;
476 	time_t timenow = *worker->env.now;
477 	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
478 		&& worker->env.need_to_validate;
479 	struct dns_msg *msg = NULL;
480 	struct delegpt *dp;
481 
482 	dp = dns_cache_find_delegation(&worker->env, qinfo->qname,
483 		qinfo->qname_len, qinfo->qtype, qinfo->qclass,
484 		worker->scratchpad, &msg, timenow);
485 	if(!dp) { /* no delegation, need to reprime */
486 		return 0;
487 	}
488 	if(must_validate) {
489 		switch(check_delegation_secure(msg->rep)) {
490 		case sec_status_unchecked:
491 			/* some rrsets have not been verified yet, go and
492 			 * let validator do that */
493 			return 0;
494 		case sec_status_bogus:
495 			/* some rrsets are bogus, reply servfail */
496 			edns->edns_version = EDNS_ADVERTISED_VERSION;
497 			edns->udp_size = EDNS_ADVERTISED_SIZE;
498 			edns->ext_rcode = 0;
499 			edns->bits &= EDNS_DO;
500 			if(!edns_opt_inplace_reply(edns, worker->scratchpad))
501 				return 0;
502 			error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
503 				&msg->qinfo, id, flags, edns);
504 			if(worker->stats.extended) {
505 				worker->stats.ans_bogus++;
506 				worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL]++;
507 			}
508 			return 1;
509 		case sec_status_secure:
510 			/* all rrsets are secure */
511 			/* remove non-secure rrsets from the additional section */
512 			if(worker->env.cfg->val_clean_additional)
513 				deleg_remove_nonsecure_additional(msg->rep);
514 			secure = 1;
515 			break;
516 		case sec_status_indeterminate:
517 		case sec_status_insecure:
518 		default:
519 			/* not secure */
520 			secure = 0;
521 			break;
522 		}
523 	}
524 	/* return this delegation from the cache */
525 	edns->edns_version = EDNS_ADVERTISED_VERSION;
526 	edns->udp_size = EDNS_ADVERTISED_SIZE;
527 	edns->ext_rcode = 0;
528 	edns->bits &= EDNS_DO;
529 	if(!edns_opt_inplace_reply(edns, worker->scratchpad))
530 		return 0;
531 	msg->rep->flags |= BIT_QR|BIT_RA;
532 	if(!reply_info_answer_encode(&msg->qinfo, msg->rep, id, flags,
533 		repinfo->c->buffer, 0, 1, worker->scratchpad,
534 		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
535 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
536 			&msg->qinfo, id, flags, edns);
537 	}
538 	if(worker->stats.extended) {
539 		if(secure) worker->stats.ans_secure++;
540 		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
541 	}
542 	return 1;
543 }
544 
545 /** answer query from the cache */
546 static int
547 answer_from_cache(struct worker* worker, struct query_info* qinfo,
548 	struct reply_info* rep, uint16_t id, uint16_t flags,
549 	struct comm_reply* repinfo, struct edns_data* edns)
550 {
551 	time_t timenow = *worker->env.now;
552 	uint16_t udpsize = edns->udp_size;
553 	int secure;
554 	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
555 		&& worker->env.need_to_validate;
556 	/* see if it is possible */
557 	if(rep->ttl < timenow) {
558 		/* the rrsets may have been updated in the meantime.
559 		 * we will refetch the message from the
560 		 * authoritative server
561 		 */
562 		return 0;
563 	}
564 	if(!rrset_array_lock(rep->ref, rep->rrset_count, timenow))
565 		return 0;
566 	/* locked and ids and ttls are OK. */
567 	/* check CNAME chain (if any) */
568 	if(rep->an_numrrsets > 0 && (rep->rrsets[0]->rk.type ==
569 		htons(LDNS_RR_TYPE_CNAME) || rep->rrsets[0]->rk.type ==
570 		htons(LDNS_RR_TYPE_DNAME))) {
571 		if(!reply_check_cname_chain(qinfo, rep)) {
572 			/* cname chain invalid, redo iterator steps */
573 			verbose(VERB_ALGO, "Cache reply: cname chain broken");
574 		bail_out:
575 			rrset_array_unlock_touch(worker->env.rrset_cache,
576 				worker->scratchpad, rep->ref, rep->rrset_count);
577 			return 0;
578 		}
579 	}
580 	/* check security status of the cached answer */
581 	if( rep->security == sec_status_bogus && must_validate) {
582 		/* BAD cached */
583 		edns->edns_version = EDNS_ADVERTISED_VERSION;
584 		edns->udp_size = EDNS_ADVERTISED_SIZE;
585 		edns->ext_rcode = 0;
586 		edns->bits &= EDNS_DO;
587 		if(!edns_opt_inplace_reply(edns, worker->scratchpad))
588 			return 0;
589 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
590 			qinfo, id, flags, edns);
591 		rrset_array_unlock_touch(worker->env.rrset_cache,
592 			worker->scratchpad, rep->ref, rep->rrset_count);
593 		if(worker->stats.extended) {
594 			worker->stats.ans_bogus ++;
595 			worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL] ++;
596 		}
597 		return 1;
598 	} else if( rep->security == sec_status_unchecked && must_validate) {
599 		verbose(VERB_ALGO, "Cache reply: unchecked entry needs "
600 			"validation");
601 		goto bail_out; /* need to validate cache entry first */
602 	} else if(rep->security == sec_status_secure) {
603 		if(reply_all_rrsets_secure(rep))
604 			secure = 1;
605 		else	{
606 			if(must_validate) {
607 				verbose(VERB_ALGO, "Cache reply: secure entry"
608 					" changed status");
609 				goto bail_out; /* rrset changed, re-verify */
610 			}
611 			secure = 0;
612 		}
613 	} else	secure = 0;
614 
615 	edns->edns_version = EDNS_ADVERTISED_VERSION;
616 	edns->udp_size = EDNS_ADVERTISED_SIZE;
617 	edns->ext_rcode = 0;
618 	edns->bits &= EDNS_DO;
619 	if(!edns_opt_inplace_reply(edns, worker->scratchpad))
620 		return 0;
621 	if(!reply_info_answer_encode(qinfo, rep, id, flags,
622 		repinfo->c->buffer, timenow, 1, worker->scratchpad,
623 		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
624 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
625 			qinfo, id, flags, edns);
626 	}
627 	/* cannot send the reply right now, because blocking network syscall
628 	 * is bad while holding locks. */
629 	rrset_array_unlock_touch(worker->env.rrset_cache, worker->scratchpad,
630 		rep->ref, rep->rrset_count);
631 	if(worker->stats.extended) {
632 		if(secure) worker->stats.ans_secure++;
633 		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
634 	}
635 	/* go and return this buffer to the client */
636 	return 1;
637 }
638 
639 /** Reply to client and perform prefetch to keep cache up to date */
640 static void
641 reply_and_prefetch(struct worker* worker, struct query_info* qinfo,
642 	uint16_t flags, struct comm_reply* repinfo, time_t leeway)
643 {
644 	/* first send answer to client to keep its latency
645 	 * as small as a cache reply */
646 	comm_point_send_reply(repinfo);
647 	server_stats_prefetch(&worker->stats, worker);
648 
649 	/* create the prefetch in the mesh as a normal lookup without
650 	 * client addrs waiting, which has the cache blacklisted (to bypass
651 	 * the cache and go to the network for the data). */
652 	/* this (potentially) runs the mesh for the new query */
653 	mesh_new_prefetch(worker->env.mesh, qinfo, flags, leeway +
654 		PREFETCH_EXPIRY_ADD);
655 }
656 
657 /**
658  * Fill CH class answer into buffer. Keeps query.
659  * @param pkt: buffer
660  * @param str: string to put into the TXT record (capped at 255 bytes).
661  * @param edns: edns reply information.
662  * @param worker: worker with scratch region.
663  */
664 static void
665 chaos_replystr(sldns_buffer* pkt, const char* str, struct edns_data* edns,
666 	struct worker* worker)
667 {
668 	size_t len = strlen(str);
669 	unsigned int rd = LDNS_RD_WIRE(sldns_buffer_begin(pkt));
670 	unsigned int cd = LDNS_CD_WIRE(sldns_buffer_begin(pkt));
671 	if(len>255) len=255; /* cap size of TXT record */
672 	sldns_buffer_clear(pkt);
673 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip id */
674 	sldns_buffer_write_u16(pkt, (uint16_t)(BIT_QR|BIT_RA));
675 	if(rd) LDNS_RD_SET(sldns_buffer_begin(pkt));
676 	if(cd) LDNS_CD_SET(sldns_buffer_begin(pkt));
677 	sldns_buffer_write_u16(pkt, 1); /* qdcount */
678 	sldns_buffer_write_u16(pkt, 1); /* ancount */
679 	sldns_buffer_write_u16(pkt, 0); /* nscount */
680 	sldns_buffer_write_u16(pkt, 0); /* arcount */
681 	(void)query_dname_len(pkt); /* skip qname */
682 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qtype */
683 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qclass */
684 	sldns_buffer_write_u16(pkt, 0xc00c); /* compr ptr to query */
685 	sldns_buffer_write_u16(pkt, LDNS_RR_TYPE_TXT);
686 	sldns_buffer_write_u16(pkt, LDNS_RR_CLASS_CH);
687 	sldns_buffer_write_u32(pkt, 0); /* TTL */
688 	sldns_buffer_write_u16(pkt, sizeof(uint8_t) + len);
689 	sldns_buffer_write_u8(pkt, len);
690 	sldns_buffer_write(pkt, str, len);
691 	sldns_buffer_flip(pkt);
692 	edns->edns_version = EDNS_ADVERTISED_VERSION;
693 	edns->udp_size = EDNS_ADVERTISED_SIZE;
694 	edns->bits &= EDNS_DO;
695 	if(!edns_opt_inplace_reply(edns, worker->scratchpad))
696 		edns->opt_list = NULL;
697 	attach_edns_record(pkt, edns);
698 }
699 
700 /**
701  * Answer CH class queries.
702  * @param w: worker
703  * @param qinfo: query info. Pointer into packet buffer.
704  * @param edns: edns info from query.
705  * @param pkt: packet buffer.
706  * @return: true if a reply is to be sent.
707  */
708 static int
709 answer_chaos(struct worker* w, struct query_info* qinfo,
710 	struct edns_data* edns, sldns_buffer* pkt)
711 {
712 	struct config_file* cfg = w->env.cfg;
713 	if(qinfo->qtype != LDNS_RR_TYPE_ANY && qinfo->qtype != LDNS_RR_TYPE_TXT)
714 		return 0;
715 	if(query_dname_compare(qinfo->qname,
716 		(uint8_t*)"\002id\006server") == 0 ||
717 		query_dname_compare(qinfo->qname,
718 		(uint8_t*)"\010hostname\004bind") == 0)
719 	{
720 		if(cfg->hide_identity)
721 			return 0;
722 		if(cfg->identity==NULL || cfg->identity[0]==0) {
723 			char buf[MAXHOSTNAMELEN+1];
724 			if (gethostname(buf, MAXHOSTNAMELEN) == 0) {
725 				buf[MAXHOSTNAMELEN] = 0;
726 				chaos_replystr(pkt, buf, edns, w);
727 			} else 	{
728 				log_err("gethostname: %s", strerror(errno));
729 				chaos_replystr(pkt, "no hostname", edns, w);
730 			}
731 		}
732 		else 	chaos_replystr(pkt, cfg->identity, edns, w);
733 		return 1;
734 	}
735 	if(query_dname_compare(qinfo->qname,
736 		(uint8_t*)"\007version\006server") == 0 ||
737 		query_dname_compare(qinfo->qname,
738 		(uint8_t*)"\007version\004bind") == 0)
739 	{
740 		if(cfg->hide_version)
741 			return 0;
742 		if(cfg->version==NULL || cfg->version[0]==0)
743 			chaos_replystr(pkt, PACKAGE_STRING, edns, w);
744 		else 	chaos_replystr(pkt, cfg->version, edns, w);
745 		return 1;
746 	}
747 	return 0;
748 }
749 
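/** Apply an ACL deny or refuse action to the query.
 * @return 0 if the query was dropped, 1 if a REFUSED reply was put in
 * the buffer, or -1 if the ACL neither denies nor refuses and normal
 * processing should continue. */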
750 static int
751 deny_refuse(struct comm_point* c, enum acl_access acl,
752 	enum acl_access deny, enum acl_access refuse,
753 	struct worker* worker, struct comm_reply* repinfo)
754 {
755 	if(acl == deny) {
756 		comm_point_drop_reply(repinfo);
757 		if(worker->stats.extended)
758 			worker->stats.unwanted_queries++;
759 		return 0;
760 	} else if(acl == refuse) {
761 		log_addr(VERB_ALGO, "refused query from",
762 			&repinfo->addr, repinfo->addrlen);
763 		log_buf(VERB_ALGO, "refuse", c->buffer);
764 		if(worker->stats.extended)
765 			worker->stats.unwanted_queries++;
766 		if(worker_check_request(c->buffer, worker) == -1) {
767 			comm_point_drop_reply(repinfo);
768 			return 0; /* discard this */
769 		}
770 		sldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
771 		sldns_buffer_write_at(c->buffer, 4,
772 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
773 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
774 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
775 			LDNS_RCODE_REFUSED);
776 		sldns_buffer_set_position(c->buffer, LDNS_HEADER_SIZE);
777 		sldns_buffer_flip(c->buffer);
778 		return 1;
779 	}
780 
781 	return -1;
782 }
783 
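/** apply the global acl_deny/acl_refuse actions */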
784 static int
785 deny_refuse_all(struct comm_point* c, enum acl_access acl,
786 	struct worker* worker, struct comm_reply* repinfo)
787 {
788 	return deny_refuse(c, acl, acl_deny, acl_refuse, worker, repinfo);
789 }
790 
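/** apply the acl_deny_non_local/acl_refuse_non_local actions, used for
 * queries that were not answered from local data */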
791 static int
792 deny_refuse_non_local(struct comm_point* c, enum acl_access acl,
793 	struct worker* worker, struct comm_reply* repinfo)
794 {
795 	return deny_refuse(c, acl, acl_deny_non_local, acl_refuse_non_local, worker, repinfo);
796 }
797 
798 int
799 worker_handle_request(struct comm_point* c, void* arg, int error,
800 	struct comm_reply* repinfo)
801 {
802 	struct worker* worker = (struct worker*)arg;
803 	int ret;
804 	hashvalue_t h;
805 	struct lruhash_entry* e;
806 	struct query_info qinfo;
807 	struct edns_data edns;
808 	enum acl_access acl;
809 	struct acl_addr* acladdr;
810 	int rc = 0;
811 
812 	if(error != NETEVENT_NOERROR) {
813 		/* some bad tcp query DNS formats give these error calls */
814 		verbose(VERB_ALGO, "handle request called with err=%d", error);
815 		return 0;
816 	}
817 #ifdef USE_DNSTAP
818 	if(worker->dtenv.log_client_query_messages)
819 		dt_msg_send_client_query(&worker->dtenv, &repinfo->addr, c->type,
820 			c->buffer);
821 #endif
822 	acladdr = acl_addr_lookup(worker->daemon->acl, &repinfo->addr,
823 		repinfo->addrlen);
824 	acl = acl_get_control(acladdr);
825 	if((ret=deny_refuse_all(c, acl, worker, repinfo)) != -1)
826 	{
827 		if(ret == 1)
828 			goto send_reply;
829 		return ret;
830 	}
831 	if((ret=worker_check_request(c->buffer, worker)) != 0) {
832 		verbose(VERB_ALGO, "worker check request: bad query.");
833 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
834 		if(ret != -1) {
835 			LDNS_QR_SET(sldns_buffer_begin(c->buffer));
836 			LDNS_RCODE_SET(sldns_buffer_begin(c->buffer), ret);
837 			return 1;
838 		}
839 		comm_point_drop_reply(repinfo);
840 		return 0;
841 	}
842 	worker->stats.num_queries++;
843 	/* see if query is in the cache */
844 	if(!query_info_parse(&qinfo, c->buffer)) {
845 		verbose(VERB_ALGO, "worker parse request: formerror.");
846 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
847 		if(worker_err_ratelimit(worker, LDNS_RCODE_FORMERR) == -1) {
848 			comm_point_drop_reply(repinfo);
849 			return 0;
850 		}
851 		sldns_buffer_rewind(c->buffer);
852 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
853 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
854 			LDNS_RCODE_FORMERR);
855 		server_stats_insrcode(&worker->stats, c->buffer);
856 		goto send_reply;
857 	}
858 	if(worker->env.cfg->log_queries) {
859 		char ip[128];
860 		addr_to_str(&repinfo->addr, repinfo->addrlen, ip, sizeof(ip));
861 		log_nametypeclass(0, ip, qinfo.qname, qinfo.qtype, qinfo.qclass);
862 	}
863 	if(qinfo.qtype == LDNS_RR_TYPE_AXFR ||
864 		qinfo.qtype == LDNS_RR_TYPE_IXFR) {
865 		verbose(VERB_ALGO, "worker request: refused zone transfer.");
866 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
867 		sldns_buffer_rewind(c->buffer);
868 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
869 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
870 			LDNS_RCODE_REFUSED);
871 		if(worker->stats.extended) {
872 			worker->stats.qtype[qinfo.qtype]++;
873 			server_stats_insrcode(&worker->stats, c->buffer);
874 		}
875 		goto send_reply;
876 	}
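	/* parse EDNS from the query; on a parse error, answer with the
	 * returned rcode and a plain EDNS record */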
877 	if((ret=parse_edns_from_pkt(c->buffer, &edns, worker->scratchpad)) != 0) {
878 		struct edns_data reply_edns;
879 		verbose(VERB_ALGO, "worker parse edns: formerror.");
880 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
881 		memset(&reply_edns, 0, sizeof(reply_edns));
882 		reply_edns.edns_present = 1;
883 		reply_edns.udp_size = EDNS_ADVERTISED_SIZE;
884 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer), ret);
885 		error_encode(c->buffer, ret, &qinfo,
886 			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
887 			sldns_buffer_read_u16_at(c->buffer, 2), &reply_edns);
888 		regional_free_all(worker->scratchpad);
889 		server_stats_insrcode(&worker->stats, c->buffer);
890 		goto send_reply;
891 	}
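	/* only EDNS version 0 is supported; other versions get BADVERS */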
892 	if(edns.edns_present && edns.edns_version != 0) {
893 		edns.ext_rcode = (uint8_t)(EDNS_RCODE_BADVERS>>4);
894 		edns.edns_version = EDNS_ADVERTISED_VERSION;
895 		edns.udp_size = EDNS_ADVERTISED_SIZE;
896 		edns.bits &= EDNS_DO;
897 		edns.opt_list = NULL;
898 		verbose(VERB_ALGO, "query with bad edns version.");
899 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
900 		error_encode(c->buffer, EDNS_RCODE_BADVERS&0xf, &qinfo,
901 			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
902 			sldns_buffer_read_u16_at(c->buffer, 2), NULL);
903 		attach_edns_record(c->buffer, &edns);
904 		regional_free_all(worker->scratchpad);
905 		goto send_reply;
906 	}
907 	if(edns.edns_present && edns.udp_size < NORMAL_UDP_SIZE &&
908 		worker->daemon->cfg->harden_short_bufsize) {
909 		verbose(VERB_QUERY, "worker request: EDNS bufsize %d ignored",
910 			(int)edns.udp_size);
911 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
912 		edns.udp_size = NORMAL_UDP_SIZE;
913 	}
914 	if(edns.udp_size > worker->daemon->cfg->max_udp_size &&
915 		c->type == comm_udp) {
916 		verbose(VERB_QUERY,
917 			"worker request: max UDP reply size modified"
918 			" (%d to max-udp-size)", (int)edns.udp_size);
919 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
920 		edns.udp_size = worker->daemon->cfg->max_udp_size;
921 	}
922 	if(edns.udp_size < LDNS_HEADER_SIZE) {
923 		verbose(VERB_ALGO, "worker request: edns is too small.");
924 		log_addr(VERB_CLIENT, "from", &repinfo->addr, repinfo->addrlen);
925 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
926 		LDNS_TC_SET(sldns_buffer_begin(c->buffer));
927 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
928 			LDNS_RCODE_SERVFAIL);
929 		sldns_buffer_set_position(c->buffer, LDNS_HEADER_SIZE);
930 		sldns_buffer_write_at(c->buffer, 4,
931 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
932 		sldns_buffer_flip(c->buffer);
933 		regional_free_all(worker->scratchpad);
934 		goto send_reply;
935 	}
936 	if(worker->stats.extended)
937 		server_stats_insquery(&worker->stats, c, qinfo.qtype,
938 			qinfo.qclass, &edns, repinfo);
939 	if(c->type != comm_udp)
940 		edns.udp_size = 65535; /* max size for TCP replies */
941 	if(qinfo.qclass == LDNS_RR_CLASS_CH && answer_chaos(worker, &qinfo,
942 		&edns, c->buffer)) {
943 		server_stats_insrcode(&worker->stats, c->buffer);
944 		regional_free_all(worker->scratchpad);
945 		goto send_reply;
946 	}
947 	if(local_zones_answer(worker->daemon->local_zones, &qinfo, &edns,
948 		c->buffer, worker->scratchpad, repinfo,
949 		acladdr->taglist, acladdr->taglen, acladdr->tag_actions,
950 		acladdr->tag_actions_size, acladdr->tag_datas,
951 		acladdr->tag_datas_size, worker->daemon->cfg->tagname,
952 		worker->daemon->cfg->num_tags)) {
953 		regional_free_all(worker->scratchpad);
954 		if(sldns_buffer_limit(c->buffer) == 0) {
955 			comm_point_drop_reply(repinfo);
956 			return 0;
957 		}
958 		server_stats_insrcode(&worker->stats, c->buffer);
959 		goto send_reply;
960 	}
961 
962 	/* We've looked in our local zones. If the answer isn't there, we
963 	 * might need to bail out based on ACLs now. */
964 	if((ret=deny_refuse_non_local(c, acl, worker, repinfo)) != -1)
965 	{
966 		regional_free_all(worker->scratchpad);
967 		if(ret == 1)
968 			goto send_reply;
969 		return ret;
970 	}
971 
972 	/* If this request does not have the recursion bit set, verify
973 	 * ACLs allow the snooping. */
974 	if(!(LDNS_RD_WIRE(sldns_buffer_begin(c->buffer))) &&
975 		acl != acl_allow_snoop ) {
976 		sldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
977 		sldns_buffer_write_at(c->buffer, 4,
978 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
979 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
980 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
981 			LDNS_RCODE_REFUSED);
982 		sldns_buffer_flip(c->buffer);
983 		regional_free_all(worker->scratchpad);
984 		server_stats_insrcode(&worker->stats, c->buffer);
985 		log_addr(VERB_ALGO, "refused nonrec (cache snoop) query from",
986 			&repinfo->addr, repinfo->addrlen);
987 		goto send_reply;
988 	}
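	/* look the query up in the message cache; a hit returns the entry
	 * readlocked so it can be answered directly from the cache */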
989 	h = query_info_hash(&qinfo, sldns_buffer_read_u16_at(c->buffer, 2));
990 	if((e=slabhash_lookup(worker->env.msg_cache, h, &qinfo, 0))) {
991 		/* answer from cache - we have acquired a readlock on it */
992 		if(answer_from_cache(worker, &qinfo,
993 			(struct reply_info*)e->data,
994 			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
995 			sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
996 			&edns)) {
997 			/* prefetch it if the prefetch TTL expired */
998 			if(worker->env.cfg->prefetch && *worker->env.now >=
999 				((struct reply_info*)e->data)->prefetch_ttl) {
1000 				time_t leeway = ((struct reply_info*)e->
1001 					data)->ttl - *worker->env.now;
1002 				lock_rw_unlock(&e->lock);
1003 				reply_and_prefetch(worker, &qinfo,
1004 					sldns_buffer_read_u16_at(c->buffer, 2),
1005 					repinfo, leeway);
1006 				rc = 0;
1007 				regional_free_all(worker->scratchpad);
1008 				goto send_reply_rc;
1009 			}
1010 			lock_rw_unlock(&e->lock);
1011 			regional_free_all(worker->scratchpad);
1012 			goto send_reply;
1013 		}
1014 		verbose(VERB_ALGO, "answer from the cache failed");
1015 		lock_rw_unlock(&e->lock);
1016 	}
1017 	if(!LDNS_RD_WIRE(sldns_buffer_begin(c->buffer))) {
1018 		if(answer_norec_from_cache(worker, &qinfo,
1019 			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
1020 			sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
1021 			&edns)) {
1022 			regional_free_all(worker->scratchpad);
1023 			goto send_reply;
1024 		}
1025 		verbose(VERB_ALGO, "answer norec from cache -- "
1026 			"need to validate or not primed");
1027 	}
1028 	sldns_buffer_rewind(c->buffer);
1029 	server_stats_querymiss(&worker->stats, worker);
1030 
1031 	if(verbosity >= VERB_CLIENT) {
1032 		if(c->type == comm_udp)
1033 			log_addr(VERB_CLIENT, "udp request from",
1034 				&repinfo->addr, repinfo->addrlen);
1035 		else	log_addr(VERB_CLIENT, "tcp request from",
1036 				&repinfo->addr, repinfo->addrlen);
1037 	}
1038 
1039 	/* grab a work request structure for this new request */
1040 	mesh_new_client(worker->env.mesh, &qinfo,
1041 		sldns_buffer_read_u16_at(c->buffer, 2),
1042 		&edns, repinfo, *(uint16_t*)(void *)sldns_buffer_begin(c->buffer));
1043 	regional_free_all(worker->scratchpad);
1044 	worker_mem_report(worker, NULL);
1045 	return 0;
1046 
1047 send_reply:
1048 	rc = 1;
1049 send_reply_rc:
1050 #ifdef USE_DNSTAP
1051 	if(worker->dtenv.log_client_response_messages)
1052 		dt_msg_send_client_response(&worker->dtenv, &repinfo->addr,
1053 			c->type, c->buffer);
1054 #endif
1055 	return rc;
1056 }
1057 
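/** worker signal handler; only flags the worker to exit and stops the
 * comm base, since little is safe to do from signal context. */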
1058 void
1059 worker_sighandler(int sig, void* arg)
1060 {
1061 	/* note that log, print and syscalls here give race conditions,
1062 	 * and cause hangups if the log-lock is held by the application. */
1063 	struct worker* worker = (struct worker*)arg;
1064 	switch(sig) {
1065 #ifdef SIGHUP
1066 		case SIGHUP:
1067 			comm_base_exit(worker->base);
1068 			break;
1069 #endif
1070 		case SIGINT:
1071 			worker->need_to_exit = 1;
1072 			comm_base_exit(worker->base);
1073 			break;
1074 #ifdef SIGQUIT
1075 		case SIGQUIT:
1076 			worker->need_to_exit = 1;
1077 			comm_base_exit(worker->base);
1078 			break;
1079 #endif
1080 		case SIGTERM:
1081 			worker->need_to_exit = 1;
1082 			comm_base_exit(worker->base);
1083 			break;
1084 		default:
1085 			/* unknown signal, ignored */
1086 			break;
1087 	}
1088 }
1089 
1090 /** restart statistics timer for worker, if enabled */
1091 static void
1092 worker_restart_timer(struct worker* worker)
1093 {
1094 	if(worker->env.cfg->stat_interval > 0) {
1095 		struct timeval tv;
1096 #ifndef S_SPLINT_S
1097 		tv.tv_sec = worker->env.cfg->stat_interval;
1098 		tv.tv_usec = 0;
1099 #endif
1100 		comm_timer_set(worker->stat_timer, &tv);
1101 	}
1102 }
1103 
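/** callback for the statistics timer: log the server and mesh
 * statistics and restart the timer. */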
1104 void worker_stat_timer_cb(void* arg)
1105 {
1106 	struct worker* worker = (struct worker*)arg;
1107 	server_stats_log(&worker->stats, worker, worker->thread_num);
1108 	mesh_stats(worker->env.mesh, "mesh has");
1109 	worker_mem_report(worker, NULL);
1110 	if(!worker->daemon->cfg->stat_cumulative) {
1111 		worker_stats_clear(worker);
1112 	}
1113 	/* start next timer */
1114 	worker_restart_timer(worker);
1115 }
1116 
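/** callback for the RFC 5011 probe timer: run the autotrust probe and
 * reschedule the timer for the returned wait time, if nonzero. */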
1117 void worker_probe_timer_cb(void* arg)
1118 {
1119 	struct worker* worker = (struct worker*)arg;
1120 	struct timeval tv;
1121 #ifndef S_SPLINT_S
1122 	tv.tv_sec = (time_t)autr_probe_timer(&worker->env);
1123 	tv.tv_usec = 0;
1124 #endif
1125 	if(tv.tv_sec != 0)
1126 		comm_timer_set(worker->env.probe_timer, &tv);
1127 }
1128 
1129 struct worker*
1130 worker_create(struct daemon* daemon, int id, int* ports, int n)
1131 {
1132 	unsigned int seed;
1133 	struct worker* worker = (struct worker*)calloc(1,
1134 		sizeof(struct worker));
1135 	if(!worker)
1136 		return NULL;
1137 	worker->numports = n;
1138 	worker->ports = (int*)memdup(ports, sizeof(int)*n);
1139 	if(!worker->ports) {
1140 		free(worker);
1141 		return NULL;
1142 	}
1143 	worker->daemon = daemon;
1144 	worker->thread_num = id;
1145 	if(!(worker->cmd = tube_create())) {
1146 		free(worker->ports);
1147 		free(worker);
1148 		return NULL;
1149 	}
1150 	/* create random state here to avoid locking trouble in RAND_bytes */
1151 	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
1152 		(((unsigned int)worker->thread_num)<<17);
1153 		/* shift thread_num so it does not match our pid bits */
1154 	if(!(worker->rndstate = ub_initstate(seed, daemon->rand))) {
1155 		seed = 0;
1156 		log_err("could not init random numbers.");
1157 		tube_delete(worker->cmd);
1158 		free(worker->ports);
1159 		free(worker);
1160 		return NULL;
1161 	}
1162 	seed = 0;
1163 #ifdef USE_DNSTAP
1164 	if(daemon->cfg->dnstap) {
1165 		log_assert(daemon->dtenv != NULL);
1166 		memcpy(&worker->dtenv, daemon->dtenv, sizeof(struct dt_env));
1167 		if(!dt_init(&worker->dtenv))
1168 			fatal_exit("dt_init failed");
1169 	}
1170 #endif
1171 	return worker;
1172 }
1173 
1174 int
1175 worker_init(struct worker* worker, struct config_file *cfg,
1176 	struct listen_port* ports, int do_sigs)
1177 {
1178 #ifdef USE_DNSTAP
1179 	struct dt_env* dtenv = &worker->dtenv;
1180 #else
1181 	void* dtenv = NULL;
1182 #endif
1183 	worker->need_to_exit = 0;
1184 	worker->base = comm_base_create(do_sigs);
1185 	if(!worker->base) {
1186 		log_err("could not create event handling base");
1187 		worker_delete(worker);
1188 		return 0;
1189 	}
1190 	comm_base_set_slow_accept_handlers(worker->base, &worker_stop_accept,
1191 		&worker_start_accept, worker);
1192 	if(do_sigs) {
1193 #ifdef SIGHUP
1194 		ub_thread_sig_unblock(SIGHUP);
1195 #endif
1196 		ub_thread_sig_unblock(SIGINT);
1197 #ifdef SIGQUIT
1198 		ub_thread_sig_unblock(SIGQUIT);
1199 #endif
1200 		ub_thread_sig_unblock(SIGTERM);
1201 #ifndef LIBEVENT_SIGNAL_PROBLEM
1202 		worker->comsig = comm_signal_create(worker->base,
1203 			worker_sighandler, worker);
1204 		if(!worker->comsig
1205 #ifdef SIGHUP
1206 			|| !comm_signal_bind(worker->comsig, SIGHUP)
1207 #endif
1208 #ifdef SIGQUIT
1209 			|| !comm_signal_bind(worker->comsig, SIGQUIT)
1210 #endif
1211 			|| !comm_signal_bind(worker->comsig, SIGTERM)
1212 			|| !comm_signal_bind(worker->comsig, SIGINT)) {
1213 			log_err("could not create signal handlers");
1214 			worker_delete(worker);
1215 			return 0;
1216 		}
1217 #endif /* LIBEVENT_SIGNAL_PROBLEM */
1218 		if(!daemon_remote_open_accept(worker->daemon->rc,
1219 			worker->daemon->rc_ports, worker)) {
1220 			worker_delete(worker);
1221 			return 0;
1222 		}
1223 #ifdef UB_ON_WINDOWS
1224 		wsvc_setup_worker(worker);
1225 #endif /* UB_ON_WINDOWS */
1226 	} else { /* !do_sigs */
1227 		worker->comsig = NULL;
1228 	}
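	/* create the front-side listening ports for incoming queries */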
1229 	worker->front = listen_create(worker->base, ports,
1230 		cfg->msg_buffer_size, (int)cfg->incoming_num_tcp,
1231 		worker->daemon->listen_sslctx, dtenv, worker_handle_request,
1232 		worker);
1233 	if(!worker->front) {
1234 		log_err("could not create listening sockets");
1235 		worker_delete(worker);
1236 		return 0;
1237 	}
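	/* create the outgoing network state, ports and buffers */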
1238 	worker->back = outside_network_create(worker->base,
1239 		cfg->msg_buffer_size, (size_t)cfg->outgoing_num_ports,
1240 		cfg->out_ifs, cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
1241 		cfg->do_tcp?cfg->outgoing_num_tcp:0,
1242 		worker->daemon->env->infra_cache, worker->rndstate,
1243 		cfg->use_caps_bits_for_id, worker->ports, worker->numports,
1244 		cfg->unwanted_threshold, cfg->outgoing_tcp_mss,
1245 		&worker_alloc_cleanup, worker,
1246 		cfg->do_udp, worker->daemon->connect_sslctx, cfg->delay_close,
1247 		dtenv);
1248 	if(!worker->back) {
1249 		log_err("could not create outgoing sockets");
1250 		worker_delete(worker);
1251 		return 0;
1252 	}
1253 	/* start listening to commands */
1254 	if(!tube_setup_bg_listen(worker->cmd, worker->base,
1255 		&worker_handle_control_cmd, worker)) {
1256 		log_err("could not create control compt.");
1257 		worker_delete(worker);
1258 		return 0;
1259 	}
1260 	worker->stat_timer = comm_timer_create(worker->base,
1261 		worker_stat_timer_cb, worker);
1262 	if(!worker->stat_timer) {
1263 		log_err("could not create statistics timer");
1264 	}
1265 
1266 	/* we use the msg_buffer_size as a good estimate for what the
1267 	 * user wants for memory usage sizes */
1268 	worker->scratchpad = regional_create_custom(cfg->msg_buffer_size);
1269 	if(!worker->scratchpad) {
1270 		log_err("malloc failure");
1271 		worker_delete(worker);
1272 		return 0;
1273 	}
1274 
1275 	server_stats_init(&worker->stats, cfg);
1276 	alloc_init(&worker->alloc, &worker->daemon->superalloc,
1277 		worker->thread_num);
1278 	alloc_set_id_cleanup(&worker->alloc, &worker_alloc_cleanup, worker);
1279 	worker->env = *worker->daemon->env;
1280 	comm_base_timept(worker->base, &worker->env.now, &worker->env.now_tv);
1281 	if(worker->thread_num == 0)
1282 		log_set_time(worker->env.now);
1283 	worker->env.worker = worker;
1284 	worker->env.send_query = &worker_send_query;
1285 	worker->env.alloc = &worker->alloc;
1286 	worker->env.rnd = worker->rndstate;
1287 	worker->env.scratch = worker->scratchpad;
1288 	worker->env.mesh = mesh_create(&worker->daemon->mods, &worker->env);
1289 	worker->env.detach_subs = &mesh_detach_subs;
1290 	worker->env.attach_sub = &mesh_attach_sub;
1291 	worker->env.kill_sub = &mesh_state_delete;
1292 	worker->env.detect_cycle = &mesh_detect_cycle;
1293 	worker->env.scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
1294 	if(!(worker->env.fwds = forwards_create()) ||
1295 		!forwards_apply_cfg(worker->env.fwds, cfg)) {
1296 		log_err("Could not set forward zones");
1297 		worker_delete(worker);
1298 		return 0;
1299 	}
1300 	if(!(worker->env.hints = hints_create()) ||
1301 		!hints_apply_cfg(worker->env.hints, cfg)) {
1302 		log_err("Could not set root or stub hints");
1303 		worker_delete(worker);
1304 		return 0;
1305 	}
1306 	/* one probe timer per process -- if we have 5011 anchors */
1307 	if(autr_get_num_anchors(worker->env.anchors) > 0
1308 #ifndef THREADS_DISABLED
1309 		&& worker->thread_num == 0
1310 #endif
1311 		) {
1312 		struct timeval tv;
1313 		tv.tv_sec = 0;
1314 		tv.tv_usec = 0;
1315 		worker->env.probe_timer = comm_timer_create(worker->base,
1316 			worker_probe_timer_cb, worker);
1317 		if(!worker->env.probe_timer) {
1318 			log_err("could not create 5011-probe timer");
1319 		} else {
1320 			/* let timer fire, then it can reset itself */
1321 			comm_timer_set(worker->env.probe_timer, &tv);
1322 		}
1323 	}
1324 	if(!worker->env.mesh || !worker->env.scratch_buffer) {
1325 		worker_delete(worker);
1326 		return 0;
1327 	}
1328 	worker_mem_report(worker, NULL);
1329 	/* if statistics enabled start timer */
1330 	if(worker->env.cfg->stat_interval > 0) {
1331 		verbose(VERB_ALGO, "set statistics interval %d secs",
1332 			worker->env.cfg->stat_interval);
1333 		worker_restart_timer(worker);
1334 	}
1335 	return 1;
1336 }
1337 
1338 void
1339 worker_work(struct worker* worker)
1340 {
1341 	comm_base_dispatch(worker->base);
1342 }
1343 
1344 void
1345 worker_delete(struct worker* worker)
1346 {
1347 	if(!worker)
1348 		return;
1349 	if(worker->env.mesh && verbosity >= VERB_OPS) {
1350 		server_stats_log(&worker->stats, worker, worker->thread_num);
1351 		mesh_stats(worker->env.mesh, "mesh has");
1352 		worker_mem_report(worker, NULL);
1353 	}
1354 	outside_network_quit_prepare(worker->back);
1355 	mesh_delete(worker->env.mesh);
1356 	sldns_buffer_free(worker->env.scratch_buffer);
1357 	forwards_delete(worker->env.fwds);
1358 	hints_delete(worker->env.hints);
1359 	listen_delete(worker->front);
1360 	outside_network_delete(worker->back);
1361 	comm_signal_delete(worker->comsig);
1362 	tube_delete(worker->cmd);
1363 	comm_timer_delete(worker->stat_timer);
1364 	comm_timer_delete(worker->env.probe_timer);
1365 	free(worker->ports);
1366 	if(worker->thread_num == 0) {
1367 		log_set_time(NULL);
1368 #ifdef UB_ON_WINDOWS
1369 		wsvc_desetup_worker(worker);
1370 #endif /* UB_ON_WINDOWS */
1371 	}
1372 	comm_base_delete(worker->base);
1373 	ub_randfree(worker->rndstate);
1374 	alloc_clear(&worker->alloc);
1375 	regional_destroy(worker->scratchpad);
1376 	free(worker);
1377 }
1378 
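/** send a DNS query upstream for a module; allocates an outbound_entry
 * in the query region and hands the query to the serviced-query code
 * of the outgoing network. */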
1379 struct outbound_entry*
1380 worker_send_query(uint8_t* qname, size_t qnamelen, uint16_t qtype,
1381 	uint16_t qclass, uint16_t flags, int dnssec, int want_dnssec,
1382 	int nocaps, struct edns_option* opt_list,
1383 	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
1384 	size_t zonelen, struct module_qstate* q)
1385 {
1386 	struct worker* worker = q->env->worker;
1387 	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
1388 		q->region, sizeof(*e));
1389 	if(!e)
1390 		return NULL;
1391 	e->qstate = q;
1392 	e->qsent = outnet_serviced_query(worker->back, qname,
1393 		qnamelen, qtype, qclass, flags, dnssec, want_dnssec, nocaps,
1394 		q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, opt_list,
1395 		addr, addrlen, zone, zonelen, worker_handle_service_reply, e,
1396 		worker->back->udp_buff);
1397 	if(!e->qsent) {
1398 		return NULL;
1399 	}
1400 	return e;
1401 }
1402 
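/** alloc-cache cleanup callback (registered with alloc_set_id_cleanup
 * and the outside network): clears the rrset and message caches. */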
1403 void
1404 worker_alloc_cleanup(void* arg)
1405 {
1406 	struct worker* worker = (struct worker*)arg;
1407 	slabhash_clear(&worker->env.rrset_cache->table);
1408 	slabhash_clear(worker->env.msg_cache);
1409 }
1410 
1411 void worker_stats_clear(struct worker* worker)
1412 {
1413 	server_stats_init(&worker->stats, worker->env.cfg);
1414 	mesh_stats_clear(worker->env.mesh);
1415 	worker->back->unwanted_replies = 0;
1416 	worker->back->num_tcp_outgoing = 0;
1417 }
1418 
1419 void worker_start_accept(void* arg)
1420 {
1421 	struct worker* worker = (struct worker*)arg;
1422 	listen_start_accept(worker->front);
1423 	if(worker->thread_num == 0)
1424 		daemon_remote_start_accept(worker->daemon->rc);
1425 }
1426 
1427 void worker_stop_accept(void* arg)
1428 {
1429 	struct worker* worker = (struct worker*)arg;
1430 	listen_stop_accept(worker->front);
1431 	if(worker->thread_num == 0)
1432 		daemon_remote_stop_accept(worker->daemon->rc);
1433 }
1434 
1435 /* --- fake callbacks for fptr_wlist to work --- */
1436 struct outbound_entry* libworker_send_query(uint8_t* ATTR_UNUSED(qname),
1437 	size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
1438 	uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
1439 	int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
1440 	int ATTR_UNUSED(nocaps), struct edns_option* ATTR_UNUSED(opt_list),
1441 	struct sockaddr_storage* ATTR_UNUSED(addr),
1442 	socklen_t ATTR_UNUSED(addrlen), uint8_t* ATTR_UNUSED(zone),
1443 	size_t ATTR_UNUSED(zonelen), struct module_qstate* ATTR_UNUSED(q))
1444 {
1445 	log_assert(0);
1446 	return 0;
1447 }
1448 
1449 int libworker_handle_reply(struct comm_point* ATTR_UNUSED(c),
1450 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
1451         struct comm_reply* ATTR_UNUSED(reply_info))
1452 {
1453 	log_assert(0);
1454 	return 0;
1455 }
1456 
1457 int libworker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
1458 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
1459         struct comm_reply* ATTR_UNUSED(reply_info))
1460 {
1461 	log_assert(0);
1462 	return 0;
1463 }
1464 
1465 void libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
1466         uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
1467         int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
1468 {
1469 	log_assert(0);
1470 }
1471 
1472 void libworker_fg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1473         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1474 	char* ATTR_UNUSED(why_bogus))
1475 {
1476 	log_assert(0);
1477 }
1478 
1479 void libworker_bg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1480         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1481 	char* ATTR_UNUSED(why_bogus))
1482 {
1483 	log_assert(0);
1484 }
1485 
1486 void libworker_event_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1487         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1488 	char* ATTR_UNUSED(why_bogus))
1489 {
1490 	log_assert(0);
1491 }
1492 
1493 int context_query_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1494 {
1495 	log_assert(0);
1496 	return 0;
1497 }
1498 
1499 int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
1500 {
1501         log_assert(0);
1502         return 0;
1503 }
1504 
1505 int codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1506 {
1507         log_assert(0);
1508         return 0;
1509 }
1510 
1511