xref: /freebsd/contrib/unbound/daemon/worker.c (revision 7e00348e7605b9906601438008341ffc37c00e2c)
1 /*
2  * daemon/worker.c - worker that handles a pending list of requests.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file implements the worker that handles callbacks on events for
40  * pending requests.
41  */
42 #include "config.h"
43 #include "util/log.h"
44 #include "util/net_help.h"
45 #include "util/random.h"
46 #include "daemon/worker.h"
47 #include "daemon/daemon.h"
48 #include "daemon/remote.h"
49 #include "daemon/acl_list.h"
50 #include "util/netevent.h"
51 #include "util/config_file.h"
52 #include "util/module.h"
53 #include "util/regional.h"
54 #include "util/storage/slabhash.h"
55 #include "services/listen_dnsport.h"
56 #include "services/outside_network.h"
57 #include "services/outbound_list.h"
58 #include "services/cache/rrset.h"
59 #include "services/cache/infra.h"
60 #include "services/cache/dns.h"
61 #include "services/mesh.h"
62 #include "services/localzone.h"
63 #include "util/data/msgparse.h"
64 #include "util/data/msgencode.h"
65 #include "util/data/dname.h"
66 #include "util/fptr_wlist.h"
67 #include "util/tube.h"
68 #include "iterator/iter_fwd.h"
69 #include "iterator/iter_hints.h"
70 #include "validator/autotrust.h"
71 #include "validator/val_anchor.h"
72 #include "libunbound/context.h"
73 #include "libunbound/libworker.h"
74 #include "ldns/sbuffer.h"
75 
76 #ifdef HAVE_SYS_TYPES_H
77 #  include <sys/types.h>
78 #endif
79 #ifdef HAVE_NETDB_H
80 #include <netdb.h>
81 #endif
82 #include <signal.h>
83 #ifdef UB_ON_WINDOWS
84 #include "winrc/win_svc.h"
85 #endif
86 
87 /** Size of a UDP datagram */
88 #define NORMAL_UDP_SIZE	512 /* bytes */
89 
90 /**
91  * Seconds to add to the prefetch leeway.  This is a TTL margin that
92  * expires old rrsets earlier than strictly needed, so that the new update
93  * can be put into the cache.  The extra value makes sure that, if not all
94  * TTLs in the message to be updated (and replaced) are equal, rrsets with
95  * up to this much extra TTL are also replaced.  The resulting new message
96  * will then (most likely) have at least this TTL, avoiding very small
97  * 'split second' TTLs when operators pick TTLs that are relatively prime.
98  * It also has to be at least one to break ties (and overwrite the cached entry).
99  */
100 #define PREFETCH_EXPIRY_ADD 60
101 
102 #ifdef UNBOUND_ALLOC_STATS
103 /** measure memory leakage */
104 static void
105 debug_memleak(size_t accounted, size_t heap,
106 	size_t total_alloc, size_t total_free)
107 {
108 	static int init = 0;
109 	static size_t base_heap, base_accounted, base_alloc, base_free;
110 	size_t base_af, cur_af, grow_af, grow_acc;
111 	if(!init) {
112 		init = 1;
113 		base_heap = heap;
114 		base_accounted = accounted;
115 		base_alloc = total_alloc;
116 		base_free = total_free;
117 	}
118 	base_af = base_alloc - base_free;
119 	cur_af = total_alloc - total_free;
120 	grow_af = cur_af - base_af;
121 	grow_acc = accounted - base_accounted;
122 	log_info("Leakage: %d leaked. growth: %u use, %u acc, %u heap",
123 		(int)(grow_af - grow_acc), (unsigned)grow_af,
124 		(unsigned)grow_acc, (unsigned)(heap - base_heap));
125 }
126 
127 /** give debug heap size indication */
128 static void
129 debug_total_mem(size_t calctotal)
130 {
131 #ifdef HAVE_SBRK
132 	extern void* unbound_start_brk;
133 	extern size_t unbound_mem_alloc, unbound_mem_freed;
134 	void* cur = sbrk(0);
135 	int total = cur-unbound_start_brk;
136 	log_info("Total heap memory estimate: %u  total-alloc: %u  "
137 		"total-free: %u", (unsigned)total,
138 		(unsigned)unbound_mem_alloc, (unsigned)unbound_mem_freed);
139 	debug_memleak(calctotal, (size_t)total,
140 		unbound_mem_alloc, unbound_mem_freed);
141 #else
142 	(void)calctotal;
143 #endif /* HAVE_SBRK */
144 }
145 #endif /* UNBOUND_ALLOC_STATS */
146 
147 /** Report on memory usage by this thread and globally */
148 static void
149 worker_mem_report(struct worker* ATTR_UNUSED(worker),
150 	struct serviced_query* ATTR_UNUSED(cur_serv))
151 {
152 #ifdef UNBOUND_ALLOC_STATS
153 	/* debug func in validator module */
154 	size_t total, front, back, mesh, msg, rrset, infra, ac, superac;
155 	size_t me, iter, val, anch;
156 	int i;
157 	if(verbosity < VERB_ALGO)
158 		return;
159 	front = listen_get_mem(worker->front);
160 	back = outnet_get_mem(worker->back);
161 	msg = slabhash_get_mem(worker->env.msg_cache);
162 	rrset = slabhash_get_mem(&worker->env.rrset_cache->table);
163 	infra = infra_get_mem(worker->env.infra_cache);
164 	mesh = mesh_get_mem(worker->env.mesh);
165 	ac = alloc_get_mem(&worker->alloc);
166 	superac = alloc_get_mem(&worker->daemon->superalloc);
167 	anch = anchors_get_mem(worker->env.anchors);
168 	iter = 0;
169 	val = 0;
170 	for(i=0; i<worker->env.mesh->mods.num; i++) {
171 		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
172 			mods.mod[i]->get_mem));
173 		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
174 			val += (*worker->env.mesh->mods.mod[i]->get_mem)
175 				(&worker->env, i);
176 		else	iter += (*worker->env.mesh->mods.mod[i]->get_mem)
177 				(&worker->env, i);
178 	}
179 	me = sizeof(*worker) + sizeof(*worker->base) + sizeof(*worker->comsig)
180 		+ comm_point_get_mem(worker->cmd_com)
181 		+ sizeof(worker->rndstate)
182 		+ regional_get_mem(worker->scratchpad)
183 		+ sizeof(*worker->env.scratch_buffer)
184 		+ sldns_buffer_capacity(worker->env.scratch_buffer)
185 		+ forwards_get_mem(worker->env.fwds)
186 		+ hints_get_mem(worker->env.hints);
187 	if(worker->thread_num == 0)
188 		me += acl_list_get_mem(worker->daemon->acl);
189 	if(cur_serv) {
190 		me += serviced_get_mem(cur_serv);
191 	}
192 	total = front+back+mesh+msg+rrset+infra+iter+val+ac+superac+me;
193 	log_info("Memory conditions: %u front=%u back=%u mesh=%u msg=%u "
194 		"rrset=%u infra=%u iter=%u val=%u anchors=%u "
195 		"alloccache=%u globalalloccache=%u me=%u",
196 		(unsigned)total, (unsigned)front, (unsigned)back,
197 		(unsigned)mesh, (unsigned)msg, (unsigned)rrset,
198 		(unsigned)infra, (unsigned)iter, (unsigned)val, (unsigned)anch,
199 		(unsigned)ac, (unsigned)superac, (unsigned)me);
200 	debug_total_mem(total);
201 #else /* no UNBOUND_ALLOC_STATS */
202 	size_t val = 0;
203 	int i;
204 	if(verbosity < VERB_QUERY)
205 		return;
206 	for(i=0; i<worker->env.mesh->mods.num; i++) {
207 		fptr_ok(fptr_whitelist_mod_get_mem(worker->env.mesh->
208 			mods.mod[i]->get_mem));
209 		if(strcmp(worker->env.mesh->mods.mod[i]->name, "validator")==0)
210 			val += (*worker->env.mesh->mods.mod[i]->get_mem)
211 				(&worker->env, i);
212 	}
213 	verbose(VERB_QUERY, "cache memory msg=%u rrset=%u infra=%u val=%u",
214 		(unsigned)slabhash_get_mem(worker->env.msg_cache),
215 		(unsigned)slabhash_get_mem(&worker->env.rrset_cache->table),
216 		(unsigned)infra_get_mem(worker->env.infra_cache),
217 		(unsigned)val);
218 #endif /* UNBOUND_ALLOC_STATS */
219 }
220 
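/** write a 32bit command in network byte order onto the worker's command
 * tube; it is picked up asynchronously by worker_handle_control_cmd. */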
221 void
222 worker_send_cmd(struct worker* worker, enum worker_commands cmd)
223 {
224 	uint32_t c = (uint32_t)htonl(cmd);
225 	if(!tube_write_msg(worker->cmd, (uint8_t*)&c, sizeof(c), 0)) {
226 		log_err("worker send cmd %d failed", (int)cmd);
227 	}
228 }
229 
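/** callback for a reply on an outgoing query whose callback argument is
 * the module query state itself; builds a temporary outbound entry and
 * reports the result, or a timeout for a malformed reply, to the mesh. */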
230 int
231 worker_handle_reply(struct comm_point* c, void* arg, int error,
232 	struct comm_reply* reply_info)
233 {
234 	struct module_qstate* q = (struct module_qstate*)arg;
235 	struct worker* worker = q->env->worker;
236 	struct outbound_entry e;
237 	e.qstate = q;
238 	e.qsent = NULL;
239 
240 	if(error != 0) {
241 		mesh_report_reply(worker->env.mesh, &e, reply_info, error);
242 		worker_mem_report(worker, NULL);
243 		return 0;
244 	}
245 	/* sanity check. */
246 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
247 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
248 			LDNS_PACKET_QUERY
249 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
250 		/* error becomes timeout for the module as if this reply
251 		 * never arrived. */
252 		mesh_report_reply(worker->env.mesh, &e, reply_info,
253 			NETEVENT_TIMEOUT);
254 		worker_mem_report(worker, NULL);
255 		return 0;
256 	}
257 	mesh_report_reply(worker->env.mesh, &e, reply_info, NETEVENT_NOERROR);
258 	worker_mem_report(worker, NULL);
259 	return 0;
260 }
261 
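/** callback for a reply on a serviced outgoing query; reports the result,
 * or a timeout for a malformed reply, to the mesh for the query state
 * attached to the outbound entry. */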
262 int
263 worker_handle_service_reply(struct comm_point* c, void* arg, int error,
264 	struct comm_reply* reply_info)
265 {
266 	struct outbound_entry* e = (struct outbound_entry*)arg;
267 	struct worker* worker = e->qstate->env->worker;
268 	struct serviced_query *sq = e->qsent;
269 
270 	verbose(VERB_ALGO, "worker svcd callback for qstate %p", e->qstate);
271 	if(error != 0) {
272 		mesh_report_reply(worker->env.mesh, e, reply_info, error);
273 		worker_mem_report(worker, sq);
274 		return 0;
275 	}
276 	/* sanity check. */
277 	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
278 		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
279 			LDNS_PACKET_QUERY
280 		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
281 		/* error becomes timeout for the module as if this reply
282 		 * never arrived. */
283 		verbose(VERB_ALGO, "worker: bad reply handled as timeout");
284 		mesh_report_reply(worker->env.mesh, e, reply_info,
285 			NETEVENT_TIMEOUT);
286 		worker_mem_report(worker, sq);
287 		return 0;
288 	}
289 	mesh_report_reply(worker->env.mesh, e, reply_info, NETEVENT_NOERROR);
290 	worker_mem_report(worker, sq);
291 	return 0;
292 }
293 
294 /** check request sanity.
295  * @param pkt: the wire packet to examine for sanity.
296  * @param worker: parameters for checking.
297  * @return LDNS rcode to reply with, 0 if the request is OK, or -1 to discard.
298  */
299 static int
300 worker_check_request(sldns_buffer* pkt, struct worker* worker)
301 {
302 	if(sldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) {
303 		verbose(VERB_QUERY, "request too short, discarded");
304 		return -1;
305 	}
306 	if(sldns_buffer_limit(pkt) > NORMAL_UDP_SIZE &&
307 		worker->daemon->cfg->harden_large_queries) {
308 		verbose(VERB_QUERY, "request too large, discarded");
309 		return -1;
310 	}
311 	if(LDNS_QR_WIRE(sldns_buffer_begin(pkt))) {
312 		verbose(VERB_QUERY, "request has QR bit on, discarded");
313 		return -1;
314 	}
315 	if(LDNS_TC_WIRE(sldns_buffer_begin(pkt))) {
316 		LDNS_TC_CLR(sldns_buffer_begin(pkt));
317 		verbose(VERB_QUERY, "request bad, has TC bit on");
318 		return LDNS_RCODE_FORMERR;
319 	}
320 	if(LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)) != LDNS_PACKET_QUERY) {
321 		verbose(VERB_QUERY, "request unknown opcode %d",
322 			LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)));
323 		return LDNS_RCODE_NOTIMPL;
324 	}
325 	if(LDNS_QDCOUNT(sldns_buffer_begin(pkt)) != 1) {
326 		verbose(VERB_QUERY, "request wrong nr qd=%d",
327 			LDNS_QDCOUNT(sldns_buffer_begin(pkt)));
328 		return LDNS_RCODE_FORMERR;
329 	}
330 	if(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) != 0) {
331 		verbose(VERB_QUERY, "request wrong nr an=%d",
332 			LDNS_ANCOUNT(sldns_buffer_begin(pkt)));
333 		return LDNS_RCODE_FORMERR;
334 	}
335 	if(LDNS_NSCOUNT(sldns_buffer_begin(pkt)) != 0) {
336 		verbose(VERB_QUERY, "request wrong nr ns=%d",
337 			LDNS_NSCOUNT(sldns_buffer_begin(pkt)));
338 		return LDNS_RCODE_FORMERR;
339 	}
340 	if(LDNS_ARCOUNT(sldns_buffer_begin(pkt)) > 1) {
341 		verbose(VERB_QUERY, "request wrong nr ar=%d",
342 			LDNS_ARCOUNT(sldns_buffer_begin(pkt)));
343 		return LDNS_RCODE_FORMERR;
344 	}
345 	return 0;
346 }
347 
348 void
349 worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube), uint8_t* msg,
350 	size_t len, int error, void* arg)
351 {
352 	struct worker* worker = (struct worker*)arg;
353 	enum worker_commands cmd;
354 	if(error != NETEVENT_NOERROR) {
355 		free(msg);
356 		if(error == NETEVENT_CLOSED)
357 			comm_base_exit(worker->base);
358 		else	log_info("control event: %d", error);
359 		return;
360 	}
361 	if(len != sizeof(uint32_t)) {
362 		fatal_exit("bad control msg length %d", (int)len);
363 	}
364 	cmd = sldns_read_uint32(msg);
365 	free(msg);
366 	switch(cmd) {
367 	case worker_cmd_quit:
368 		verbose(VERB_ALGO, "got control cmd quit");
369 		comm_base_exit(worker->base);
370 		break;
371 	case worker_cmd_stats:
372 		verbose(VERB_ALGO, "got control cmd stats");
373 		server_stats_reply(worker, 1);
374 		break;
375 	case worker_cmd_stats_noreset:
376 		verbose(VERB_ALGO, "got control cmd stats_noreset");
377 		server_stats_reply(worker, 0);
378 		break;
379 	case worker_cmd_remote:
380 		verbose(VERB_ALGO, "got control cmd remote");
381 		daemon_remote_exec(worker);
382 		break;
383 	default:
384 		log_err("bad command %d", (int)cmd);
385 		break;
386 	}
387 }
388 
389 /** check if a delegation is secure */
390 static enum sec_status
391 check_delegation_secure(struct reply_info *rep)
392 {
393 	/* return smallest security status */
394 	size_t i;
395 	enum sec_status sec = sec_status_secure;
396 	enum sec_status s;
397 	size_t num = rep->an_numrrsets + rep->ns_numrrsets;
398 	/* check if answer and authority are OK */
399 	for(i=0; i<num; i++) {
400 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
401 			->security;
402 		if(s < sec)
403 			sec = s;
404 	}
405 	/* in the additional section, only 'unchecked' triggers revalidation */
406 	for(i=num; i<rep->rrset_count; i++) {
407 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
408 			->security;
409 		if(s == sec_status_unchecked)
410 			return s;
411 	}
412 	return sec;
413 }
414 
415 /** remove nonsecure from a delegation referral additional section */
416 static void
417 deleg_remove_nonsecure_additional(struct reply_info* rep)
418 {
419 	/* we can simply edit it, since we are working in the scratch region */
420 	size_t i;
421 	enum sec_status s;
422 
423 	for(i = rep->an_numrrsets+rep->ns_numrrsets; i<rep->rrset_count; i++) {
424 		s = ((struct packed_rrset_data*)rep->rrsets[i]->entry.data)
425 			->security;
426 		if(s != sec_status_secure) {
427 			memmove(rep->rrsets+i, rep->rrsets+i+1,
428 				sizeof(struct ub_packed_rrset_key*)*
429 				(rep->rrset_count - i - 1));
430 			rep->ar_numrrsets--;
431 			rep->rrset_count--;
432 			i--;
433 		}
434 	}
435 }
436 
437 /** answer nonrecursive query from the cache */
438 static int
439 answer_norec_from_cache(struct worker* worker, struct query_info* qinfo,
440 	uint16_t id, uint16_t flags, struct comm_reply* repinfo,
441 	struct edns_data* edns)
442 {
443 	/* for a nonrecursive query return either:
444 	 * 	o an error (servfail; we try to avoid this)
445 	 * 	o a delegation (closest we have; this routine tries that)
446 	 * 	o the answer (checked by answer_from_cache)
447 	 *
448 	 * So, grab a delegation from the rrset cache.
449 	 * Then check if it needs validation; if so, this routine fails
450 	 * so that the iterator can prime and the validator can verify rrsets.
451 	 */
452 	uint16_t udpsize = edns->udp_size;
453 	int secure = 0;
454 	time_t timenow = *worker->env.now;
455 	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
456 		&& worker->env.need_to_validate;
457 	struct dns_msg *msg = NULL;
458 	struct delegpt *dp;
459 
460 	dp = dns_cache_find_delegation(&worker->env, qinfo->qname,
461 		qinfo->qname_len, qinfo->qtype, qinfo->qclass,
462 		worker->scratchpad, &msg, timenow);
463 	if(!dp) { /* no delegation, need to reprime */
464 		regional_free_all(worker->scratchpad);
465 		return 0;
466 	}
467 	if(must_validate) {
468 		switch(check_delegation_secure(msg->rep)) {
469 		case sec_status_unchecked:
470 			/* some rrsets have not been verified yet, go and
471 			 * let validator do that */
472 			regional_free_all(worker->scratchpad);
473 			return 0;
474 		case sec_status_bogus:
475 			/* some rrsets are bogus, reply servfail */
476 			edns->edns_version = EDNS_ADVERTISED_VERSION;
477 			edns->udp_size = EDNS_ADVERTISED_SIZE;
478 			edns->ext_rcode = 0;
479 			edns->bits &= EDNS_DO;
480 			error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
481 				&msg->qinfo, id, flags, edns);
482 			regional_free_all(worker->scratchpad);
483 			if(worker->stats.extended) {
484 				worker->stats.ans_bogus++;
485 				worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL]++;
486 			}
487 			return 1;
488 		case sec_status_secure:
489 			/* all rrsets are secure */
490 			/* remove non-secure rrsets from the additional section */
491 			if(worker->env.cfg->val_clean_additional)
492 				deleg_remove_nonsecure_additional(msg->rep);
493 			secure = 1;
494 			break;
495 		case sec_status_indeterminate:
496 		case sec_status_insecure:
497 		default:
498 			/* not secure */
499 			secure = 0;
500 			break;
501 		}
502 	}
503 	/* return this delegation from the cache */
504 	edns->edns_version = EDNS_ADVERTISED_VERSION;
505 	edns->udp_size = EDNS_ADVERTISED_SIZE;
506 	edns->ext_rcode = 0;
507 	edns->bits &= EDNS_DO;
508 	msg->rep->flags |= BIT_QR|BIT_RA;
509 	if(!reply_info_answer_encode(&msg->qinfo, msg->rep, id, flags,
510 		repinfo->c->buffer, 0, 1, worker->scratchpad,
511 		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
512 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
513 			&msg->qinfo, id, flags, edns);
514 	}
515 	regional_free_all(worker->scratchpad);
516 	if(worker->stats.extended) {
517 		if(secure) worker->stats.ans_secure++;
518 		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
519 	}
520 	return 1;
521 }
522 
523 /** answer query from the cache */
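/* Returns 1 if a reply (possibly a SERVFAIL for a bogus cached entry) has
 * been encoded into repinfo->c->buffer, 0 if the cached data cannot be
 * used and the query has to be resolved. */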
524 static int
525 answer_from_cache(struct worker* worker, struct query_info* qinfo,
526 	struct reply_info* rep, uint16_t id, uint16_t flags,
527 	struct comm_reply* repinfo, struct edns_data* edns)
528 {
529 	time_t timenow = *worker->env.now;
530 	uint16_t udpsize = edns->udp_size;
531 	int secure;
532 	int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
533 		&& worker->env.need_to_validate;
534 	/* see if it is possible */
535 	if(rep->ttl < timenow) {
536 		/* the rrsets may have been updated in the meantime.
537 		 * we will refetch the message format from the
538 		 * authoritative server
539 		 */
540 		return 0;
541 	}
542 	if(!rrset_array_lock(rep->ref, rep->rrset_count, timenow))
543 		return 0;
544 	/* locked and ids and ttls are OK. */
545 	/* check CNAME chain (if any) */
546 	if(rep->an_numrrsets > 0 && (rep->rrsets[0]->rk.type ==
547 		htons(LDNS_RR_TYPE_CNAME) || rep->rrsets[0]->rk.type ==
548 		htons(LDNS_RR_TYPE_DNAME))) {
549 		if(!reply_check_cname_chain(rep)) {
550 			/* cname chain invalid, redo iterator steps */
551 			verbose(VERB_ALGO, "Cache reply: cname chain broken");
552 		bail_out:
553 			rrset_array_unlock_touch(worker->env.rrset_cache,
554 				worker->scratchpad, rep->ref, rep->rrset_count);
555 			regional_free_all(worker->scratchpad);
556 			return 0;
557 		}
558 	}
559 	/* check security status of the cached answer */
560 	if( rep->security == sec_status_bogus && must_validate) {
561 		/* BAD cached */
562 		edns->edns_version = EDNS_ADVERTISED_VERSION;
563 		edns->udp_size = EDNS_ADVERTISED_SIZE;
564 		edns->ext_rcode = 0;
565 		edns->bits &= EDNS_DO;
566 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
567 			qinfo, id, flags, edns);
568 		rrset_array_unlock_touch(worker->env.rrset_cache,
569 			worker->scratchpad, rep->ref, rep->rrset_count);
570 		regional_free_all(worker->scratchpad);
571 		if(worker->stats.extended) {
572 			worker->stats.ans_bogus ++;
573 			worker->stats.ans_rcode[LDNS_RCODE_SERVFAIL] ++;
574 		}
575 		return 1;
576 	} else if( rep->security == sec_status_unchecked && must_validate) {
577 		verbose(VERB_ALGO, "Cache reply: unchecked entry needs "
578 			"validation");
579 		goto bail_out; /* need to validate cache entry first */
580 	} else if(rep->security == sec_status_secure) {
581 		if(reply_all_rrsets_secure(rep))
582 			secure = 1;
583 		else	{
584 			if(must_validate) {
585 				verbose(VERB_ALGO, "Cache reply: secure entry"
586 					" changed status");
587 				goto bail_out; /* rrset changed, re-verify */
588 			}
589 			secure = 0;
590 		}
591 	} else	secure = 0;
592 
593 	edns->edns_version = EDNS_ADVERTISED_VERSION;
594 	edns->udp_size = EDNS_ADVERTISED_SIZE;
595 	edns->ext_rcode = 0;
596 	edns->bits &= EDNS_DO;
597 	if(!reply_info_answer_encode(qinfo, rep, id, flags,
598 		repinfo->c->buffer, timenow, 1, worker->scratchpad,
599 		udpsize, edns, (int)(edns->bits & EDNS_DO), secure)) {
600 		error_encode(repinfo->c->buffer, LDNS_RCODE_SERVFAIL,
601 			qinfo, id, flags, edns);
602 	}
603 	/* cannot send the reply right now, because a blocking network
604 	 * syscall is bad while holding locks. */
605 	rrset_array_unlock_touch(worker->env.rrset_cache, worker->scratchpad,
606 		rep->ref, rep->rrset_count);
607 	regional_free_all(worker->scratchpad);
608 	if(worker->stats.extended) {
609 		if(secure) worker->stats.ans_secure++;
610 		server_stats_insrcode(&worker->stats, repinfo->c->buffer);
611 	}
612 	/* go and return this buffer to the client */
613 	return 1;
614 }
615 
616 /** Reply to client and perform prefetch to keep cache up to date */
617 static void
618 reply_and_prefetch(struct worker* worker, struct query_info* qinfo,
619 	uint16_t flags, struct comm_reply* repinfo, time_t leeway)
620 {
621 	/* first send the answer to the client, to keep its latency
622 	 * as small as that of a cache reply */
623 	comm_point_send_reply(repinfo);
624 	server_stats_prefetch(&worker->stats, worker);
625 
626 	/* create the prefetch in the mesh as a normal lookup without
627 	 * client addrs waiting, which has the cache blacklisted (to bypass
628 	 * the cache and go to the network for the data). */
629 	/* this (potentially) runs the mesh for the new query */
630 	mesh_new_prefetch(worker->env.mesh, qinfo, flags, leeway +
631 		PREFETCH_EXPIRY_ADD);
632 }
633 
634 /**
635  * Fill CH class answer into buffer. Keeps query.
636  * @param pkt: buffer
637  * @param str: string to put into the TXT record (truncated to 255 bytes).
638  * @param edns: edns reply information.
639  */
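/* The buffer still holds the query; the header is rewritten to QR|RA (with
 * RD and CD echoed from the query), counts qd=1 an=1 ns=0 ar=0, the
 * question section is kept, and a single TXT RR in class CH with TTL 0 is
 * appended whose owner is a compression pointer (0xc00c) to the query
 * name, followed by the EDNS record from attach_edns_record. */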
640 static void
641 chaos_replystr(sldns_buffer* pkt, const char* str, struct edns_data* edns)
642 {
643 	size_t len = strlen(str);
644 	unsigned int rd = LDNS_RD_WIRE(sldns_buffer_begin(pkt));
645 	unsigned int cd = LDNS_CD_WIRE(sldns_buffer_begin(pkt));
646 	if(len>255) len=255; /* cap size of TXT record */
647 	sldns_buffer_clear(pkt);
648 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip id */
649 	sldns_buffer_write_u16(pkt, (uint16_t)(BIT_QR|BIT_RA));
650 	if(rd) LDNS_RD_SET(sldns_buffer_begin(pkt));
651 	if(cd) LDNS_CD_SET(sldns_buffer_begin(pkt));
652 	sldns_buffer_write_u16(pkt, 1); /* qdcount */
653 	sldns_buffer_write_u16(pkt, 1); /* ancount */
654 	sldns_buffer_write_u16(pkt, 0); /* nscount */
655 	sldns_buffer_write_u16(pkt, 0); /* arcount */
656 	(void)query_dname_len(pkt); /* skip qname */
657 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qtype */
658 	sldns_buffer_skip(pkt, (ssize_t)sizeof(uint16_t)); /* skip qclass */
659 	sldns_buffer_write_u16(pkt, 0xc00c); /* compr ptr to query */
660 	sldns_buffer_write_u16(pkt, LDNS_RR_TYPE_TXT);
661 	sldns_buffer_write_u16(pkt, LDNS_RR_CLASS_CH);
662 	sldns_buffer_write_u32(pkt, 0); /* TTL */
663 	sldns_buffer_write_u16(pkt, sizeof(uint8_t) + len);
664 	sldns_buffer_write_u8(pkt, len);
665 	sldns_buffer_write(pkt, str, len);
666 	sldns_buffer_flip(pkt);
667 	edns->edns_version = EDNS_ADVERTISED_VERSION;
668 	edns->udp_size = EDNS_ADVERTISED_SIZE;
669 	edns->bits &= EDNS_DO;
670 	attach_edns_record(pkt, edns);
671 }
672 
673 /**
674  * Answer CH class queries.
675  * @param w: worker
676  * @param qinfo: query info. Pointer into packet buffer.
677  * @param edns: edns info from query.
678  * @param pkt: packet buffer.
679  * @return: true if a reply is to be sent.
680  */
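/* A quick way to exercise this, as a sketch assuming unbound listens on
 * 127.0.0.1 with a default configuration:
 *   dig @127.0.0.1 version.bind CH TXT
 * is answered with cfg->version, or with PACKAGE_STRING when no version
 * string is configured; with hide-version set no CHAOS answer is given. */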
681 static int
682 answer_chaos(struct worker* w, struct query_info* qinfo,
683 	struct edns_data* edns, sldns_buffer* pkt)
684 {
685 	struct config_file* cfg = w->env.cfg;
686 	if(qinfo->qtype != LDNS_RR_TYPE_ANY && qinfo->qtype != LDNS_RR_TYPE_TXT)
687 		return 0;
688 	if(query_dname_compare(qinfo->qname,
689 		(uint8_t*)"\002id\006server") == 0 ||
690 		query_dname_compare(qinfo->qname,
691 		(uint8_t*)"\010hostname\004bind") == 0)
692 	{
693 		if(cfg->hide_identity)
694 			return 0;
695 		if(cfg->identity==NULL || cfg->identity[0]==0) {
696 			char buf[MAXHOSTNAMELEN+1];
697 			if (gethostname(buf, MAXHOSTNAMELEN) == 0) {
698 				buf[MAXHOSTNAMELEN] = 0;
699 				chaos_replystr(pkt, buf, edns);
700 			} else 	{
701 				log_err("gethostname: %s", strerror(errno));
702 				chaos_replystr(pkt, "no hostname", edns);
703 			}
704 		}
705 		else 	chaos_replystr(pkt, cfg->identity, edns);
706 		return 1;
707 	}
708 	if(query_dname_compare(qinfo->qname,
709 		(uint8_t*)"\007version\006server") == 0 ||
710 		query_dname_compare(qinfo->qname,
711 		(uint8_t*)"\007version\004bind") == 0)
712 	{
713 		if(cfg->hide_version)
714 			return 0;
715 		if(cfg->version==NULL || cfg->version[0]==0)
716 			chaos_replystr(pkt, PACKAGE_STRING, edns);
717 		else 	chaos_replystr(pkt, cfg->version, edns);
718 		return 1;
719 	}
720 	return 0;
721 }
722 
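/** apply an acl verdict: on 'deny' drop the query (return 0), on 'refuse'
 * put a header-only REFUSED reply in the buffer (return 1), and return -1
 * if neither applies so that normal processing continues. */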
723 static int
724 deny_refuse(struct comm_point* c, enum acl_access acl,
725 	enum acl_access deny, enum acl_access refuse,
726 	struct worker* worker, struct comm_reply* repinfo)
727 {
728 	if(acl == deny) {
729 		comm_point_drop_reply(repinfo);
730 		if(worker->stats.extended)
731 			worker->stats.unwanted_queries++;
732 		return 0;
733 	} else if(acl == refuse) {
734 		log_addr(VERB_ALGO, "refused query from",
735 			&repinfo->addr, repinfo->addrlen);
736 		log_buf(VERB_ALGO, "refuse", c->buffer);
737 		if(worker->stats.extended)
738 			worker->stats.unwanted_queries++;
739 		if(worker_check_request(c->buffer, worker) == -1) {
740 			comm_point_drop_reply(repinfo);
741 			return 0; /* discard this */
742 		}
743 		sldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
744 		sldns_buffer_write_at(c->buffer, 4,
745 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
746 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
747 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
748 			LDNS_RCODE_REFUSED);
749 		return 1;
750 	}
751 
752 	return -1;
753 }
754 
755 static int
756 deny_refuse_all(struct comm_point* c, enum acl_access acl,
757 	struct worker* worker, struct comm_reply* repinfo)
758 {
759 	return deny_refuse(c, acl, acl_deny, acl_refuse, worker, repinfo);
760 }
761 
762 static int
763 deny_refuse_non_local(struct comm_point* c, enum acl_access acl,
764 	struct worker* worker, struct comm_reply* repinfo)
765 {
766 	return deny_refuse(c, acl, acl_deny_non_local, acl_refuse_non_local, worker, repinfo);
767 }
768 
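/** handle a client request: check the acl and request sanity, answer
 * CHAOS class and local-zone queries directly, try the message cache
 * (with prefetch when its prefetch ttl has expired), and otherwise hand
 * the query to the mesh for resolution.  Returns 1 if c->buffer holds a
 * reply to send, 0 if the query was dropped or is being resolved. */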
769 int
770 worker_handle_request(struct comm_point* c, void* arg, int error,
771 	struct comm_reply* repinfo)
772 {
773 	struct worker* worker = (struct worker*)arg;
774 	int ret;
775 	hashvalue_t h;
776 	struct lruhash_entry* e;
777 	struct query_info qinfo;
778 	struct edns_data edns;
779 	enum acl_access acl;
780 
781 	if(error != NETEVENT_NOERROR) {
782 		/* some malformed TCP DNS queries cause these error callbacks */
783 		verbose(VERB_ALGO, "handle request called with err=%d", error);
784 		return 0;
785 	}
786 	acl = acl_list_lookup(worker->daemon->acl, &repinfo->addr,
787 		repinfo->addrlen);
788 	if((ret=deny_refuse_all(c, acl, worker, repinfo)) != -1)
789 	{
790 		return ret;
791 	}
792 	if((ret=worker_check_request(c->buffer, worker)) != 0) {
793 		verbose(VERB_ALGO, "worker check request: bad query.");
794 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
795 		if(ret != -1) {
796 			LDNS_QR_SET(sldns_buffer_begin(c->buffer));
797 			LDNS_RCODE_SET(sldns_buffer_begin(c->buffer), ret);
798 			return 1;
799 		}
800 		comm_point_drop_reply(repinfo);
801 		return 0;
802 	}
803 	worker->stats.num_queries++;
804 	/* see if query is in the cache */
805 	if(!query_info_parse(&qinfo, c->buffer)) {
806 		verbose(VERB_ALGO, "worker parse request: formerror.");
807 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
808 		sldns_buffer_rewind(c->buffer);
809 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
810 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
811 			LDNS_RCODE_FORMERR);
812 		server_stats_insrcode(&worker->stats, c->buffer);
813 		return 1;
814 	}
815 	if(worker->env.cfg->log_queries) {
816 		char ip[128];
817 		addr_to_str(&repinfo->addr, repinfo->addrlen, ip, sizeof(ip));
818 		log_nametypeclass(0, ip, qinfo.qname, qinfo.qtype, qinfo.qclass);
819 	}
820 	if(qinfo.qtype == LDNS_RR_TYPE_AXFR ||
821 		qinfo.qtype == LDNS_RR_TYPE_IXFR) {
822 		verbose(VERB_ALGO, "worker request: refused zone transfer.");
823 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
824 		sldns_buffer_rewind(c->buffer);
825 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
826 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
827 			LDNS_RCODE_REFUSED);
828 		if(worker->stats.extended) {
829 			worker->stats.qtype[qinfo.qtype]++;
830 			server_stats_insrcode(&worker->stats, c->buffer);
831 		}
832 		return 1;
833 	}
834 	if((ret=parse_edns_from_pkt(c->buffer, &edns)) != 0) {
835 		verbose(VERB_ALGO, "worker parse edns: formerror.");
836 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
837 		sldns_buffer_rewind(c->buffer);
838 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
839 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer), ret);
840 		server_stats_insrcode(&worker->stats, c->buffer);
841 		return 1;
842 	}
843 	if(edns.edns_present && edns.edns_version != 0) {
844 		edns.ext_rcode = (uint8_t)(EDNS_RCODE_BADVERS>>4);
845 		edns.edns_version = EDNS_ADVERTISED_VERSION;
846 		edns.udp_size = EDNS_ADVERTISED_SIZE;
847 		edns.bits &= EDNS_DO;
848 		verbose(VERB_ALGO, "query with bad edns version.");
849 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
850 		error_encode(c->buffer, EDNS_RCODE_BADVERS&0xf, &qinfo,
851 		    *(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
852 			sldns_buffer_read_u16_at(c->buffer, 2), NULL);
853 		attach_edns_record(c->buffer, &edns);
854 		return 1;
855 	}
856 	if(edns.edns_present && edns.udp_size < NORMAL_UDP_SIZE &&
857 		worker->daemon->cfg->harden_short_bufsize) {
858 		verbose(VERB_QUERY, "worker request: EDNS bufsize %d ignored",
859 			(int)edns.udp_size);
860 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
861 		edns.udp_size = NORMAL_UDP_SIZE;
862 	}
863 	if(edns.udp_size > worker->daemon->cfg->max_udp_size &&
864 		c->type == comm_udp) {
865 		verbose(VERB_QUERY,
866 			"worker request: max UDP reply size modified"
867 			" (%d to max-udp-size)", (int)edns.udp_size);
868 		log_addr(VERB_CLIENT,"from",&repinfo->addr, repinfo->addrlen);
869 		edns.udp_size = worker->daemon->cfg->max_udp_size;
870 	}
871 	if(edns.udp_size < LDNS_HEADER_SIZE) {
872 		verbose(VERB_ALGO, "worker request: edns is too small.");
873 		log_addr(VERB_CLIENT, "from", &repinfo->addr, repinfo->addrlen);
874 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
875 		LDNS_TC_SET(sldns_buffer_begin(c->buffer));
876 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
877 			LDNS_RCODE_SERVFAIL);
878 		sldns_buffer_set_position(c->buffer, LDNS_HEADER_SIZE);
879 		sldns_buffer_write_at(c->buffer, 4,
880 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
881 		sldns_buffer_flip(c->buffer);
882 		return 1;
883 	}
884 	if(worker->stats.extended)
885 		server_stats_insquery(&worker->stats, c, qinfo.qtype,
886 			qinfo.qclass, &edns, repinfo);
887 	if(c->type != comm_udp)
888 		edns.udp_size = 65535; /* max size for TCP replies */
889 	if(qinfo.qclass == LDNS_RR_CLASS_CH && answer_chaos(worker, &qinfo,
890 		&edns, c->buffer)) {
891 		server_stats_insrcode(&worker->stats, c->buffer);
892 		return 1;
893 	}
894 	if(local_zones_answer(worker->daemon->local_zones, &qinfo, &edns,
895 		c->buffer, worker->scratchpad)) {
896 		regional_free_all(worker->scratchpad);
897 		if(sldns_buffer_limit(c->buffer) == 0) {
898 			comm_point_drop_reply(repinfo);
899 			return 0;
900 		}
901 		server_stats_insrcode(&worker->stats, c->buffer);
902 		return 1;
903 	}
904 
905 	/* We've looked in our local zones. If the answer isn't there, we
906 	 * might need to bail out based on ACLs now. */
907 	if((ret=deny_refuse_non_local(c, acl, worker, repinfo)) != -1)
908 	{
909 		return ret;
910 	}
911 
912 	/* If this request does not have the recursion bit set, verify
913 	 * ACLs allow the snooping. */
914 	if(!(LDNS_RD_WIRE(sldns_buffer_begin(c->buffer))) &&
915 		acl != acl_allow_snoop ) {
916 		sldns_buffer_set_limit(c->buffer, LDNS_HEADER_SIZE);
917 		sldns_buffer_write_at(c->buffer, 4,
918 			(uint8_t*)"\0\0\0\0\0\0\0\0", 8);
919 		LDNS_QR_SET(sldns_buffer_begin(c->buffer));
920 		LDNS_RCODE_SET(sldns_buffer_begin(c->buffer),
921 			LDNS_RCODE_REFUSED);
922 		sldns_buffer_flip(c->buffer);
923 		server_stats_insrcode(&worker->stats, c->buffer);
924 		log_addr(VERB_ALGO, "refused nonrec (cache snoop) query from",
925 			&repinfo->addr, repinfo->addrlen);
926 		return 1;
927 	}
928 	h = query_info_hash(&qinfo);
929 	if((e=slabhash_lookup(worker->env.msg_cache, h, &qinfo, 0))) {
930 		/* answer from cache - we have acquired a readlock on it */
931 		if(answer_from_cache(worker, &qinfo,
932 			(struct reply_info*)e->data,
933 			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
934 			sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
935 			&edns)) {
936 			/* prefetch it if the prefetch TTL expired */
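			/* the remaining ttl is passed as leeway; together
			 * with PREFETCH_EXPIRY_ADD it lets the refreshed
			 * answer replace the still valid cached rrsets */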
937 			if(worker->env.cfg->prefetch && *worker->env.now >=
938 				((struct reply_info*)e->data)->prefetch_ttl) {
939 				time_t leeway = ((struct reply_info*)e->
940 					data)->ttl - *worker->env.now;
941 				lock_rw_unlock(&e->lock);
942 				reply_and_prefetch(worker, &qinfo,
943 					sldns_buffer_read_u16_at(c->buffer, 2),
944 					repinfo, leeway);
945 				return 0;
946 			}
947 			lock_rw_unlock(&e->lock);
948 			return 1;
949 		}
950 		verbose(VERB_ALGO, "answer from the cache failed");
951 		lock_rw_unlock(&e->lock);
952 	}
953 	if(!LDNS_RD_WIRE(sldns_buffer_begin(c->buffer))) {
954 		if(answer_norec_from_cache(worker, &qinfo,
955 			*(uint16_t*)(void *)sldns_buffer_begin(c->buffer),
956 			sldns_buffer_read_u16_at(c->buffer, 2), repinfo,
957 			&edns)) {
958 			return 1;
959 		}
960 		verbose(VERB_ALGO, "answer norec from cache -- "
961 			"need to validate or not primed");
962 	}
963 	sldns_buffer_rewind(c->buffer);
964 	server_stats_querymiss(&worker->stats, worker);
965 
966 	if(verbosity >= VERB_CLIENT) {
967 		if(c->type == comm_udp)
968 			log_addr(VERB_CLIENT, "udp request from",
969 				&repinfo->addr, repinfo->addrlen);
970 		else	log_addr(VERB_CLIENT, "tcp request from",
971 				&repinfo->addr, repinfo->addrlen);
972 	}
973 
974 	/* grab a work request structure for this new request */
975 	mesh_new_client(worker->env.mesh, &qinfo,
976 		sldns_buffer_read_u16_at(c->buffer, 2),
977 	    &edns, repinfo, *(uint16_t*)(void *)sldns_buffer_begin(c->buffer));
978 	worker_mem_report(worker, NULL);
979 	return 0;
980 }
981 
982 void
983 worker_sighandler(int sig, void* arg)
984 {
985 	/* note that log, print and syscalls here can give race conditions. */
986 	/* we still print detailed logs, because this is extensive per-message
987 	 * logging anyway, and the operator may have an interest in why
988 	 * unbound is exiting */
989 	struct worker* worker = (struct worker*)arg;
990 	switch(sig) {
991 #ifdef SIGHUP
992 		case SIGHUP:
993 			verbose(VERB_QUERY, "caught signal SIGHUP");
994 			comm_base_exit(worker->base);
995 			break;
996 #endif
997 		case SIGINT:
998 			verbose(VERB_QUERY, "caught signal SIGINT");
999 			worker->need_to_exit = 1;
1000 			comm_base_exit(worker->base);
1001 			break;
1002 #ifdef SIGQUIT
1003 		case SIGQUIT:
1004 			verbose(VERB_QUERY, "caught signal SIGQUIT");
1005 			worker->need_to_exit = 1;
1006 			comm_base_exit(worker->base);
1007 			break;
1008 #endif
1009 		case SIGTERM:
1010 			verbose(VERB_QUERY, "caught signal SIGTERM");
1011 			worker->need_to_exit = 1;
1012 			comm_base_exit(worker->base);
1013 			break;
1014 		default:
1015 			log_err("unknown signal: %d, ignored", sig);
1016 			break;
1017 	}
1018 }
1019 
1020 /** restart statistics timer for worker, if enabled */
1021 static void
1022 worker_restart_timer(struct worker* worker)
1023 {
1024 	if(worker->env.cfg->stat_interval > 0) {
1025 		struct timeval tv;
1026 #ifndef S_SPLINT_S
1027 		tv.tv_sec = worker->env.cfg->stat_interval;
1028 		tv.tv_usec = 0;
1029 #endif
1030 		comm_timer_set(worker->stat_timer, &tv);
1031 	}
1032 }
1033 
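/** statistics timer callback: log server and mesh statistics, report
 * memory use, clear the counters unless statistics-cumulative is set,
 * and re-arm the timer. */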
1034 void worker_stat_timer_cb(void* arg)
1035 {
1036 	struct worker* worker = (struct worker*)arg;
1037 	server_stats_log(&worker->stats, worker, worker->thread_num);
1038 	mesh_stats(worker->env.mesh, "mesh has");
1039 	worker_mem_report(worker, NULL);
1040 	if(!worker->daemon->cfg->stat_cumulative) {
1041 		worker_stats_clear(worker);
1042 	}
1043 	/* start next timer */
1044 	worker_restart_timer(worker);
1045 }
1046 
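/** RFC 5011 probe timer callback: run the autotrust probes and re-arm the
 * timer with the next probe interval (a zero interval disables it). */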
1047 void worker_probe_timer_cb(void* arg)
1048 {
1049 	struct worker* worker = (struct worker*)arg;
1050 	struct timeval tv;
1051 #ifndef S_SPLINT_S
1052 	tv.tv_sec = (time_t)autr_probe_timer(&worker->env);
1053 	tv.tv_usec = 0;
1054 #endif
1055 	if(tv.tv_sec != 0)
1056 		comm_timer_set(worker->env.probe_timer, &tv);
1057 }
1058 
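/** allocate a worker, copy its outgoing port list, create its command
 * tube and seed its private random state; the remaining setup is done by
 * worker_init. */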
1059 struct worker*
1060 worker_create(struct daemon* daemon, int id, int* ports, int n)
1061 {
1062 	unsigned int seed;
1063 	struct worker* worker = (struct worker*)calloc(1,
1064 		sizeof(struct worker));
1065 	if(!worker)
1066 		return NULL;
1067 	worker->numports = n;
1068 	worker->ports = (int*)memdup(ports, sizeof(int)*n);
1069 	if(!worker->ports) {
1070 		free(worker);
1071 		return NULL;
1072 	}
1073 	worker->daemon = daemon;
1074 	worker->thread_num = id;
1075 	if(!(worker->cmd = tube_create())) {
1076 		free(worker->ports);
1077 		free(worker);
1078 		return NULL;
1079 	}
1080 	/* create random state here to avoid locking trouble in RAND_bytes */
1081 	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
1082 		(((unsigned int)worker->thread_num)<<17);
1083 		/* shift thread_num so it does not match our pid bits */
1084 	if(!(worker->rndstate = ub_initstate(seed, daemon->rand))) {
1085 		seed = 0;
1086 		log_err("could not init random numbers.");
1087 		tube_delete(worker->cmd);
1088 		free(worker->ports);
1089 		free(worker);
1090 		return NULL;
1091 	}
1092 	seed = 0;
1093 	return worker;
1094 }
1095 
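/** set up the worker: event base, signal handlers and remote-control
 * accept (when do_sigs is set), listening and outgoing sockets, command
 * tube, timers, scratch memory and the module environment with its mesh.
 * Returns 1 on success, 0 on failure (the worker is deleted). */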
1096 int
1097 worker_init(struct worker* worker, struct config_file *cfg,
1098 	struct listen_port* ports, int do_sigs)
1099 {
1100 	worker->need_to_exit = 0;
1101 	worker->base = comm_base_create(do_sigs);
1102 	if(!worker->base) {
1103 		log_err("could not create event handling base");
1104 		worker_delete(worker);
1105 		return 0;
1106 	}
1107 	comm_base_set_slow_accept_handlers(worker->base, &worker_stop_accept,
1108 		&worker_start_accept, worker);
1109 	if(do_sigs) {
1110 #ifdef SIGHUP
1111 		ub_thread_sig_unblock(SIGHUP);
1112 #endif
1113 		ub_thread_sig_unblock(SIGINT);
1114 #ifdef SIGQUIT
1115 		ub_thread_sig_unblock(SIGQUIT);
1116 #endif
1117 		ub_thread_sig_unblock(SIGTERM);
1118 #ifndef LIBEVENT_SIGNAL_PROBLEM
1119 		worker->comsig = comm_signal_create(worker->base,
1120 			worker_sighandler, worker);
1121 		if(!worker->comsig
1122 #ifdef SIGHUP
1123 			|| !comm_signal_bind(worker->comsig, SIGHUP)
1124 #endif
1125 #ifdef SIGQUIT
1126 			|| !comm_signal_bind(worker->comsig, SIGQUIT)
1127 #endif
1128 			|| !comm_signal_bind(worker->comsig, SIGTERM)
1129 			|| !comm_signal_bind(worker->comsig, SIGINT)) {
1130 			log_err("could not create signal handlers");
1131 			worker_delete(worker);
1132 			return 0;
1133 		}
1134 #endif /* LIBEVENT_SIGNAL_PROBLEM */
1135 		if(!daemon_remote_open_accept(worker->daemon->rc,
1136 			worker->daemon->rc_ports, worker)) {
1137 			worker_delete(worker);
1138 			return 0;
1139 		}
1140 #ifdef UB_ON_WINDOWS
1141 		wsvc_setup_worker(worker);
1142 #endif /* UB_ON_WINDOWS */
1143 	} else { /* !do_sigs */
1144 		worker->comsig = NULL;
1145 	}
1146 	worker->front = listen_create(worker->base, ports,
1147 		cfg->msg_buffer_size, (int)cfg->incoming_num_tcp,
1148 		worker->daemon->listen_sslctx, worker_handle_request, worker);
1149 	if(!worker->front) {
1150 		log_err("could not create listening sockets");
1151 		worker_delete(worker);
1152 		return 0;
1153 	}
1154 	worker->back = outside_network_create(worker->base,
1155 		cfg->msg_buffer_size, (size_t)cfg->outgoing_num_ports,
1156 		cfg->out_ifs, cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
1157 		cfg->do_tcp?cfg->outgoing_num_tcp:0,
1158 		worker->daemon->env->infra_cache, worker->rndstate,
1159 		cfg->use_caps_bits_for_id, worker->ports, worker->numports,
1160 		cfg->unwanted_threshold, &worker_alloc_cleanup, worker,
1161 		cfg->do_udp, worker->daemon->connect_sslctx, cfg->delay_close);
1162 	if(!worker->back) {
1163 		log_err("could not create outgoing sockets");
1164 		worker_delete(worker);
1165 		return 0;
1166 	}
1167 	/* start listening to commands */
1168 	if(!tube_setup_bg_listen(worker->cmd, worker->base,
1169 		&worker_handle_control_cmd, worker)) {
1170 		log_err("could not create control compt.");
1171 		worker_delete(worker);
1172 		return 0;
1173 	}
1174 	worker->stat_timer = comm_timer_create(worker->base,
1175 		worker_stat_timer_cb, worker);
1176 	if(!worker->stat_timer) {
1177 		log_err("could not create statistics timer");
1178 	}
1179 
1180 	/* we use the msg_buffer_size as a good estimate for what the
1181 	 * user wants for memory usage sizes */
1182 	worker->scratchpad = regional_create_custom(cfg->msg_buffer_size);
1183 	if(!worker->scratchpad) {
1184 		log_err("malloc failure");
1185 		worker_delete(worker);
1186 		return 0;
1187 	}
1188 
1189 	server_stats_init(&worker->stats, cfg);
1190 	alloc_init(&worker->alloc, &worker->daemon->superalloc,
1191 		worker->thread_num);
1192 	alloc_set_id_cleanup(&worker->alloc, &worker_alloc_cleanup, worker);
1193 	worker->env = *worker->daemon->env;
1194 	comm_base_timept(worker->base, &worker->env.now, &worker->env.now_tv);
1195 	if(worker->thread_num == 0)
1196 		log_set_time(worker->env.now);
1197 	worker->env.worker = worker;
1198 	worker->env.send_query = &worker_send_query;
1199 	worker->env.alloc = &worker->alloc;
1200 	worker->env.rnd = worker->rndstate;
1201 	worker->env.scratch = worker->scratchpad;
1202 	worker->env.mesh = mesh_create(&worker->daemon->mods, &worker->env);
1203 	worker->env.detach_subs = &mesh_detach_subs;
1204 	worker->env.attach_sub = &mesh_attach_sub;
1205 	worker->env.kill_sub = &mesh_state_delete;
1206 	worker->env.detect_cycle = &mesh_detect_cycle;
1207 	worker->env.scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
1208 	if(!(worker->env.fwds = forwards_create()) ||
1209 		!forwards_apply_cfg(worker->env.fwds, cfg)) {
1210 		log_err("Could not set forward zones");
1211 		worker_delete(worker);
1212 		return 0;
1213 	}
1214 	if(!(worker->env.hints = hints_create()) ||
1215 		!hints_apply_cfg(worker->env.hints, cfg)) {
1216 		log_err("Could not set root or stub hints");
1217 		worker_delete(worker);
1218 		return 0;
1219 	}
1220 	/* one probe timer per process -- if we have 5011 anchors */
1221 	if(autr_get_num_anchors(worker->env.anchors) > 0
1222 #ifndef THREADS_DISABLED
1223 		&& worker->thread_num == 0
1224 #endif
1225 		) {
1226 		struct timeval tv;
1227 		tv.tv_sec = 0;
1228 		tv.tv_usec = 0;
1229 		worker->env.probe_timer = comm_timer_create(worker->base,
1230 			worker_probe_timer_cb, worker);
1231 		if(!worker->env.probe_timer) {
1232 			log_err("could not create 5011-probe timer");
1233 		} else {
1234 			/* let timer fire, then it can reset itself */
1235 			comm_timer_set(worker->env.probe_timer, &tv);
1236 		}
1237 	}
1238 	if(!worker->env.mesh || !worker->env.scratch_buffer) {
1239 		worker_delete(worker);
1240 		return 0;
1241 	}
1242 	worker_mem_report(worker, NULL);
1243 	/* if statistics enabled start timer */
1244 	if(worker->env.cfg->stat_interval > 0) {
1245 		verbose(VERB_ALGO, "set statistics interval %d secs",
1246 			worker->env.cfg->stat_interval);
1247 		worker_restart_timer(worker);
1248 	}
1249 	return 1;
1250 }
1251 
1252 void
1253 worker_work(struct worker* worker)
1254 {
1255 	comm_base_dispatch(worker->base);
1256 }
1257 
1258 void
1259 worker_delete(struct worker* worker)
1260 {
1261 	if(!worker)
1262 		return;
1263 	if(worker->env.mesh && verbosity >= VERB_OPS) {
1264 		server_stats_log(&worker->stats, worker, worker->thread_num);
1265 		mesh_stats(worker->env.mesh, "mesh has");
1266 		worker_mem_report(worker, NULL);
1267 	}
1268 	outside_network_quit_prepare(worker->back);
1269 	mesh_delete(worker->env.mesh);
1270 	sldns_buffer_free(worker->env.scratch_buffer);
1271 	forwards_delete(worker->env.fwds);
1272 	hints_delete(worker->env.hints);
1273 	listen_delete(worker->front);
1274 	outside_network_delete(worker->back);
1275 	comm_signal_delete(worker->comsig);
1276 	tube_delete(worker->cmd);
1277 	comm_timer_delete(worker->stat_timer);
1278 	comm_timer_delete(worker->env.probe_timer);
1279 	free(worker->ports);
1280 	if(worker->thread_num == 0) {
1281 		log_set_time(NULL);
1282 #ifdef UB_ON_WINDOWS
1283 		wsvc_desetup_worker(worker);
1284 #endif /* UB_ON_WINDOWS */
1285 	}
1286 	comm_base_delete(worker->base);
1287 	ub_randfree(worker->rndstate);
1288 	alloc_clear(&worker->alloc);
1289 	regional_destroy(worker->scratchpad);
1290 	free(worker);
1291 }
1292 
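/** send a query for a module: allocate an outbound entry in the query
 * region and hand the query to the serviced-query machinery of the
 * outgoing network; returns NULL on failure. */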
1293 struct outbound_entry*
1294 worker_send_query(uint8_t* qname, size_t qnamelen, uint16_t qtype,
1295 	uint16_t qclass, uint16_t flags, int dnssec, int want_dnssec,
1296 	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
1297 	size_t zonelen, struct module_qstate* q)
1298 {
1299 	struct worker* worker = q->env->worker;
1300 	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
1301 		q->region, sizeof(*e));
1302 	if(!e)
1303 		return NULL;
1304 	e->qstate = q;
1305 	e->qsent = outnet_serviced_query(worker->back, qname,
1306 		qnamelen, qtype, qclass, flags, dnssec, want_dnssec,
1307 		q->env->cfg->tcp_upstream, q->env->cfg->ssl_upstream, addr,
1308 		addrlen, zone, zonelen, worker_handle_service_reply, e,
1309 		worker->back->udp_buff);
1310 	if(!e->qsent) {
1311 		return NULL;
1312 	}
1313 	return e;
1314 }
1315 
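/** flush the rrset and message caches; registered as the alloc id-space
 * cleanup callback and as the action when the unwanted-reply threshold
 * is reached. */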
1316 void
1317 worker_alloc_cleanup(void* arg)
1318 {
1319 	struct worker* worker = (struct worker*)arg;
1320 	slabhash_clear(&worker->env.rrset_cache->table);
1321 	slabhash_clear(worker->env.msg_cache);
1322 }
1323 
1324 void worker_stats_clear(struct worker* worker)
1325 {
1326 	server_stats_init(&worker->stats, worker->env.cfg);
1327 	mesh_stats_clear(worker->env.mesh);
1328 	worker->back->unwanted_replies = 0;
1329 }
1330 
1331 void worker_start_accept(void* arg)
1332 {
1333 	struct worker* worker = (struct worker*)arg;
1334 	listen_start_accept(worker->front);
1335 	if(worker->thread_num == 0)
1336 		daemon_remote_start_accept(worker->daemon->rc);
1337 }
1338 
1339 void worker_stop_accept(void* arg)
1340 {
1341 	struct worker* worker = (struct worker*)arg;
1342 	listen_stop_accept(worker->front);
1343 	if(worker->thread_num == 0)
1344 		daemon_remote_stop_accept(worker->daemon->rc);
1345 }
1346 
1347 /* --- fake callbacks for fptr_wlist to work --- */
1348 struct outbound_entry* libworker_send_query(uint8_t* ATTR_UNUSED(qname),
1349 	size_t ATTR_UNUSED(qnamelen), uint16_t ATTR_UNUSED(qtype),
1350 	uint16_t ATTR_UNUSED(qclass), uint16_t ATTR_UNUSED(flags),
1351 	int ATTR_UNUSED(dnssec), int ATTR_UNUSED(want_dnssec),
1352 	struct sockaddr_storage* ATTR_UNUSED(addr),
1353 	socklen_t ATTR_UNUSED(addrlen), uint8_t* ATTR_UNUSED(zone),
1354 	size_t ATTR_UNUSED(zonelen), struct module_qstate* ATTR_UNUSED(q))
1355 {
1356 	log_assert(0);
1357 	return 0;
1358 }
1359 
1360 int libworker_handle_reply(struct comm_point* ATTR_UNUSED(c),
1361 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
1362         struct comm_reply* ATTR_UNUSED(reply_info))
1363 {
1364 	log_assert(0);
1365 	return 0;
1366 }
1367 
1368 int libworker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
1369 	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
1370         struct comm_reply* ATTR_UNUSED(reply_info))
1371 {
1372 	log_assert(0);
1373 	return 0;
1374 }
1375 
1376 void libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
1377         uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
1378         int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
1379 {
1380 	log_assert(0);
1381 }
1382 
1383 void libworker_fg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1384         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1385 	char* ATTR_UNUSED(why_bogus))
1386 {
1387 	log_assert(0);
1388 }
1389 
1390 void libworker_bg_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1391         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1392 	char* ATTR_UNUSED(why_bogus))
1393 {
1394 	log_assert(0);
1395 }
1396 
1397 void libworker_event_done_cb(void* ATTR_UNUSED(arg), int ATTR_UNUSED(rcode),
1398         sldns_buffer* ATTR_UNUSED(buf), enum sec_status ATTR_UNUSED(s),
1399 	char* ATTR_UNUSED(why_bogus))
1400 {
1401 	log_assert(0);
1402 }
1403 
1404 int context_query_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1405 {
1406 	log_assert(0);
1407 	return 0;
1408 }
1409 
1410 int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
1411 {
1412         log_assert(0);
1413         return 0;
1414 }
1415 
1416 int codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1417 {
1418         log_assert(0);
1419         return 0;
1420 }
1421 
1422