xref: /freebsd/contrib/unbound/services/outside_network.c (revision 1f1e2261e341e6ca6862f82261066ef1705f0a7a)
1 /*
2  * services/outside_network.c - implement sending of queries and wait answer.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file has functions to send queries to authoritative servers and
40  * wait for the pending answer events.
41  */
42 #include "config.h"
43 #include <ctype.h>
44 #ifdef HAVE_SYS_TYPES_H
45 #  include <sys/types.h>
46 #endif
47 #include <sys/time.h>
48 #include "services/outside_network.h"
49 #include "services/listen_dnsport.h"
50 #include "services/cache/infra.h"
51 #include "iterator/iterator.h"
52 #include "util/data/msgparse.h"
53 #include "util/data/msgreply.h"
54 #include "util/data/msgencode.h"
55 #include "util/data/dname.h"
56 #include "util/netevent.h"
57 #include "util/log.h"
58 #include "util/net_help.h"
59 #include "util/random.h"
60 #include "util/fptr_wlist.h"
61 #include "util/edns.h"
62 #include "sldns/sbuffer.h"
63 #include "dnstap/dnstap.h"
64 #ifdef HAVE_OPENSSL_SSL_H
65 #include <openssl/ssl.h>
66 #endif
67 #ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
68 #include <openssl/x509v3.h>
69 #endif
70 
71 #ifdef HAVE_NETDB_H
72 #include <netdb.h>
73 #endif
74 #include <fcntl.h>
75 
76 /** number of times to retry making a random ID that is unique. */
77 #define MAX_ID_RETRY 1000
78 /** number of times to retry finding interface, port that can be opened. */
79 #define MAX_PORT_RETRY 10000
80 /** number of retries on outgoing UDP queries */
81 #define OUTBOUND_UDP_RETRY 1
82 
83 /** initiate TCP transaction for serviced query */
84 static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
85 /** with a fd available, randomize and send UDP */
86 static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
87 	int timeout);
88 
89 /** remove waiting tcp from the outnet waiting list */
90 static void waiting_list_remove(struct outside_network* outnet,
91 	struct waiting_tcp* w);
92 
93 /** select a DNS ID for a TCP stream */
94 static uint16_t tcp_select_id(struct outside_network* outnet,
95 	struct reuse_tcp* reuse);
96 
97 /** Perform serviced query UDP sending operation */
98 static int serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff);
99 
100 /** Send serviced query over TCP, return false on initial failure */
101 static int serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff);
102 
103 /** call the callbacks for a serviced query */
104 static void serviced_callbacks(struct serviced_query* sq, int error,
105 	struct comm_point* c, struct comm_reply* rep);
106 
107 int
108 pending_cmp(const void* key1, const void* key2)
109 {
110 	struct pending *p1 = (struct pending*)key1;
111 	struct pending *p2 = (struct pending*)key2;
112 	if(p1->id < p2->id)
113 		return -1;
114 	if(p1->id > p2->id)
115 		return 1;
116 	log_assert(p1->id == p2->id);
117 	return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
118 }
119 
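/* Illustrative sketch, not part of the upstream file: pending_cmp() orders
 * outstanding UDP queries by their 16-bit DNS ID first and by remote address
 * second, which is how an incoming reply is matched against the rbtree of
 * pending queries.  The function name and values below are hypothetical. */
#if 0
static void
example_pending_cmp_usage(void)
{
	struct pending a, b;
	memset(&a, 0, sizeof(a));
	(void)ipstrtoaddr("192.0.2.1", UNBOUND_DNS_PORT, &a.addr, &a.addrlen);
	b = a;
	a.id = 10;
	b.id = 20;
	log_assert(pending_cmp(&a, &b) < 0);  /* smaller ID sorts first */
	b.id = 10;
	log_assert(pending_cmp(&a, &b) == 0); /* same ID and same address */
}
#endif
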
120 int
121 serviced_cmp(const void* key1, const void* key2)
122 {
123 	struct serviced_query* q1 = (struct serviced_query*)key1;
124 	struct serviced_query* q2 = (struct serviced_query*)key2;
125 	int r;
126 	if(q1->qbuflen < q2->qbuflen)
127 		return -1;
128 	if(q1->qbuflen > q2->qbuflen)
129 		return 1;
130 	log_assert(q1->qbuflen == q2->qbuflen);
131 	log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
132 	/* alternate casing of qname is still the same query */
133 	if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
134 		return r;
135 	if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
136 		return r;
137 	if(q1->dnssec != q2->dnssec) {
138 		if(q1->dnssec < q2->dnssec)
139 			return -1;
140 		return 1;
141 	}
142 	if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
143 		return r;
144 	if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
145 		return r;
146 	return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
147 }
148 
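/* Illustrative sketch, not part of the upstream file: because the qname is
 * compared with query_dname_compare(), a query whose name was 0x20
 * case-randomized still matches the same serviced_query.  The function name
 * and wire buffers below are hypothetical; qbuf holds the query without the
 * 2-byte ID (flags, counts, qname, type, class). */
#if 0
static void
example_serviced_cmp_casing(void)
{
	/* flags+counts (10 bytes), qname "nlnetlabs.nl", QTYPE A, QCLASS IN */
	uint8_t lower[] = { 0x01,0x00, 0,1, 0,0, 0,0, 0,0,
		9,'n','l','n','e','t','l','a','b','s',2,'n','l',0, 0,1, 0,1 };
	uint8_t upper[] = { 0x01,0x00, 0,1, 0,0, 0,0, 0,0,
		9,'N','L','N','E','T','L','A','B','S',2,'N','L',0, 0,1, 0,1 };
	struct serviced_query s1, s2;
	memset(&s1, 0, sizeof(s1));
	memset(&s2, 0, sizeof(s2));
	s1.qbuf = lower;  s1.qbuflen = sizeof(lower);
	s2.qbuf = upper;  s2.qbuflen = sizeof(upper);
	(void)ipstrtoaddr("192.0.2.1", UNBOUND_DNS_PORT, &s1.addr, &s1.addrlen);
	s2.addr = s1.addr;
	s2.addrlen = s1.addrlen;
	log_assert(serviced_cmp(&s1, &s2) == 0); /* same query, other casing */
}
#endif
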
149 /** compare if the reuse element has the same address and port and whether
150  * ssl is used for it */
151 static int
152 reuse_cmp_addrportssl(const void* key1, const void* key2)
153 {
154 	struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
155 	struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
156 	int r;
157 	/* compare address and port */
158 	r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
159 	if(r != 0)
160 		return r;
161 
162 	/* compare if SSL-enabled */
163 	if(r1->is_ssl && !r2->is_ssl)
164 		return 1;
165 	if(!r1->is_ssl && r2->is_ssl)
166 		return -1;
167 	return 0;
168 }
169 
170 int
171 reuse_cmp(const void* key1, const void* key2)
172 {
173 	int r;
174 	r = reuse_cmp_addrportssl(key1, key2);
175 	if(r != 0)
176 		return r;
177 
178 	/* compare ptr value */
179 	if(key1 < key2) return -1;
180 	if(key1 > key2) return 1;
181 	return 0;
182 }
183 
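/* Illustrative sketch, not part of the upstream file: reuse_cmp() adds a
 * pointer-value tie-break to the address/port/ssl comparison, so several open
 * streams to the same destination can coexist as distinct nodes in the
 * tcp_reuse tree, while reuse_cmp_addrportssl() is what identifies the range
 * of streams for one destination.  Hypothetical function name. */
#if 0
static void
example_reuse_cmp_tiebreak(void)
{
	/* two distinct reuse_tcp entries for the same destination, no ssl */
	struct reuse_tcp r1, r2;
	memset(&r1, 0, sizeof(r1));
	memset(&r2, 0, sizeof(r2));
	(void)ipstrtoaddr("192.0.2.1", UNBOUND_DNS_PORT, &r1.addr, &r1.addrlen);
	r2.addr = r1.addr;
	r2.addrlen = r1.addrlen;
	log_assert(reuse_cmp_addrportssl(&r1, &r2) == 0); /* same destination */
	log_assert(reuse_cmp(&r1, &r2) != 0); /* still distinct tree keys */
}
#endif
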
184 int reuse_id_cmp(const void* key1, const void* key2)
185 {
186 	struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
187 	struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
188 	if(w1->id < w2->id)
189 		return -1;
190 	if(w1->id > w2->id)
191 		return 1;
192 	return 0;
193 }
194 
195 /** delete waiting_tcp entry. Does not unlink from waiting list.
196  * @param w: to delete.
197  */
198 static void
199 waiting_tcp_delete(struct waiting_tcp* w)
200 {
201 	if(!w) return;
202 	if(w->timer)
203 		comm_timer_delete(w->timer);
204 	free(w);
205 }
206 
207 /**
208  * Pick random outgoing-interface of that family, and bind it.
209  * port set to 0 so OS picks a port number for us.
210  * if it is the ANY address, do not bind.
211  * @param pend: pending tcp structure, for storing the local address choice.
212  * @param w: tcp structure with destination address.
213  * @param s: socket fd.
214  * @return false on error, socket closed.
215  */
216 static int
217 pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
218 {
219 	struct port_if* pi = NULL;
220 	int num;
221 	pend->pi = NULL;
222 #ifdef INET6
223 	if(addr_is_ip6(&w->addr, w->addrlen))
224 		num = w->outnet->num_ip6;
225 	else
226 #endif
227 		num = w->outnet->num_ip4;
228 	if(num == 0) {
229 		log_err("no TCP outgoing interfaces of family");
230 		log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
231 		sock_close(s);
232 		return 0;
233 	}
234 #ifdef INET6
235 	if(addr_is_ip6(&w->addr, w->addrlen))
236 		pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
237 	else
238 #endif
239 		pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
240 	log_assert(pi);
241 	pend->pi = pi;
242 	if(addr_is_any(&pi->addr, pi->addrlen)) {
243 		/* binding to the ANY interface is for listening sockets */
244 		return 1;
245 	}
246 	/* set port to 0 */
247 	if(addr_is_ip6(&pi->addr, pi->addrlen))
248 		((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
249 	else	((struct sockaddr_in*)&pi->addr)->sin_port = 0;
250 	if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
251 #ifndef USE_WINSOCK
252 #ifdef EADDRNOTAVAIL
253 		if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
254 #endif
255 #else /* USE_WINSOCK */
256 		if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
257 #endif
258 		    log_err("outgoing tcp: bind: %s", sock_strerror(errno));
259 		sock_close(s);
260 		return 0;
261 	}
262 	log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
263 	return 1;
264 }
265 
266 /** get TCP file descriptor for address, returns -1 on failure,
267  * tcp_mss is 0 or maxseg size to set for TCP packets. */
268 int
269 outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
270 {
271 	int s;
272 	int af;
273 	char* err;
274 #ifdef SO_REUSEADDR
275 	int on = 1;
276 #endif
277 #ifdef INET6
278 	if(addr_is_ip6(addr, addrlen)){
279 		s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
280 		af = AF_INET6;
281 	} else {
282 #else
283 	{
284 #endif
285 		af = AF_INET;
286 		s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
287 	}
288 	if(s == -1) {
289 		log_err_addr("outgoing tcp: socket", sock_strerror(errno),
290 			addr, addrlen);
291 		return -1;
292 	}
293 
294 #ifdef SO_REUSEADDR
295 	if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
296 		(socklen_t)sizeof(on)) < 0) {
297 		verbose(VERB_ALGO, "outgoing tcp:"
298 			" setsockopt(.. SO_REUSEADDR ..) failed");
299 	}
300 #endif
301 
302 	err = set_ip_dscp(s, af, dscp);
303 	if(err != NULL) {
304 		verbose(VERB_ALGO, "outgoing tcp:"
305 			" error setting IP DiffServ codepoint on socket");
306 	}
307 
308 	if(tcp_mss > 0) {
309 #if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
310 		if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
311 			(void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
312 			verbose(VERB_ALGO, "outgoing tcp:"
313 				" setsockopt(.. TCP_MAXSEG ..) failed");
314 		}
315 #else
316 		verbose(VERB_ALGO, "outgoing tcp:"
317 			" setsockopt(TCP_MAXSEG) unsupported");
318 #endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
319 	}
320 
321 	return s;
322 }
323 
324 /** connect tcp connection to addr, 0 on failure */
325 int
326 outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
327 {
328 	if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
329 #ifndef USE_WINSOCK
330 #ifdef EINPROGRESS
331 		if(errno != EINPROGRESS) {
332 #endif
333 			if(tcp_connect_errno_needs_log(
334 				(struct sockaddr*)addr, addrlen))
335 				log_err_addr("outgoing tcp: connect",
336 					strerror(errno), addr, addrlen);
337 			close(s);
338 			return 0;
339 #ifdef EINPROGRESS
340 		}
341 #endif
342 #else /* USE_WINSOCK */
343 		if(WSAGetLastError() != WSAEINPROGRESS &&
344 			WSAGetLastError() != WSAEWOULDBLOCK) {
345 			closesocket(s);
346 			return 0;
347 		}
348 #endif
349 	}
350 	return 1;
351 }
352 
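/* Illustrative sketch, not part of the upstream file: the two helpers above
 * are normally used together when a new outgoing stream is opened, much as
 * outnet_tcp_take_into_use() does further below.  The function name is
 * hypothetical; tcp_mss 0 skips the TCP_MAXSEG option and dscp is simply
 * passed through to set_ip_dscp(). */
#if 0
static int
example_open_outgoing_tcp(struct sockaddr_storage* addr, socklen_t addrlen)
{
	int fd = outnet_get_tcp_fd(addr, addrlen, 0, 0);
	if(fd == -1)
		return -1;
	fd_set_nonblock(fd);
	/* nonblocking connect; completion is reported via the event loop */
	if(!outnet_tcp_connect(fd, addr, addrlen))
		return -1; /* the fd is already closed by outnet_tcp_connect */
	return fd;
}
#endif
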
353 /** log reuse item addr, port and fd with message */
354 static void
355 log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
356 {
357 	uint16_t port;
358 	char addrbuf[128];
359 	if(verbosity < v) return;
360 	if(!reuse || !reuse->pending || !reuse->pending->c)
361 		return;
362 	addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
363 	port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
364 	verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
365 		reuse->pending->c->fd);
366 }
367 
368 /** pop the first element from the writewait list */
369 static struct waiting_tcp* reuse_write_wait_pop(struct reuse_tcp* reuse)
370 {
371 	struct waiting_tcp* w = reuse->write_wait_first;
372 	if(!w)
373 		return NULL;
374 	log_assert(w->write_wait_queued);
375 	log_assert(!w->write_wait_prev);
376 	reuse->write_wait_first = w->write_wait_next;
377 	if(w->write_wait_next)
378 		w->write_wait_next->write_wait_prev = NULL;
379 	else	reuse->write_wait_last = NULL;
380 	w->write_wait_queued = 0;
381 	w->write_wait_next = NULL;
382 	w->write_wait_prev = NULL;
383 	return w;
384 }
385 
386 /** remove the element from the writewait list */
387 static void reuse_write_wait_remove(struct reuse_tcp* reuse,
388 	struct waiting_tcp* w)
389 {
390 	log_assert(w);
391 	log_assert(w->write_wait_queued);
392 	if(!w)
393 		return;
394 	if(!w->write_wait_queued)
395 		return;
396 	if(w->write_wait_prev)
397 		w->write_wait_prev->write_wait_next = w->write_wait_next;
398 	else	reuse->write_wait_first = w->write_wait_next;
399 	log_assert(!w->write_wait_prev ||
400 		w->write_wait_prev->write_wait_next != w->write_wait_prev);
401 	if(w->write_wait_next)
402 		w->write_wait_next->write_wait_prev = w->write_wait_prev;
403 	else	reuse->write_wait_last = w->write_wait_prev;
404 	log_assert(!w->write_wait_next
405 		|| w->write_wait_next->write_wait_prev != w->write_wait_next);
406 	w->write_wait_queued = 0;
407 	w->write_wait_next = NULL;
408 	w->write_wait_prev = NULL;
409 }
410 
411 /** push the element after the last on the writewait list */
412 static void reuse_write_wait_push_back(struct reuse_tcp* reuse,
413 	struct waiting_tcp* w)
414 {
415 	if(!w) return;
416 	log_assert(!w->write_wait_queued);
417 	if(reuse->write_wait_last) {
418 		reuse->write_wait_last->write_wait_next = w;
419 		log_assert(reuse->write_wait_last->write_wait_next !=
420 			reuse->write_wait_last);
421 		w->write_wait_prev = reuse->write_wait_last;
422 	} else {
423 		reuse->write_wait_first = w;
424 	}
425 	reuse->write_wait_last = w;
426 	w->write_wait_queued = 1;
427 }
428 
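/* Illustrative sketch, not part of the upstream file: the write_wait list is
 * a FIFO, so queries queued up behind a busy stream are written in arrival
 * order.  Hypothetical function name; assumes the list starts out empty and
 * that first and second are not yet queued anywhere. */
#if 0
static void
example_write_wait_fifo(struct reuse_tcp* reuse,
	struct waiting_tcp* first, struct waiting_tcp* second)
{
	reuse_write_wait_push_back(reuse, first);
	reuse_write_wait_push_back(reuse, second);
	log_assert(reuse_write_wait_pop(reuse) == first);
	log_assert(reuse_write_wait_pop(reuse) == second);
	log_assert(reuse_write_wait_pop(reuse) == NULL);
}
#endif
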
429 /** insert element in tree by id */
430 void
431 reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
432 {
433 #ifdef UNBOUND_DEBUG
434 	rbnode_type* added;
435 #endif
436 	log_assert(w->id_node.key == NULL);
437 	w->id_node.key = w;
438 #ifdef UNBOUND_DEBUG
439 	added =
440 #else
441 	(void)
442 #endif
443 	rbtree_insert(&reuse->tree_by_id, &w->id_node);
444 	log_assert(added);  /* should have been added */
445 }
446 
447 /** find element in tree by id */
448 struct waiting_tcp*
449 reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
450 {
451 	struct waiting_tcp key_w;
452 	rbnode_type* n;
453 	memset(&key_w, 0, sizeof(key_w));
454 	key_w.id_node.key = &key_w;
455 	key_w.id = id;
456 	n = rbtree_search(&reuse->tree_by_id, &key_w);
457 	if(!n) return NULL;
458 	return (struct waiting_tcp*)n->key;
459 }
460 
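/* Illustrative sketch, not part of the upstream file: lookups in tree_by_id
 * use a stack-allocated waiting_tcp as the search key, as above; once a query
 * is registered, its ID finds it again.  Hypothetical function name; assumes
 * w is not in any tree yet and its ID is not already present. */
#if 0
static void
example_tree_by_id_lookup(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
	reuse_tree_by_id_insert(reuse, w);
	log_assert(reuse_tcp_by_id_find(reuse, w->id) == w);
}
#endif
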
461 /** return ID value of rbnode in tree_by_id */
462 static uint16_t
463 tree_by_id_get_id(rbnode_type* node)
464 {
465 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
466 	return w->id;
467 }
468 
469 /** insert into reuse tcp tree and LRU, false on failure (duplicate) */
470 int
471 reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
472 {
473 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
474 	if(pend_tcp->reuse.item_on_lru_list) {
475 		if(!pend_tcp->reuse.node.key)
476 			log_err("internal error: reuse_tcp_insert: "
477 				"in lru list without key");
478 		return 1;
479 	}
480 	pend_tcp->reuse.node.key = &pend_tcp->reuse;
481 	pend_tcp->reuse.pending = pend_tcp;
482 	if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
483 		/* We are not in the LRU list but we are already in the
484 		 * tcp_reuse tree, strange.
485 		 * Continue to add ourselves to the LRU list. */
486 		log_err("internal error: reuse_tcp_insert: not in lru list "
487 			"but already in the tcp_reuse tree");
488 	}
489 	/* insert into LRU, first is newest */
490 	pend_tcp->reuse.lru_prev = NULL;
491 	if(outnet->tcp_reuse_first) {
492 		pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
493 		log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
494 		outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
495 		log_assert(outnet->tcp_reuse_first->lru_prev !=
496 			outnet->tcp_reuse_first);
497 	} else {
498 		pend_tcp->reuse.lru_next = NULL;
499 		outnet->tcp_reuse_last = &pend_tcp->reuse;
500 	}
501 	outnet->tcp_reuse_first = &pend_tcp->reuse;
502 	pend_tcp->reuse.item_on_lru_list = 1;
503 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
504 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
505 	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
506 		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
507 	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
508 		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
509 	return 1;
510 }
511 
512 /** find reuse tcp stream to destination for query, or NULL if none */
513 static struct reuse_tcp*
514 reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
515 	socklen_t addrlen, int use_ssl)
516 {
517 	struct waiting_tcp key_w;
518 	struct pending_tcp key_p;
519 	struct comm_point c;
520 	rbnode_type* result = NULL, *prev;
521 	verbose(VERB_CLIENT, "reuse_tcp_find");
522 	memset(&key_w, 0, sizeof(key_w));
523 	memset(&key_p, 0, sizeof(key_p));
524 	memset(&c, 0, sizeof(c));
525 	key_p.query = &key_w;
526 	key_p.c = &c;
527 	key_p.reuse.pending = &key_p;
528 	key_p.reuse.node.key = &key_p.reuse;
529 	if(use_ssl)
530 		key_p.reuse.is_ssl = 1;
531 	if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
532 		return NULL;
533 	memmove(&key_p.reuse.addr, addr, addrlen);
534 	key_p.reuse.addrlen = addrlen;
535 
536 	verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
537 		(unsigned)outnet->tcp_reuse.count);
538 	if(outnet->tcp_reuse.root == NULL ||
539 		outnet->tcp_reuse.root == RBTREE_NULL)
540 		return NULL;
541 	if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
542 		&result)) {
543 		/* exact match */
544 		/* but the key is on stack, and ptr is compared, impossible */
545 		log_assert(&key_p.reuse != (struct reuse_tcp*)result);
546 		log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
547 	}
548 	/* not found, return null */
549 	if(!result || result == RBTREE_NULL)
550 		return NULL;
551 	verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
552 	/* inexact match, find one of possibly several connections to the
553 	 * same destination address, with the correct port, ssl, and
554 	 * also fewer than the max number of open queries; otherwise fail,
555 	 * so that a new connection gets opened */
556 	/* rewind to start of sequence of same address,port,ssl */
557 	prev = rbtree_previous(result);
558 	while(prev && prev != RBTREE_NULL &&
559 		reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
560 		result = prev;
561 		prev = rbtree_previous(result);
562 	}
563 
564 	/* loop to find first one that has correct characteristics */
565 	while(result && result != RBTREE_NULL &&
566 		reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
567 		if(((struct reuse_tcp*)result)->tree_by_id.count <
568 			outnet->max_reuse_tcp_queries) {
569 			/* same address, port, ssl-yes-or-no, and has
570 			 * space for another query */
571 			return (struct reuse_tcp*)result;
572 		}
573 		result = rbtree_next(result);
574 	}
575 	return NULL;
576 }
577 
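/* Illustrative sketch, not part of the upstream file: a typical caller first
 * looks for an existing stream to the destination and, when one is found,
 * moves it to the front of the LRU so it is not closed soon, as
 * use_free_buffer() does further below.  Hypothetical function name. */
#if 0
static struct reuse_tcp*
example_find_and_touch(struct outside_network* outnet,
	struct sockaddr_storage* addr, socklen_t addrlen, int use_ssl)
{
	struct reuse_tcp* reuse = reuse_tcp_find(outnet, addr, addrlen,
		use_ssl);
	if(reuse)
		reuse_tcp_lru_touch(outnet, reuse);
	return reuse;
}
#endif
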
578 /** use the buffer to setup writing the query */
579 static void
580 outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
581 	struct waiting_tcp* w)
582 {
583 	struct timeval tv;
584 	verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
585 		"len %d timeout %d msec",
586 		(int)w->pkt_len, w->timeout);
587 	pend->c->tcp_write_pkt = w->pkt;
588 	pend->c->tcp_write_pkt_len = w->pkt_len;
589 	pend->c->tcp_write_and_read = 1;
590 	pend->c->tcp_write_byte_count = 0;
591 	pend->c->tcp_is_reading = 0;
592 	comm_point_start_listening(pend->c, s, -1);
593 	/* set timer on the waiting_tcp entry, this is the write timeout
594 	 * for the written packet.  The timer on pend->c is the timer
595 	 * for when there is no written packet and we have readtimeouts */
596 #ifndef S_SPLINT_S
597 	tv.tv_sec = w->timeout/1000;
598 	tv.tv_usec = (w->timeout%1000)*1000;
599 #endif
600 	/* if the waiting_tcp was previously waiting for a buffer in the
601 	 * outside_network.tcpwaitlist, then the timer is reset now that
602 	 * we start writing it */
603 	comm_timer_set(w->timer, &tv);
604 }
605 
606 /** use next free buffer to service a tcp query */
607 static int
608 outnet_tcp_take_into_use(struct waiting_tcp* w)
609 {
610 	struct pending_tcp* pend = w->outnet->tcp_free;
611 	int s;
612 	log_assert(pend);
613 	log_assert(w->pkt);
614 	log_assert(w->pkt_len > 0);
615 	log_assert(w->addrlen > 0);
616 	pend->c->tcp_do_toggle_rw = 0;
617 	pend->c->tcp_do_close = 0;
618 	/* open socket */
619 	s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);
620 
621 	if(s == -1)
622 		return 0;
623 
624 	if(!pick_outgoing_tcp(pend, w, s))
625 		return 0;
626 
627 	fd_set_nonblock(s);
628 #ifdef USE_OSX_MSG_FASTOPEN
629 	/* API for fast open is different here. We use a connectx() function and
630 	   then writes can happen as normal even using SSL.*/
631 	/* connectx requires that the len be set in the sockaddr struct*/
632 	struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
633 	addr_in->sin_len = w->addrlen;
634 	sa_endpoints_t endpoints;
635 	endpoints.sae_srcif = 0;
636 	endpoints.sae_srcaddr = NULL;
637 	endpoints.sae_srcaddrlen = 0;
638 	endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
639 	endpoints.sae_dstaddrlen = w->addrlen;
640 	if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
641 	             CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
642 	             NULL, 0, NULL, NULL) == -1) {
643 		/* if it fails, fall back to connect() for OSX 10.10 */
644 #ifdef EINPROGRESS
645 		if(errno != EINPROGRESS) {
646 #else
647 		if(1) {
648 #endif
649 			if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
650 #else /* USE_OSX_MSG_FASTOPEN*/
651 #ifdef USE_MSG_FASTOPEN
652 	pend->c->tcp_do_fastopen = 1;
653 	/* Only do TFO for TCP, in which case no connect() is required here.
654 	   Don't combine client TFO with SSL, since OpenSSL can't
655 	   currently support doing a handshake on an fd that is not yet connected */
656 	if (w->outnet->sslctx && w->ssl_upstream) {
657 		if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
658 #else /* USE_MSG_FASTOPEN*/
659 	if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
660 #endif /* USE_MSG_FASTOPEN*/
661 #endif /* USE_OSX_MSG_FASTOPEN*/
662 #ifndef USE_WINSOCK
663 #ifdef EINPROGRESS
664 		if(errno != EINPROGRESS) {
665 #else
666 		if(1) {
667 #endif
668 			if(tcp_connect_errno_needs_log(
669 				(struct sockaddr*)&w->addr, w->addrlen))
670 				log_err_addr("outgoing tcp: connect",
671 					strerror(errno), &w->addr, w->addrlen);
672 			close(s);
673 #else /* USE_WINSOCK */
674 		if(WSAGetLastError() != WSAEINPROGRESS &&
675 			WSAGetLastError() != WSAEWOULDBLOCK) {
676 			closesocket(s);
677 #endif
678 			return 0;
679 		}
680 	}
681 #ifdef USE_MSG_FASTOPEN
682 	}
683 #endif /* USE_MSG_FASTOPEN */
684 #ifdef USE_OSX_MSG_FASTOPEN
685 		}
686 	}
687 #endif /* USE_OSX_MSG_FASTOPEN */
688 	if(w->outnet->sslctx && w->ssl_upstream) {
689 		pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
690 		if(!pend->c->ssl) {
691 			pend->c->fd = s;
692 			comm_point_close(pend->c);
693 			return 0;
694 		}
695 		verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
696 			(w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
697 #ifdef USE_WINSOCK
698 		comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
699 #endif
700 		pend->c->ssl_shake_state = comm_ssl_shake_write;
701 		if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
702 			w->outnet->tls_use_sni)) {
703 			pend->c->fd = s;
704 #ifdef HAVE_SSL
705 			SSL_free(pend->c->ssl);
706 #endif
707 			pend->c->ssl = NULL;
708 			comm_point_close(pend->c);
709 			return 0;
710 		}
711 	}
712 	w->next_waiting = (void*)pend;
713 	w->outnet->num_tcp_outgoing++;
714 	w->outnet->tcp_free = pend->next_free;
715 	pend->next_free = NULL;
716 	pend->query = w;
717 	pend->reuse.outnet = w->outnet;
718 	pend->c->repinfo.addrlen = w->addrlen;
719 	pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
720 	pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
721 	pend->reuse.cp_more_read_again = 0;
722 	pend->reuse.cp_more_write_again = 0;
723 	memcpy(&pend->c->repinfo.addr, &w->addr, w->addrlen);
724 	pend->reuse.pending = pend;
725 
726 	/* Remove from tree in case the is_ssl will be different and causes the
727 	 * identity of the reuse_tcp to change; could result in nodes not being
728 	 * deleted from the tree (because the new identity does not match the
729 	 * previous node) but their ->key would be changed to NULL. */
730 	if(pend->reuse.node.key)
731 		reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);
732 
733 	if(pend->c->ssl)
734 		pend->reuse.is_ssl = 1;
735 	else	pend->reuse.is_ssl = 0;
736 	/* insert in reuse by address tree if not already inserted there */
737 	(void)reuse_tcp_insert(w->outnet, pend);
738 	reuse_tree_by_id_insert(&pend->reuse, w);
739 	outnet_tcp_take_query_setup(s, pend, w);
740 	return 1;
741 }
742 
743 /** Touch the lru of a reuse_tcp element, it is in use.
744  * This moves it to the front of the list, where it is not likely to
745  * be closed.  Items at the back of the list are closed to make space. */
746 void
747 reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
748 {
749 	if(!reuse->item_on_lru_list) {
750 		log_err("internal error: we need to touch the lru_list but item not in list");
751 		return; /* not on the list, no lru to modify */
752 	}
753 	log_assert(reuse->lru_prev ||
754 		(!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
755 	if(!reuse->lru_prev)
756 		return; /* already first in the list */
757 	/* remove at current position */
758 	/* since it is not first, there is a previous element */
759 	reuse->lru_prev->lru_next = reuse->lru_next;
760 	log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
761 	if(reuse->lru_next)
762 		reuse->lru_next->lru_prev = reuse->lru_prev;
763 	else	outnet->tcp_reuse_last = reuse->lru_prev;
764 	log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
765 	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
766 		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
767 	/* insert at the front */
768 	reuse->lru_prev = NULL;
769 	reuse->lru_next = outnet->tcp_reuse_first;
770 	if(outnet->tcp_reuse_first) {
771 		outnet->tcp_reuse_first->lru_prev = reuse;
772 	}
773 	log_assert(reuse->lru_next != reuse);
774 	/* since it is not first, it is not the only element and
775 	 * lru_next is thus not NULL and thus reuse is now not the last in
776 	 * the list, so outnet->tcp_reuse_last does not need to be modified */
777 	outnet->tcp_reuse_first = reuse;
778 	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
779 		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
780 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
781 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
782 }
783 
784 /** Snip the last reuse_tcp element off of the LRU list */
785 struct reuse_tcp*
786 reuse_tcp_lru_snip(struct outside_network* outnet)
787 {
788 	struct reuse_tcp* reuse = outnet->tcp_reuse_last;
789 	if(!reuse) return NULL;
790 	/* snip off of LRU */
791 	log_assert(reuse->lru_next == NULL);
792 	if(reuse->lru_prev) {
793 		outnet->tcp_reuse_last = reuse->lru_prev;
794 		reuse->lru_prev->lru_next = NULL;
795 	} else {
796 		outnet->tcp_reuse_last = NULL;
797 		outnet->tcp_reuse_first = NULL;
798 	}
799 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
800 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
801 	reuse->item_on_lru_list = 0;
802 	reuse->lru_next = NULL;
803 	reuse->lru_prev = NULL;
804 	return reuse;
805 }
806 
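/* Illustrative sketch, not part of the upstream file: when the set of kept
 * streams must shrink, the least recently used one is snipped off the back
 * of the LRU; its pending_tcp can then be torn down (see
 * reuse_cb_and_decommission() further below).  Hypothetical function name. */
#if 0
static struct pending_tcp*
example_pick_stream_to_close(struct outside_network* outnet)
{
	struct reuse_tcp* reuse = reuse_tcp_lru_snip(outnet);
	if(!reuse)
		return NULL; /* nothing is currently kept open */
	return reuse->pending; /* the stream that can now be closed */
}
#endif
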
807 /** call callback on waiting_tcp, if not NULL */
808 static void
809 waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
810 	struct comm_reply* reply_info)
811 {
812 	if(w && w->cb) {
813 		fptr_ok(fptr_whitelist_pending_tcp(w->cb));
814 		(void)(*w->cb)(c, w->cb_arg, error, reply_info);
815 	}
816 }
817 
818 /** add waiting_tcp element to the outnet tcp waiting list */
819 static void
820 outnet_add_tcp_waiting(struct outside_network* outnet, struct waiting_tcp* w)
821 {
822 	struct timeval tv;
823 	log_assert(!w->on_tcp_waiting_list);
824 	if(w->on_tcp_waiting_list)
825 		return;
826 	w->next_waiting = NULL;
827 	if(outnet->tcp_wait_last)
828 		outnet->tcp_wait_last->next_waiting = w;
829 	else	outnet->tcp_wait_first = w;
830 	outnet->tcp_wait_last = w;
831 	w->on_tcp_waiting_list = 1;
832 #ifndef S_SPLINT_S
833 	tv.tv_sec = w->timeout/1000;
834 	tv.tv_usec = (w->timeout%1000)*1000;
835 #endif
836 	comm_timer_set(w->timer, &tv);
837 }
838 
839 /** add waiting_tcp element as first to the outnet tcp waiting list */
840 static void
841 outnet_add_tcp_waiting_first(struct outside_network* outnet,
842 	struct waiting_tcp* w, int reset_timer)
843 {
844 	struct timeval tv;
845 	log_assert(!w->on_tcp_waiting_list);
846 	if(w->on_tcp_waiting_list)
847 		return;
848 	w->next_waiting = outnet->tcp_wait_first;
849 	log_assert(w->next_waiting != w);
850 	if(!outnet->tcp_wait_last)
851 		outnet->tcp_wait_last = w;
852 	outnet->tcp_wait_first = w;
853 	w->on_tcp_waiting_list = 1;
854 	if(reset_timer) {
855 #ifndef S_SPLINT_S
856 		tv.tv_sec = w->timeout/1000;
857 		tv.tv_usec = (w->timeout%1000)*1000;
858 #endif
859 		comm_timer_set(w->timer, &tv);
860 	}
861 	log_assert(
862 		(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
863 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
864 }
865 
866 /** see if buffers can be used to service TCP queries */
867 static void
868 use_free_buffer(struct outside_network* outnet)
869 {
870 	struct waiting_tcp* w;
871 	while(outnet->tcp_wait_first && !outnet->want_to_quit) {
872 #ifdef USE_DNSTAP
873 		struct pending_tcp* pend_tcp = NULL;
874 #endif
875 		struct reuse_tcp* reuse = NULL;
876 		w = outnet->tcp_wait_first;
877 		log_assert(w->on_tcp_waiting_list);
878 		outnet->tcp_wait_first = w->next_waiting;
879 		if(outnet->tcp_wait_last == w)
880 			outnet->tcp_wait_last = NULL;
881 		log_assert(
882 			(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
883 			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
884 		w->on_tcp_waiting_list = 0;
885 		reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
886 			w->ssl_upstream);
887 		/* re-select an ID when moving to a new TCP buffer */
888 		w->id = tcp_select_id(outnet, reuse);
889 		LDNS_ID_SET(w->pkt, w->id);
890 		if(reuse) {
891 			log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
892 				"found reuse", reuse);
893 #ifdef USE_DNSTAP
894 			pend_tcp = reuse->pending;
895 #endif
896 			reuse_tcp_lru_touch(outnet, reuse);
897 			comm_timer_disable(w->timer);
898 			w->next_waiting = (void*)reuse->pending;
899 			reuse_tree_by_id_insert(reuse, w);
900 			if(reuse->pending->query) {
901 				/* on the write wait list */
902 				reuse_write_wait_push_back(reuse, w);
903 			} else {
904 				/* write straight away */
905 				/* stop the timer on read of the fd */
906 				comm_point_stop_listening(reuse->pending->c);
907 				reuse->pending->query = w;
908 				outnet_tcp_take_query_setup(
909 					reuse->pending->c->fd, reuse->pending,
910 					w);
911 			}
912 		} else if(outnet->tcp_free) {
913 			struct pending_tcp* pend = w->outnet->tcp_free;
914 			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
915 			pend->reuse.pending = pend;
916 			memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
917 			pend->reuse.addrlen = w->addrlen;
918 			if(!outnet_tcp_take_into_use(w)) {
919 				waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
920 					NULL);
921 				waiting_tcp_delete(w);
922 #ifdef USE_DNSTAP
923 				w = NULL;
924 #endif
925 			}
926 #ifdef USE_DNSTAP
927 			pend_tcp = pend;
928 #endif
929 		} else {
930 			/* no reuse and no free buffer, put back at the start */
931 			outnet_add_tcp_waiting_first(outnet, w, 0);
932 			break;
933 		}
934 #ifdef USE_DNSTAP
935 		if(outnet->dtenv && pend_tcp && w && w->sq &&
936 			(outnet->dtenv->log_resolver_query_messages ||
937 			outnet->dtenv->log_forwarder_query_messages)) {
938 			sldns_buffer tmp;
939 			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
940 			dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
941 				&pend_tcp->pi->addr, comm_tcp, w->sq->zone,
942 				w->sq->zonelen, &tmp);
943 		}
944 #endif
945 	}
946 }
947 
948 /** delete element from tree by id */
949 static void
950 reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
951 {
952 #ifdef UNBOUND_DEBUG
953 	rbnode_type* rem;
954 #endif
955 	log_assert(w->id_node.key != NULL);
956 #ifdef UNBOUND_DEBUG
957 	rem =
958 #else
959 	(void)
960 #endif
961 	rbtree_delete(&reuse->tree_by_id, w);
962 	log_assert(rem);  /* should have been there */
963 	w->id_node.key = NULL;
964 }
965 
966 /** move writewait list to go for another connection. */
967 static void
968 reuse_move_writewait_away(struct outside_network* outnet,
969 	struct pending_tcp* pend)
970 {
971 	/* the writewait list has not been written yet, so if the
972 	 * stream was closed, those queries have not actually failed; only
973 	 * the queries that were already written have.  The unwritten
974 	 * queries can get written to another stream.  For upstreams that
975 	 * do not support multiple queries and answers, the stream can get
976 	 * closed, and then the queries can get written on a new socket */
977 	struct waiting_tcp* w;
978 	if(pend->query && pend->query->error_count == 0 &&
979 		pend->c->tcp_write_pkt == pend->query->pkt &&
980 		pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
981 		/* since the current query is not written, it can also
982 		 * move to a free buffer */
983 		if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
984 			LDNS_QDCOUNT(pend->query->pkt) > 0 &&
985 			dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
986 			char buf[LDNS_MAX_DOMAINLEN+1];
987 			dname_str(pend->query->pkt+12, buf);
988 			verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
989 				buf, (int)pend->c->tcp_write_byte_count);
990 		}
991 		pend->c->tcp_write_pkt = NULL;
992 		pend->c->tcp_write_pkt_len = 0;
993 		pend->c->tcp_write_and_read = 0;
994 		pend->reuse.cp_more_read_again = 0;
995 		pend->reuse.cp_more_write_again = 0;
996 		pend->c->tcp_is_reading = 1;
997 		w = pend->query;
998 		pend->query = NULL;
999 		/* increase error count, so that if the next socket fails too
1000 		 * the server selection is run again with this query failed
1001 		 * and it can select a different server (if possible), or
1002 		 * fail the query */
1003 		w->error_count ++;
1004 		reuse_tree_by_id_delete(&pend->reuse, w);
1005 		outnet_add_tcp_waiting(outnet, w);
1006 	}
1007 	while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
1008 		if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
1009 			LDNS_QDCOUNT(w->pkt) > 0 &&
1010 			dname_valid(w->pkt+12, w->pkt_len-12)) {
1011 			char buf[LDNS_MAX_DOMAINLEN+1];
1012 			dname_str(w->pkt+12, buf);
1013 			verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
1014 		}
1015 		reuse_tree_by_id_delete(&pend->reuse, w);
1016 		outnet_add_tcp_waiting(outnet, w);
1017 	}
1018 }
1019 
1020 /** remove reused element from tree and lru list */
1021 void
1022 reuse_tcp_remove_tree_list(struct outside_network* outnet,
1023 	struct reuse_tcp* reuse)
1024 {
1025 	verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
1026 	if(reuse->node.key) {
1027 		/* delete it from reuse tree */
1028 		if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
1029 			/* should not be possible, it should be there */
1030 			char buf[256];
1031 			addr_to_str(&reuse->addr, reuse->addrlen, buf,
1032 				sizeof(buf));
1033 			log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
1034 		}
1035 		reuse->node.key = NULL;
1036 		/* defend against loops on broken tree by zeroing the
1037 		 * rbnode structure */
1038 		memset(&reuse->node, 0, sizeof(reuse->node));
1039 	}
1040 	/* delete from reuse list */
1041 	if(reuse->item_on_lru_list) {
1042 		if(reuse->lru_prev) {
1043 			/* assert that members of the lru list are waiting
1044 			 * and thus have a pending pointer to the struct */
1045 			log_assert(reuse->lru_prev->pending);
1046 			reuse->lru_prev->lru_next = reuse->lru_next;
1047 			log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
1048 		} else {
1049 			log_assert(!reuse->lru_next || reuse->lru_next->pending);
1050 			outnet->tcp_reuse_first = reuse->lru_next;
1051 			log_assert(!outnet->tcp_reuse_first ||
1052 				(outnet->tcp_reuse_first !=
1053 				 outnet->tcp_reuse_first->lru_next &&
1054 				 outnet->tcp_reuse_first !=
1055 				 outnet->tcp_reuse_first->lru_prev));
1056 		}
1057 		if(reuse->lru_next) {
1058 			/* assert that members of the lru list are waiting
1059 			 * and thus have a pending pointer to the struct */
1060 			log_assert(reuse->lru_next->pending);
1061 			reuse->lru_next->lru_prev = reuse->lru_prev;
1062 			log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
1063 		} else {
1064 			log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
1065 			outnet->tcp_reuse_last = reuse->lru_prev;
1066 			log_assert(!outnet->tcp_reuse_last ||
1067 				(outnet->tcp_reuse_last !=
1068 				 outnet->tcp_reuse_last->lru_next &&
1069 				 outnet->tcp_reuse_last !=
1070 				 outnet->tcp_reuse_last->lru_prev));
1071 		}
1072 		log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
1073 			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
1074 		reuse->item_on_lru_list = 0;
1075 		reuse->lru_next = NULL;
1076 		reuse->lru_prev = NULL;
1077 	}
1078 	reuse->pending = NULL;
1079 }
1080 
1081 /** helper function that deletes an element from the tree of readwait
1082  * elements in tcp reuse structure */
1083 static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
1084 {
1085 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1086 	waiting_tcp_delete(w);
1087 }
1088 
1089 /** delete readwait waiting_tcp elements, deletes the elements in the list */
1090 void reuse_del_readwait(rbtree_type* tree_by_id)
1091 {
1092 	if(tree_by_id->root == NULL ||
1093 		tree_by_id->root == RBTREE_NULL)
1094 		return;
1095 	traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
1096 	rbtree_init(tree_by_id, reuse_id_cmp);
1097 }
1098 
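/* Illustrative sketch, not part of the upstream file: tree_by_id owns its
 * waiting_tcp elements, and reuse_del_readwait() frees them all in one
 * postorder pass and re-initializes the tree.  Hypothetical function name;
 * the entry is heap allocated because waiting_tcp_delete() calls free(). */
#if 0
static void
example_tree_by_id_teardown(void)
{
	rbtree_type ids;
	struct waiting_tcp* w = (struct waiting_tcp*)calloc(1, sizeof(*w));
	if(!w)
		return;
	rbtree_init(&ids, reuse_id_cmp);
	w->id = 42;
	w->id_node.key = w;
	(void)rbtree_insert(&ids, &w->id_node);
	reuse_del_readwait(&ids); /* frees w, the tree is empty again */
}
#endif
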
1099 /** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
1100 static void
1101 decommission_pending_tcp(struct outside_network* outnet,
1102 	struct pending_tcp* pend)
1103 {
1104 	verbose(VERB_CLIENT, "decommission_pending_tcp");
1105 	/* A certain code path can lead here twice for the same pending_tcp
1106 	 * creating a loop in the free pending_tcp list. */
1107 	if(outnet->tcp_free != pend) {
1108 		pend->next_free = outnet->tcp_free;
1109 		outnet->tcp_free = pend;
1110 	}
1111 	if(pend->reuse.node.key) {
1112 		/* needs unlink from the reuse tree to get deleted */
1113 		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1114 	}
1115 	/* free SSL structure after remove from outnet tcp reuse tree,
1116 	 * because the c->ssl null or not is used for sorting in the tree */
1117 	if(pend->c->ssl) {
1118 #ifdef HAVE_SSL
1119 		SSL_shutdown(pend->c->ssl);
1120 		SSL_free(pend->c->ssl);
1121 		pend->c->ssl = NULL;
1122 #endif
1123 	}
1124 	comm_point_close(pend->c);
1125 	pend->reuse.cp_more_read_again = 0;
1126 	pend->reuse.cp_more_write_again = 0;
1127 	/* unlink the query and writewait list, it is part of the tree
1128 	 * nodes and is deleted */
1129 	pend->query = NULL;
1130 	pend->reuse.write_wait_first = NULL;
1131 	pend->reuse.write_wait_last = NULL;
1132 	reuse_del_readwait(&pend->reuse.tree_by_id);
1133 }
1134 
1135 /** perform failure callbacks for waiting queries in reuse read rbtree */
1136 static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
1137 {
1138 	rbnode_type* node;
1139 	if(tree_by_id->root == NULL ||
1140 		tree_by_id->root == RBTREE_NULL)
1141 		return;
1142 	node = rbtree_first(tree_by_id);
1143 	while(node && node != RBTREE_NULL) {
1144 		struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1145 		waiting_tcp_callback(w, NULL, err, NULL);
1146 		node = rbtree_next(node);
1147 	}
1148 }
1149 
1150 /** mark the entry for being in the cb_and_decommission stage */
1151 static void mark_for_cb_and_decommission(rbnode_type* node,
1152 	void* ATTR_UNUSED(arg))
1153 {
1154 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1155 	/* Mark the waiting_tcp to signal later code (serviced_delete) that
1156 	 * this item is part of the backed up tree_by_id and will be deleted
1157 	 * later. */
1158 	w->in_cb_and_decommission = 1;
1159 	/* Mark the serviced_query for deletion so that later code through
1160 	 * callbacks (iter_clear .. outnet_serviced_query_stop) won't
1161 	 * prematurely delete it. */
1162 	if(w->cb)
1163 		((struct serviced_query*)w->cb_arg)->to_be_deleted = 1;
1164 }
1165 
1166 /** perform callbacks for failure and also decommission pending tcp.
1167  * the callbacks remove references in sq->pending to the waiting_tcp
1168  * members of the tree_by_id in the pending tcp.  The pending_tcp is
1169  * removed before the callbacks, so that the callbacks do not modify
1170  * the pending_tcp due to its reference in the outside_network reuse tree */
1171 static void reuse_cb_and_decommission(struct outside_network* outnet,
1172 	struct pending_tcp* pend, int error)
1173 {
1174 	rbtree_type store;
1175 	store = pend->reuse.tree_by_id;
1176 	pend->query = NULL;
1177 	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
1178 	pend->reuse.write_wait_first = NULL;
1179 	pend->reuse.write_wait_last = NULL;
1180 	decommission_pending_tcp(outnet, pend);
1181 	if(store.root != NULL && store.root != RBTREE_NULL) {
1182 		traverse_postorder(&store, &mark_for_cb_and_decommission, NULL);
1183 	}
1184 	reuse_cb_readwait_for_failure(&store, error);
1185 	reuse_del_readwait(&store);
1186 }
1187 
1188 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1189 static void
1190 reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1191 {
1192 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
1193 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1194 }
1195 
1196 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1197 static void
1198 reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1199 {
1200 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
1201 	sldns_buffer_clear(pend_tcp->c->buffer);
1202 	pend_tcp->c->tcp_is_reading = 1;
1203 	pend_tcp->c->tcp_byte_count = 0;
1204 	comm_point_stop_listening(pend_tcp->c);
1205 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1206 }
1207 
1208 int
1209 outnet_tcp_cb(struct comm_point* c, void* arg, int error,
1210 	struct comm_reply *reply_info)
1211 {
1212 	struct pending_tcp* pend = (struct pending_tcp*)arg;
1213 	struct outside_network* outnet = pend->reuse.outnet;
1214 	struct waiting_tcp* w = NULL;
1215 	log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
1216 	verbose(VERB_ALGO, "outnettcp cb");
1217 	if(error == NETEVENT_TIMEOUT) {
1218 		if(pend->c->tcp_write_and_read) {
1219 			verbose(VERB_QUERY, "outnettcp got tcp timeout "
1220 				"for read, ignored because write underway");
1221 			/* if we are writing, ignore readtimer, wait for write timer
1222 			 * or write is done */
1223 			return 0;
1224 		} else {
1225 			verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
1226 				(pend->reuse.tree_by_id.count?"for reading pkt":
1227 				"for keepalive for reuse"));
1228 		}
1229 		/* must be timeout for reading or keepalive reuse,
1230 		 * close it. */
1231 		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1232 	} else if(error == NETEVENT_PKT_WRITTEN) {
1233 		/* the packet we want to write has been written. */
1234 		verbose(VERB_ALGO, "outnet tcp pkt was written event");
1235 		log_assert(c == pend->c);
1236 		log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
1237 		log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
1238 		pend->c->tcp_write_pkt = NULL;
1239 		pend->c->tcp_write_pkt_len = 0;
1240 		/* the pend.query is already in tree_by_id */
1241 		log_assert(pend->query->id_node.key);
1242 		pend->query = NULL;
1243 		/* setup to write next packet or setup read timeout */
1244 		if(pend->reuse.write_wait_first) {
1245 			verbose(VERB_ALGO, "outnet tcp setup next pkt");
1246 			/* we can write it straight away perhaps, set flag
1247 			 * because this callback is called after a tcp write
1248 			 * succeeded and likely more buffer space is available
1249 			 * and we can write some more. */
1250 			pend->reuse.cp_more_write_again = 1;
1251 			pend->query = reuse_write_wait_pop(&pend->reuse);
1252 			comm_point_stop_listening(pend->c);
1253 			outnet_tcp_take_query_setup(pend->c->fd, pend,
1254 				pend->query);
1255 		} else {
1256 			verbose(VERB_ALGO, "outnet tcp writes done, wait");
1257 			pend->c->tcp_write_and_read = 0;
1258 			pend->reuse.cp_more_read_again = 0;
1259 			pend->reuse.cp_more_write_again = 0;
1260 			pend->c->tcp_is_reading = 1;
1261 			comm_point_stop_listening(pend->c);
1262 			reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
1263 		}
1264 		return 0;
1265 	} else if(error != NETEVENT_NOERROR) {
1266 		verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
1267 		reuse_move_writewait_away(outnet, pend);
1268 		/* pass error below and exit */
1269 	} else {
1270 		/* check ID */
1271 		if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
1272 			log_addr(VERB_QUERY,
1273 				"outnettcp: bad ID in reply, too short, from:",
1274 				&pend->reuse.addr, pend->reuse.addrlen);
1275 			error = NETEVENT_CLOSED;
1276 		} else {
1277 			uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
1278 				c->buffer));
1279 			/* find the query the reply is for */
1280 			w = reuse_tcp_by_id_find(&pend->reuse, id);
1281 			/* Make sure that the reply we got is at least for a
1282 			 * sent query with the same ID; the waiting_tcp that
1283 			 * gets a reply is assumed to not be waiting to be
1284 			 * sent. */
1285 			if(w && (w->on_tcp_waiting_list || w->write_wait_queued))
1286 				w = NULL;
1287 		}
1288 	}
1289 	if(error == NETEVENT_NOERROR && !w) {
1290 		/* no struct waiting found in tree, no reply to call */
1291 		log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
1292 			&pend->reuse.addr, pend->reuse.addrlen);
1293 		error = NETEVENT_CLOSED;
1294 	}
1295 	if(error == NETEVENT_NOERROR) {
1296 		/* add to reuse tree so it can be reused, if not a failure.
1297 		 * This is possible if the state machine wants to make a tcp
1298 		 * query again to the same destination. */
1299 		if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
1300 			(void)reuse_tcp_insert(outnet, pend);
1301 		}
1302 	}
1303 	if(w) {
1304 		log_assert(!w->on_tcp_waiting_list);
1305 		log_assert(!w->write_wait_queued);
1306 		reuse_tree_by_id_delete(&pend->reuse, w);
1307 		verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
1308 			error, (int)sldns_buffer_limit(c->buffer));
1309 		waiting_tcp_callback(w, c, error, reply_info);
1310 		waiting_tcp_delete(w);
1311 	}
1312 	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
1313 	if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
1314 		verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
1315 		/* it is in the reuse_tcp tree, with other queries, or
1316 		 * on the empty list. do not decommission it */
1317 		/* if there are more outstanding queries, we could try to
1318 		 * read again, to see if it is on the input,
1319 		 * because this callback is called after a successful read
1320 		 * and there could be more bytes to read on the input */
1321 		if(pend->reuse.tree_by_id.count != 0)
1322 			pend->reuse.cp_more_read_again = 1;
1323 		reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
1324 		return 0;
1325 	}
1326 	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
1327 	/* no queries on it, no space to keep it. or timeout or closed due
1328 	 * to error.  Close it */
1329 	reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
1330 		NETEVENT_TIMEOUT:NETEVENT_CLOSED));
1331 	use_free_buffer(outnet);
1332 	return 0;
1333 }
1334 
1335 /** lower use count on pc, see if it can be closed */
1336 static void
1337 portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
1338 {
1339 	struct port_if* pif;
1340 	pc->num_outstanding--;
1341 	if(pc->num_outstanding > 0) {
1342 		return;
1343 	}
1344 	/* close it and replace in unused list */
1345 	verbose(VERB_ALGO, "close of port %d", pc->number);
1346 	comm_point_close(pc->cp);
1347 	pif = pc->pif;
1348 	log_assert(pif->inuse > 0);
1349 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1350 	pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
1351 #endif
1352 	pif->inuse--;
1353 	pif->out[pc->index] = pif->out[pif->inuse];
1354 	pif->out[pc->index]->index = pc->index;
1355 	pc->next = outnet->unused_fds;
1356 	outnet->unused_fds = pc;
1357 }
1358 
1359 /** try to send waiting UDP queries */
1360 static void
1361 outnet_send_wait_udp(struct outside_network* outnet)
1362 {
1363 	struct pending* pend;
1364 	/* process waiting queries */
1365 	while(outnet->udp_wait_first && outnet->unused_fds
1366 		&& !outnet->want_to_quit) {
1367 		pend = outnet->udp_wait_first;
1368 		outnet->udp_wait_first = pend->next_waiting;
1369 		if(!pend->next_waiting) outnet->udp_wait_last = NULL;
1370 		sldns_buffer_clear(outnet->udp_buff);
1371 		sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
1372 		sldns_buffer_flip(outnet->udp_buff);
1373 		free(pend->pkt); /* freeing now makes get_mem correct */
1374 		pend->pkt = NULL;
1375 		pend->pkt_len = 0;
1376 		log_assert(!pend->sq->busy);
1377 		pend->sq->busy = 1;
1378 		if(!randomize_and_send_udp(pend, outnet->udp_buff,
1379 			pend->timeout)) {
1380 			/* callback error on pending */
1381 			if(pend->cb) {
1382 				fptr_ok(fptr_whitelist_pending_udp(pend->cb));
1383 				(void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
1384 					NETEVENT_CLOSED, NULL);
1385 			}
1386 			pending_delete(outnet, pend);
1387 		} else {
1388 			pend->sq->busy = 0;
1389 		}
1390 	}
1391 }
1392 
1393 int
1394 outnet_udp_cb(struct comm_point* c, void* arg, int error,
1395 	struct comm_reply *reply_info)
1396 {
1397 	struct outside_network* outnet = (struct outside_network*)arg;
1398 	struct pending key;
1399 	struct pending* p;
1400 	verbose(VERB_ALGO, "answer cb");
1401 
1402 	if(error != NETEVENT_NOERROR) {
1403 		verbose(VERB_QUERY, "outnetudp got udp error %d", error);
1404 		return 0;
1405 	}
1406 	if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1407 		verbose(VERB_QUERY, "outnetudp udp too short");
1408 		return 0;
1409 	}
1410 	log_assert(reply_info);
1411 
1412 	/* setup lookup key */
1413 	key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
1414 	memcpy(&key.addr, &reply_info->addr, reply_info->addrlen);
1415 	key.addrlen = reply_info->addrlen;
1416 	verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
1417 	log_addr(VERB_ALGO, "Incoming reply addr =",
1418 		&reply_info->addr, reply_info->addrlen);
1419 
1420 	/* find it, see if this thing is a valid query response */
1421 	verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
1422 	p = (struct pending*)rbtree_search(outnet->pending, &key);
1423 	if(!p) {
1424 		verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
1425 		log_buf(VERB_ALGO, "dropped message", c->buffer);
1426 		outnet->unwanted_replies++;
1427 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1428 			>= outnet->unwanted_threshold) {
1429 			log_warn("unwanted reply total reached threshold (%u)"
1430 				" you may be under attack."
1431 				" defensive action: clearing the cache",
1432 				(unsigned)outnet->unwanted_threshold);
1433 			fptr_ok(fptr_whitelist_alloc_cleanup(
1434 				outnet->unwanted_action));
1435 			(*outnet->unwanted_action)(outnet->unwanted_param);
1436 			outnet->unwanted_total = 0;
1437 		}
1438 		return 0;
1439 	}
1440 
1441 	verbose(VERB_ALGO, "received udp reply.");
1442 	log_buf(VERB_ALGO, "udp message", c->buffer);
1443 	if(p->pc->cp != c) {
1444 		verbose(VERB_QUERY, "received reply id,addr on wrong port. "
1445 			"dropped.");
1446 		outnet->unwanted_replies++;
1447 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1448 			>= outnet->unwanted_threshold) {
1449 			log_warn("unwanted reply total reached threshold (%u)"
1450 				" you may be under attack."
1451 				" defensive action: clearing the cache",
1452 				(unsigned)outnet->unwanted_threshold);
1453 			fptr_ok(fptr_whitelist_alloc_cleanup(
1454 				outnet->unwanted_action));
1455 			(*outnet->unwanted_action)(outnet->unwanted_param);
1456 			outnet->unwanted_total = 0;
1457 		}
1458 		return 0;
1459 	}
1460 	comm_timer_disable(p->timer);
1461 	verbose(VERB_ALGO, "outnet handle udp reply");
1462 	/* delete from tree first in case callback creates a retry */
1463 	(void)rbtree_delete(outnet->pending, p->node.key);
1464 	if(p->cb) {
1465 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1466 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info);
1467 	}
1468 	portcomm_loweruse(outnet, p->pc);
1469 	pending_delete(NULL, p);
1470 	outnet_send_wait_udp(outnet);
1471 	return 0;
1472 }
1473 
1474 /** calculate number of ip4 and ip6 interfaces */
1475 static void
1476 calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
1477 	int* num_ip4, int* num_ip6)
1478 {
1479 	int i;
1480 	*num_ip4 = 0;
1481 	*num_ip6 = 0;
1482 	if(num_ifs <= 0) {
1483 		if(do_ip4)
1484 			*num_ip4 = 1;
1485 		if(do_ip6)
1486 			*num_ip6 = 1;
1487 		return;
1488 	}
1489 	for(i=0; i<num_ifs; i++)
1490 	{
1491 		if(str_is_ip6(ifs[i])) {
1492 			if(do_ip6)
1493 				(*num_ip6)++;
1494 		} else {
1495 			if(do_ip4)
1496 				(*num_ip4)++;
1497 		}
1498 	}
1499 }
1500 
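/* Illustrative sketch, not part of the upstream file: with an explicit
 * outgoing-interface list, each address string is counted for the family it
 * belongs to, and a disabled family contributes nothing.  The function name
 * and values below are hypothetical. */
#if 0
static void
example_calc_num46(void)
{
	char* ifs[] = { "192.0.2.1", "2001:db8::1" };
	int num_ip4 = 0, num_ip6 = 0;
	calc_num46(ifs, 2, 1, 1, &num_ip4, &num_ip6);
	log_assert(num_ip4 == 1 && num_ip6 == 1);
	calc_num46(ifs, 2, 1, 0, &num_ip4, &num_ip6); /* ip6 disabled */
	log_assert(num_ip4 == 1 && num_ip6 == 0);
}
#endif
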
1501 void
1502 pending_udp_timer_delay_cb(void* arg)
1503 {
1504 	struct pending* p = (struct pending*)arg;
1505 	struct outside_network* outnet = p->outnet;
1506 	verbose(VERB_ALGO, "timeout udp with delay");
1507 	portcomm_loweruse(outnet, p->pc);
1508 	pending_delete(outnet, p);
1509 	outnet_send_wait_udp(outnet);
1510 }
1511 
1512 void
1513 pending_udp_timer_cb(void *arg)
1514 {
1515 	struct pending* p = (struct pending*)arg;
1516 	struct outside_network* outnet = p->outnet;
1517 	/* it timed out */
1518 	verbose(VERB_ALGO, "timeout udp");
1519 	if(p->cb) {
1520 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1521 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL);
1522 	}
1523 	/* if delayclose, keep port open for a longer time.
1524 	 * But if the udpwaitlist exists, then we are struggling to
1525 	 * keep up with demand for sockets, so do not wait, but service
1526 	 * the customer (customer service more important than portICMPs) */
1527 	if(outnet->delayclose && !outnet->udp_wait_first) {
1528 		p->cb = NULL;
1529 		p->timer->callback = &pending_udp_timer_delay_cb;
1530 		comm_timer_set(p->timer, &outnet->delay_tv);
1531 		return;
1532 	}
1533 	portcomm_loweruse(outnet, p->pc);
1534 	pending_delete(outnet, p);
1535 	outnet_send_wait_udp(outnet);
1536 }
1537 
1538 /** create pending_tcp buffers */
1539 static int
1540 create_pending_tcp(struct outside_network* outnet, size_t bufsize)
1541 {
1542 	size_t i;
1543 	if(outnet->num_tcp == 0)
1544 		return 1; /* no tcp needed, nothing to do */
1545 	if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
1546 			outnet->num_tcp, sizeof(struct pending_tcp*))))
1547 		return 0;
1548 	for(i=0; i<outnet->num_tcp; i++) {
1549 		if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
1550 			sizeof(struct pending_tcp))))
1551 			return 0;
1552 		outnet->tcp_conns[i]->next_free = outnet->tcp_free;
1553 		outnet->tcp_free = outnet->tcp_conns[i];
1554 		outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
1555 			outnet->base, bufsize, outnet_tcp_cb,
1556 			outnet->tcp_conns[i]);
1557 		if(!outnet->tcp_conns[i]->c)
1558 			return 0;
1559 	}
1560 	return 1;
1561 }
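/*
 * On allocation failure this returns 0 with tcp_conns only partially filled;
 * the caller then runs outside_network_delete(), which checks every element
 * and frees whatever was already created.
 */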
1562 
1563 /** setup an outgoing interface, ready address */
1564 static int setup_if(struct port_if* pif, const char* addrstr,
1565 	int* avail, int numavail, size_t numfd)
1566 {
1567 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1568 	pif->avail_total = numavail;
1569 	pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
1570 	if(!pif->avail_ports)
1571 		return 0;
1572 #endif
1573 	if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
1574 	   !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
1575 			      &pif->addr, &pif->addrlen, &pif->pfxlen))
1576 		return 0;
1577 	pif->maxout = (int)numfd;
1578 	pif->inuse = 0;
1579 	pif->out = (struct port_comm**)calloc(numfd,
1580 		sizeof(struct port_comm*));
1581 	if(!pif->out)
1582 		return 0;
1583 	return 1;
1584 }
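/*
 * The addrstr may be a plain IP address or a netblock ("addr/len"): if
 * ipstrtoaddr() fails, netblockstrtoaddr() is tried and fills pfxlen.  For
 * IPv6 a nonzero pfxlen later makes udp_sockport() pick a random source
 * address inside that prefix (and bind it with freebind).
 */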
1585 
1586 struct outside_network*
1587 outside_network_create(struct comm_base *base, size_t bufsize,
1588 	size_t num_ports, char** ifs, int num_ifs, int do_ip4,
1589 	int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
1590 	struct ub_randstate* rnd, int use_caps_for_id, int* availports,
1591 	int numavailports, size_t unwanted_threshold, int tcp_mss,
1592 	void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
1593 	void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
1594 	int udp_connect, int max_reuse_tcp_queries, int tcp_reuse_timeout,
1595 	int tcp_auth_query_timeout)
1596 {
1597 	struct outside_network* outnet = (struct outside_network*)
1598 		calloc(1, sizeof(struct outside_network));
1599 	size_t k;
1600 	if(!outnet) {
1601 		log_err("malloc failed");
1602 		return NULL;
1603 	}
1604 	comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
1605 	outnet->base = base;
1606 	outnet->num_tcp = num_tcp;
1607 	outnet->max_reuse_tcp_queries = max_reuse_tcp_queries;
1608 	outnet->tcp_reuse_timeout= tcp_reuse_timeout;
1609 	outnet->tcp_auth_query_timeout = tcp_auth_query_timeout;
1610 	outnet->num_tcp_outgoing = 0;
1611 	outnet->infra = infra;
1612 	outnet->rnd = rnd;
1613 	outnet->sslctx = sslctx;
1614 	outnet->tls_use_sni = tls_use_sni;
1615 #ifdef USE_DNSTAP
1616 	outnet->dtenv = dtenv;
1617 #else
1618 	(void)dtenv;
1619 #endif
1620 	outnet->svcd_overhead = 0;
1621 	outnet->want_to_quit = 0;
1622 	outnet->unwanted_threshold = unwanted_threshold;
1623 	outnet->unwanted_action = unwanted_action;
1624 	outnet->unwanted_param = unwanted_param;
1625 	outnet->use_caps_for_id = use_caps_for_id;
1626 	outnet->do_udp = do_udp;
1627 	outnet->tcp_mss = tcp_mss;
1628 	outnet->ip_dscp = dscp;
1629 #ifndef S_SPLINT_S
1630 	if(delayclose) {
1631 		outnet->delayclose = 1;
1632 		outnet->delay_tv.tv_sec = delayclose/1000;
1633 		outnet->delay_tv.tv_usec = (delayclose%1000)*1000;
1634 	}
1635 #endif
1636 	if(udp_connect) {
1637 		outnet->udp_connect = 1;
1638 	}
1639 	if(numavailports == 0 || num_ports == 0) {
1640 		log_err("no outgoing ports available");
1641 		outside_network_delete(outnet);
1642 		return NULL;
1643 	}
1644 #ifndef INET6
1645 	do_ip6 = 0;
1646 #endif
1647 	calc_num46(ifs, num_ifs, do_ip4, do_ip6,
1648 		&outnet->num_ip4, &outnet->num_ip6);
1649 	if(outnet->num_ip4 != 0) {
1650 		if(!(outnet->ip4_ifs = (struct port_if*)calloc(
1651 			(size_t)outnet->num_ip4, sizeof(struct port_if)))) {
1652 			log_err("malloc failed");
1653 			outside_network_delete(outnet);
1654 			return NULL;
1655 		}
1656 	}
1657 	if(outnet->num_ip6 != 0) {
1658 		if(!(outnet->ip6_ifs = (struct port_if*)calloc(
1659 			(size_t)outnet->num_ip6, sizeof(struct port_if)))) {
1660 			log_err("malloc failed");
1661 			outside_network_delete(outnet);
1662 			return NULL;
1663 		}
1664 	}
1665 	if(	!(outnet->udp_buff = sldns_buffer_new(bufsize)) ||
1666 		!(outnet->pending = rbtree_create(pending_cmp)) ||
1667 		!(outnet->serviced = rbtree_create(serviced_cmp)) ||
1668 		!create_pending_tcp(outnet, bufsize)) {
1669 		log_err("malloc failed");
1670 		outside_network_delete(outnet);
1671 		return NULL;
1672 	}
1673 	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1674 	outnet->tcp_reuse_max = num_tcp;
1675 
1676 	/* allocate commpoints */
1677 	for(k=0; k<num_ports; k++) {
1678 		struct port_comm* pc;
1679 		pc = (struct port_comm*)calloc(1, sizeof(*pc));
1680 		if(!pc) {
1681 			log_err("malloc failed");
1682 			outside_network_delete(outnet);
1683 			return NULL;
1684 		}
1685 		pc->cp = comm_point_create_udp(outnet->base, -1,
1686 			outnet->udp_buff, outnet_udp_cb, outnet, NULL);
1687 		if(!pc->cp) {
1688 			log_err("malloc failed");
1689 			free(pc);
1690 			outside_network_delete(outnet);
1691 			return NULL;
1692 		}
1693 		pc->next = outnet->unused_fds;
1694 		outnet->unused_fds = pc;
1695 	}
1696 
1697 	/* allocate interfaces */
1698 	if(num_ifs == 0) {
1699 		if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0",
1700 			availports, numavailports, num_ports)) {
1701 			log_err("malloc failed");
1702 			outside_network_delete(outnet);
1703 			return NULL;
1704 		}
1705 		if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::",
1706 			availports, numavailports, num_ports)) {
1707 			log_err("malloc failed");
1708 			outside_network_delete(outnet);
1709 			return NULL;
1710 		}
1711 	} else {
1712 		size_t done_4 = 0, done_6 = 0;
1713 		int i;
1714 		for(i=0; i<num_ifs; i++) {
1715 			if(str_is_ip6(ifs[i]) && do_ip6) {
1716 				if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i],
1717 					availports, numavailports, num_ports)){
1718 					log_err("malloc failed");
1719 					outside_network_delete(outnet);
1720 					return NULL;
1721 				}
1722 				done_6++;
1723 			}
1724 			if(!str_is_ip6(ifs[i]) && do_ip4) {
1725 				if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i],
1726 					availports, numavailports, num_ports)){
1727 					log_err("malloc failed");
1728 					outside_network_delete(outnet);
1729 					return NULL;
1730 				}
1731 				done_4++;
1732 			}
1733 		}
1734 	}
1735 	return outnet;
1736 }
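/*
 * Summary of the setup above: the scratch buffer and the pending/serviced
 * trees are created first, then one port_comm per outgoing port is
 * preallocated with a UDP comm_point that has no file descriptor yet
 * (fd -1; select_ifport() attaches a socket when a query is sent), and
 * finally one port_if per configured interface, or the 0.0.0.0 / ::
 * wildcards when num_ifs is 0.
 */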
1737 
1738 /** helper pending delete */
1739 static void
1740 pending_node_del(rbnode_type* node, void* arg)
1741 {
1742 	struct pending* pend = (struct pending*)node;
1743 	struct outside_network* outnet = (struct outside_network*)arg;
1744 	pending_delete(outnet, pend);
1745 }
1746 
1747 /** helper serviced delete */
1748 static void
1749 serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg))
1750 {
1751 	struct serviced_query* sq = (struct serviced_query*)node;
1752 	alloc_reg_release(sq->alloc, sq->region);
1753 	if(sq->timer)
1754 		comm_timer_delete(sq->timer);
1755 	free(sq);
1756 }
1757 
1758 void
1759 outside_network_quit_prepare(struct outside_network* outnet)
1760 {
1761 	if(!outnet)
1762 		return;
1763 	/* prevent queued items from being sent */
1764 	outnet->want_to_quit = 1;
1765 }
1766 
1767 void
1768 outside_network_delete(struct outside_network* outnet)
1769 {
1770 	if(!outnet)
1771 		return;
1772 	outnet->want_to_quit = 1;
1773 	/* check every element, since we can be called on malloc error */
1774 	if(outnet->pending) {
1775 		/* free pending elements, but do not unlink them from the tree. */
1776 		traverse_postorder(outnet->pending, pending_node_del, NULL);
1777 		free(outnet->pending);
1778 	}
1779 	if(outnet->serviced) {
1780 		traverse_postorder(outnet->serviced, serviced_node_del, NULL);
1781 		free(outnet->serviced);
1782 	}
1783 	if(outnet->udp_buff)
1784 		sldns_buffer_free(outnet->udp_buff);
1785 	if(outnet->unused_fds) {
1786 		struct port_comm* p = outnet->unused_fds, *np;
1787 		while(p) {
1788 			np = p->next;
1789 			comm_point_delete(p->cp);
1790 			free(p);
1791 			p = np;
1792 		}
1793 		outnet->unused_fds = NULL;
1794 	}
1795 	if(outnet->ip4_ifs) {
1796 		int i, k;
1797 		for(i=0; i<outnet->num_ip4; i++) {
1798 			for(k=0; k<outnet->ip4_ifs[i].inuse; k++) {
1799 				struct port_comm* pc = outnet->ip4_ifs[i].
1800 					out[k];
1801 				comm_point_delete(pc->cp);
1802 				free(pc);
1803 			}
1804 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1805 			free(outnet->ip4_ifs[i].avail_ports);
1806 #endif
1807 			free(outnet->ip4_ifs[i].out);
1808 		}
1809 		free(outnet->ip4_ifs);
1810 	}
1811 	if(outnet->ip6_ifs) {
1812 		int i, k;
1813 		for(i=0; i<outnet->num_ip6; i++) {
1814 			for(k=0; k<outnet->ip6_ifs[i].inuse; k++) {
1815 				struct port_comm* pc = outnet->ip6_ifs[i].
1816 					out[k];
1817 				comm_point_delete(pc->cp);
1818 				free(pc);
1819 			}
1820 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1821 			free(outnet->ip6_ifs[i].avail_ports);
1822 #endif
1823 			free(outnet->ip6_ifs[i].out);
1824 		}
1825 		free(outnet->ip6_ifs);
1826 	}
1827 	if(outnet->tcp_conns) {
1828 		size_t i;
1829 		for(i=0; i<outnet->num_tcp; i++)
1830 			if(outnet->tcp_conns[i]) {
1831 				struct pending_tcp* pend;
1832 				pend = outnet->tcp_conns[i];
1833 				if(pend->reuse.item_on_lru_list) {
1834 					/* delete waiting_tcp elements that
1835 					 * the tcp conn is working on */
1836 					decommission_pending_tcp(outnet, pend);
1837 				}
1838 				comm_point_delete(outnet->tcp_conns[i]->c);
1839 				free(outnet->tcp_conns[i]);
1840 				outnet->tcp_conns[i] = NULL;
1841 			}
1842 		free(outnet->tcp_conns);
1843 		outnet->tcp_conns = NULL;
1844 	}
1845 	if(outnet->tcp_wait_first) {
1846 		struct waiting_tcp* p = outnet->tcp_wait_first, *np;
1847 		while(p) {
1848 			np = p->next_waiting;
1849 			waiting_tcp_delete(p);
1850 			p = np;
1851 		}
1852 	}
1853 	/* the tcp_reuse elements were allocated in the pending_tcp structs deleted above */
1854 	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1855 	outnet->tcp_reuse_first = NULL;
1856 	outnet->tcp_reuse_last = NULL;
1857 	if(outnet->udp_wait_first) {
1858 		struct pending* p = outnet->udp_wait_first, *np;
1859 		while(p) {
1860 			np = p->next_waiting;
1861 			pending_delete(NULL, p);
1862 			p = np;
1863 		}
1864 	}
1865 	free(outnet);
1866 }
1867 
1868 void
1869 pending_delete(struct outside_network* outnet, struct pending* p)
1870 {
1871 	if(!p)
1872 		return;
1873 	if(outnet && outnet->udp_wait_first &&
1874 		(p->next_waiting || p == outnet->udp_wait_last) ) {
1875 		/* delete from waiting list, if it is in the waiting list */
1876 		struct pending* prev = NULL, *x = outnet->udp_wait_first;
1877 		while(x && x != p) {
1878 			prev = x;
1879 			x = x->next_waiting;
1880 		}
1881 		if(x) {
1882 			log_assert(x == p);
1883 			if(prev)
1884 				prev->next_waiting = p->next_waiting;
1885 			else	outnet->udp_wait_first = p->next_waiting;
1886 			if(outnet->udp_wait_last == p)
1887 				outnet->udp_wait_last = prev;
1888 		}
1889 	}
1890 	if(outnet) {
1891 		(void)rbtree_delete(outnet->pending, p->node.key);
1892 	}
1893 	if(p->timer)
1894 		comm_timer_delete(p->timer);
1895 	free(p->pkt);
1896 	free(p);
1897 }
1898 
1899 static void
1900 sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd)
1901 {
1902 	int i, last;
1903 	if(!(pfxlen > 0 && pfxlen < 128))
1904 		return;
1905 	for(i = 0; i < (128 - pfxlen) / 8; i++) {
1906 		sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256);
1907 	}
1908 	last = pfxlen & 7;
1909 	if(last != 0) {
1910 		sa->sin6_addr.s6_addr[15-i] |=
1911 			((0xFF >> last) & ub_random_max(rnd, 256));
1912 	}
1913 }
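/*
 * Example: with pfxlen 64 the loop replaces the eight host bytes
 * s6_addr[8..15] with random values; with pfxlen 60 the low four bits of
 * s6_addr[7] additionally receive random bits (mask 0xFF >> (pfxlen & 7)).
 */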
1914 
1915 /**
1916  * Try to open a UDP socket for outgoing communication.
1917  * Sets socket options as needed.
1918  * @param addr: socket address.
1919  * @param addrlen: length of address.
1920  * @param pfxlen: length of network prefix (for address randomisation).
1921  * @param port: port override for addr.
1922  * @param inuse: if -1 is returned, this bool means the port was in use.
1923  * @param rnd: random state (for address randomisation).
1924  * @param dscp: DSCP to use.
1925  * @return fd or -1
1926  */
1927 static int
1928 udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen,
1929 	int port, int* inuse, struct ub_randstate* rnd, int dscp)
1930 {
1931 	int fd, noproto;
1932 	if(addr_is_ip6(addr, addrlen)) {
1933 		int freebind = 0;
1934 		struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
1935 		sa.sin6_port = (in_port_t)htons((uint16_t)port);
1936 		sa.sin6_flowinfo = 0;
1937 		sa.sin6_scope_id = 0;
1938 		if(pfxlen != 0) {
1939 			freebind = 1;
1940 			sai6_putrandom(&sa, pfxlen, rnd);
1941 		}
1942 		fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
1943 			(struct sockaddr*)&sa, addrlen, 1, inuse, &noproto,
1944 			0, 0, 0, NULL, 0, freebind, 0, dscp);
1945 	} else {
1946 		struct sockaddr_in* sa = (struct sockaddr_in*)addr;
1947 		sa->sin_port = (in_port_t)htons((uint16_t)port);
1948 		fd = create_udp_sock(AF_INET, SOCK_DGRAM,
1949 			(struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
1950 			0, 0, 0, NULL, 0, 0, 0, dscp);
1951 	}
1952 	return fd;
1953 }
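/*
 * The IPv6 branch works on a stack copy of the address because it may be
 * randomised within the prefix; the IPv4 branch only needs to set the port
 * and does so directly in the caller's sockaddr.
 */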
1954 
1955 /** Select random ID */
1956 static int
1957 select_id(struct outside_network* outnet, struct pending* pend,
1958 	sldns_buffer* packet)
1959 {
1960 	int id_tries = 0;
1961 	pend->id = GET_RANDOM_ID(outnet->rnd);
1962 	LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
1963 
1964 	/* insert in tree */
1965 	pend->node.key = pend;
1966 	while(!rbtree_insert(outnet->pending, &pend->node)) {
1967 		/* change ID to avoid collision */
1968 		pend->id = GET_RANDOM_ID(outnet->rnd);
1969 		LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
1970 		id_tries++;
1971 		if(id_tries == MAX_ID_RETRY) {
1972 			pend->id=99999; /* non existent ID */
1973 			log_err("failed to generate unique ID, drop msg");
1974 			return 0;
1975 		}
1976 	}
1977 	verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id);
1978 	return 1;
1979 }
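/*
 * rbtree_insert() fails when the key collides with a query already in the
 * outnet->pending tree; in that case a fresh random ID is drawn, up to
 * MAX_ID_RETRY times, after which the message is dropped.
 */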
1980 
1981 /** return true if the UDP connect error needs to be logged */
1982 static int udp_connect_needs_log(int err)
1983 {
1984 	switch(err) {
1985 	case ECONNREFUSED:
1986 #  ifdef ENETUNREACH
1987 	case ENETUNREACH:
1988 #  endif
1989 #  ifdef EHOSTDOWN
1990 	case EHOSTDOWN:
1991 #  endif
1992 #  ifdef EHOSTUNREACH
1993 	case EHOSTUNREACH:
1994 #  endif
1995 #  ifdef ENETDOWN
1996 	case ENETDOWN:
1997 #  endif
1998 #  ifdef EADDRNOTAVAIL
1999 	case EADDRNOTAVAIL:
2000 #  endif
2001 	case EPERM:
2002 	case EACCES:
2003 		if(verbosity >= VERB_ALGO)
2004 			return 1;
2005 		return 0;
2006 	default:
2007 		break;
2008 	}
2009 	return 1;
2010 }
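/*
 * The errno values listed above correspond to destinations that are
 * routinely unreachable or refusing; for those a failed connect() is only
 * logged at VERB_ALGO and above, while any other errno is always logged.
 */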
2011 
2012 
2013 /** Select random interface and port */
2014 static int
2015 select_ifport(struct outside_network* outnet, struct pending* pend,
2016 	int num_if, struct port_if* ifs)
2017 {
2018 	int my_if, my_port, fd, portno, inuse, tries=0;
2019 	struct port_if* pif;
2020 	/* randomly select interface and port */
2021 	if(num_if == 0) {
2022 		verbose(VERB_QUERY, "Need to send query but have no "
2023 			"outgoing interfaces of that family");
2024 		return 0;
2025 	}
2026 	log_assert(outnet->unused_fds);
2027 	tries = 0;
2028 	while(1) {
2029 		my_if = ub_random_max(outnet->rnd, num_if);
2030 		pif = &ifs[my_if];
2031 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
2032 		if(outnet->udp_connect) {
2033 			/* if we connect() we cannot reuse fds for a port */
2034 			if(pif->inuse >= pif->avail_total) {
2035 				tries++;
2036 				if(tries < MAX_PORT_RETRY)
2037 					continue;
2038 				log_err("failed to find an open port, drop msg");
2039 				return 0;
2040 			}
2041 			my_port = pif->inuse + ub_random_max(outnet->rnd,
2042 				pif->avail_total - pif->inuse);
2043 		} else  {
2044 			my_port = ub_random_max(outnet->rnd, pif->avail_total);
2045 			if(my_port < pif->inuse) {
2046 				/* port already open */
2047 				pend->pc = pif->out[my_port];
2048 				verbose(VERB_ALGO, "using UDP if=%d port=%d",
2049 					my_if, pend->pc->number);
2050 				break;
2051 			}
2052 		}
2053 		/* try to open a new port; if that fails, loop to try again */
2054 		log_assert(pif->inuse < pif->maxout);
2055 		portno = pif->avail_ports[my_port - pif->inuse];
2056 #else
2057 		my_port = portno = 0;
2058 #endif
2059 		fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
2060 			portno, &inuse, outnet->rnd, outnet->ip_dscp);
2061 		if(fd == -1 && !inuse) {
2062 			/* nonrecoverable error making socket */
2063 			return 0;
2064 		}
2065 		if(fd != -1) {
2066 			verbose(VERB_ALGO, "opened UDP if=%d port=%d",
2067 				my_if, portno);
2068 			if(outnet->udp_connect) {
2069 				/* connect() to the destination */
2070 				if(connect(fd, (struct sockaddr*)&pend->addr,
2071 					pend->addrlen) < 0) {
2072 					if(udp_connect_needs_log(errno)) {
2073 						log_err_addr("udp connect failed",
2074 							strerror(errno), &pend->addr,
2075 							pend->addrlen);
2076 					}
2077 					sock_close(fd);
2078 					return 0;
2079 				}
2080 			}
2081 			/* grab fd */
2082 			pend->pc = outnet->unused_fds;
2083 			outnet->unused_fds = pend->pc->next;
2084 
2085 			/* setup portcomm */
2086 			pend->pc->next = NULL;
2087 			pend->pc->number = portno;
2088 			pend->pc->pif = pif;
2089 			pend->pc->index = pif->inuse;
2090 			pend->pc->num_outstanding = 0;
2091 			comm_point_start_listening(pend->pc->cp, fd, -1);
2092 
2093 			/* grab port in interface */
2094 			pif->out[pif->inuse] = pend->pc;
2095 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
2096 			pif->avail_ports[my_port - pif->inuse] =
2097 				pif->avail_ports[pif->avail_total-pif->inuse-1];
2098 #endif
2099 			pif->inuse++;
2100 			break;
2101 		}
2102 		/* failed, already in use */
2103 		verbose(VERB_QUERY, "port %d in use, trying another", portno);
2104 		tries++;
2105 		if(tries == MAX_PORT_RETRY) {
2106 			log_err("failed to find an open port, drop msg");
2107 			return 0;
2108 		}
2109 	}
2110 	log_assert(pend->pc);
2111 	pend->pc->num_outstanding++;
2112 
2113 	return 1;
2114 }
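/*
 * Bookkeeping of the port list: avail_ports[0 .. avail_total-inuse-1] always
 * holds the ports that are not currently open.  When a port is opened, its
 * slot is overwritten with the last not-open entry and inuse is incremented,
 * shrinking that region by one, e.g. (with illustrative port numbers):
 *
 *	avail_ports = { 2048, 3000, 4096 }, inuse = 0, pick index 0 (2048)
 *	-> avail_ports = { 4096, 3000, 4096 }, inuse = 1; the first
 *	   avail_total-inuse = 2 entries are the ports still closed.
 */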
2115 
2116 static int
2117 randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout)
2118 {
2119 	struct timeval tv;
2120 	struct outside_network* outnet = pend->sq->outnet;
2121 
2122 	/* select id */
2123 	if(!select_id(outnet, pend, packet)) {
2124 		return 0;
2125 	}
2126 
2127 	/* select src_if, port */
2128 	if(addr_is_ip6(&pend->addr, pend->addrlen)) {
2129 		if(!select_ifport(outnet, pend,
2130 			outnet->num_ip6, outnet->ip6_ifs))
2131 			return 0;
2132 	} else {
2133 		if(!select_ifport(outnet, pend,
2134 			outnet->num_ip4, outnet->ip4_ifs))
2135 			return 0;
2136 	}
2137 	log_assert(pend->pc && pend->pc->cp);
2138 
2139 	/* send it over the commlink */
2140 	if(!comm_point_send_udp_msg(pend->pc->cp, packet,
2141 		(struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) {
2142 		portcomm_loweruse(outnet, pend->pc);
2143 		return 0;
2144 	}
2145 
2146 	/* the timer system calls are made after sending the UDP packet, to
2147 	   keep the measured roundtrip smaller. */
2148 #ifndef S_SPLINT_S
2149 	tv.tv_sec = timeout/1000;
2150 	tv.tv_usec = (timeout%1000)*1000;
2151 #endif
2152 	comm_timer_set(pend->timer, &tv);
2153 
2154 #ifdef USE_DNSTAP
2155 	/*
2156 	 * Send the src (local service)/dst (upstream) addresses over DNSTAP.
2157 	 * There is no way to get the src (local service) addr if unbound
2158 	 * is not configured with specific outgoing IP-addresses, so we
2159 	 * pass 0.0.0.0 (::) as that argument to the
2160 	 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls.
2161 	 */
2162 	if(outnet->dtenv &&
2163 	   (outnet->dtenv->log_resolver_query_messages ||
2164 		outnet->dtenv->log_forwarder_query_messages)) {
2165 			log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen);
2166 			log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen);
2167 			dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp,
2168 				pend->sq->zone, pend->sq->zonelen, packet);
2169 	}
2170 #endif
2171 	return 1;
2172 }
2173 
2174 struct pending*
2175 pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet,
2176 	int timeout, comm_point_callback_type* cb, void* cb_arg)
2177 {
2178 	struct pending* pend = (struct pending*)calloc(1, sizeof(*pend));
2179 	if(!pend) return NULL;
2180 	pend->outnet = sq->outnet;
2181 	pend->sq = sq;
2182 	pend->addrlen = sq->addrlen;
2183 	memmove(&pend->addr, &sq->addr, sq->addrlen);
2184 	pend->cb = cb;
2185 	pend->cb_arg = cb_arg;
2186 	pend->node.key = pend;
2187 	pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb,
2188 		pend);
2189 	if(!pend->timer) {
2190 		free(pend);
2191 		return NULL;
2192 	}
2193 
2194 	if(sq->outnet->unused_fds == NULL) {
2195 		/* no unused fd, cannot create a new port (randomly) */
2196 		verbose(VERB_ALGO, "no fds available, udp query waiting");
2197 		pend->timeout = timeout;
2198 		pend->pkt_len = sldns_buffer_limit(packet);
2199 		pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet),
2200 			pend->pkt_len);
2201 		if(!pend->pkt) {
2202 			comm_timer_delete(pend->timer);
2203 			free(pend);
2204 			return NULL;
2205 		}
2206 		/* put at end of waiting list */
2207 		if(sq->outnet->udp_wait_last)
2208 			sq->outnet->udp_wait_last->next_waiting = pend;
2209 		else
2210 			sq->outnet->udp_wait_first = pend;
2211 		sq->outnet->udp_wait_last = pend;
2212 		return pend;
2213 	}
2214 	log_assert(!sq->busy);
2215 	sq->busy = 1;
2216 	if(!randomize_and_send_udp(pend, packet, timeout)) {
2217 		pending_delete(sq->outnet, pend);
2218 		return NULL;
2219 	}
2220 	sq->busy = 0;
2221 	return pend;
2222 }
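/*
 * When no unused fds are left the query is not dropped: the packet is copied
 * with memdup() and the pending entry is appended to the
 * udp_wait_first/udp_wait_last list.  It is sent later by
 * outnet_send_wait_udp(), which the reply and timeout handlers above call
 * after a port_comm is released.
 */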
2223 
2224 void
2225 outnet_tcptimer(void* arg)
2226 {
2227 	struct waiting_tcp* w = (struct waiting_tcp*)arg;
2228 	struct outside_network* outnet = w->outnet;
2229 	verbose(VERB_CLIENT, "outnet_tcptimer");
2230 	if(w->on_tcp_waiting_list) {
2231 		/* it is on the waiting list */
2232 		waiting_list_remove(outnet, w);
2233 		waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL);
2234 		waiting_tcp_delete(w);
2235 	} else {
2236 		/* it was in use */
2237 		struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting;
2238 		reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT);
2239 	}
2240 	use_free_buffer(outnet);
2241 }
2242 
2243 /** close the oldest reuse_tcp connection to make a fd and struct pending_tcp
2244  * available for a new stream connection */
2245 static void
2246 reuse_tcp_close_oldest(struct outside_network* outnet)
2247 {
2248 	struct reuse_tcp* reuse;
2249 	verbose(VERB_CLIENT, "reuse_tcp_close_oldest");
2250 	reuse = reuse_tcp_lru_snip(outnet);
2251 	if(!reuse) return;
2252 	/* free up */
2253 	reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED);
2254 }
2255 
2256 static uint16_t
2257 tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse)
2258 {
2259 	if(reuse)
2260 		return reuse_tcp_select_id(reuse, outnet);
2261 	return GET_RANDOM_ID(outnet->rnd);
2262 }
2263 
2264 /** find spare ID value for reuse tcp stream.  That is random and also does
2265  * not collide with an existing query ID that is in use or waiting */
2266 uint16_t
2267 reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet)
2268 {
2269 	uint16_t id = 0, curid, nextid;
2270 	const int try_random = 2000;
2271 	int i;
2272 	unsigned select, count, space;
2273 	rbnode_type* node;
2274 
2275 	/* make really sure the tree is not empty */
2276 	if(reuse->tree_by_id.count == 0) {
2277 		id = GET_RANDOM_ID(outnet->rnd);
2278 		return id;
2279 	}
2280 
2281 	/* try to find random empty spots by picking them */
2282 	for(i = 0; i<try_random; i++) {
2283 		id = GET_RANDOM_ID(outnet->rnd);
2284 		if(!reuse_tcp_by_id_find(reuse, id)) {
2285 			return id;
2286 		}
2287 	}
2288 
2289 	/* otherwise pick a random unused ID uniformly from the space not
2290 	 * covered by the tree: pick the n-th free number, then loop over
2291 	 * the empty spaces in the tree to find it */
2292 	log_assert(reuse->tree_by_id.count < 0xffff);
2293 	select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count);
2294 	/* select value now in 0 .. num free - 1 */
2295 
2296 	count = 0; /* number of free spaces passed by */
2297 	node = rbtree_first(&reuse->tree_by_id);
2298 	log_assert(node && node != RBTREE_NULL); /* tree not empty */
2299 	/* see if select is before first node */
2300 	if(select < (unsigned)tree_by_id_get_id(node))
2301 		return select;
2302 	count += tree_by_id_get_id(node);
2303 	/* perhaps select is between nodes */
2304 	while(node && node != RBTREE_NULL) {
2305 		rbnode_type* next = rbtree_next(node);
2306 		if(next && next != RBTREE_NULL) {
2307 			curid = tree_by_id_get_id(node);
2308 			nextid = tree_by_id_get_id(next);
2309 			log_assert(curid < nextid);
2310 			if(curid != 0xffff && curid + 1 < nextid) {
2311 				/* space between nodes */
2312 				space = nextid - curid - 1;
2313 				log_assert(select >= count);
2314 				if(select < count + space) {
2315 					/* here it is */
2316 					return curid + 1 + (select - count);
2317 				}
2318 				count += space;
2319 			}
2320 		}
2321 		node = next;
2322 	}
2323 
2324 	/* select is after the last node */
2325 	/* count is the number of free positions below the last node in the
2326 	 * tree */
2327 	node = rbtree_last(&reuse->tree_by_id);
2328 	log_assert(node && node != RBTREE_NULL); /* tree not empty */
2329 	curid = tree_by_id_get_id(node);
2330 	log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff);
2331 	return curid + 1 + (select - count);
2332 }
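/*
 * Illustrative example of the gap walk: if the tree holds the IDs {2, 5},
 * then select values 0 and 1 map to the free IDs 0 and 1 (before the first
 * node), select 2 and 3 map to 3 and 4 (the gap between the nodes), and any
 * larger select maps to 6 + (select - 4), i.e. past the last node.
 */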
2333 
2334 struct waiting_tcp*
2335 pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
2336 	int timeout, comm_point_callback_type* callback, void* callback_arg)
2337 {
2338 	struct pending_tcp* pend = sq->outnet->tcp_free;
2339 	struct reuse_tcp* reuse = NULL;
2340 	struct waiting_tcp* w;
2341 
2342 	verbose(VERB_CLIENT, "pending_tcp_query");
2343 	if(sldns_buffer_limit(packet) < sizeof(uint16_t)) {
2344 		verbose(VERB_ALGO, "pending tcp query with too short buffer < 2");
2345 		return NULL;
2346 	}
2347 
2348 	/* find out if a reused stream to the target exists */
2349 	/* if so, take it into use */
2350 	reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen,
2351 		sq->ssl_upstream);
2352 	if(reuse) {
2353 		log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse);
2354 		log_assert(reuse->pending);
2355 		pend = reuse->pending;
2356 		reuse_tcp_lru_touch(sq->outnet, reuse);
2357 	}
2358 
2359 	log_assert(!reuse || (reuse && pend));
2360 	/* if there is no free pend but we have reuse streams, close a
2361 	 * reuse stream to be able to open a new one to this target;
2362 	 * there is no use waiting to reuse a file descriptor while
2363 	 * another query needs that buffer and file descriptor now. */
2364 	if(!pend) {
2365 		reuse_tcp_close_oldest(sq->outnet);
2366 		pend = sq->outnet->tcp_free;
2367 		log_assert(!reuse || (pend == reuse->pending));
2368 	}
2369 
2370 	/* allocate space to store query */
2371 	w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp)
2372 		+ sldns_buffer_limit(packet));
2373 	if(!w) {
2374 		return NULL;
2375 	}
2376 	if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) {
2377 		free(w);
2378 		return NULL;
2379 	}
2380 	w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp);
2381 	w->pkt_len = sldns_buffer_limit(packet);
2382 	memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len);
2383 	w->id = tcp_select_id(sq->outnet, reuse);
2384 	LDNS_ID_SET(w->pkt, w->id);
2385 	memcpy(&w->addr, &sq->addr, sq->addrlen);
2386 	w->addrlen = sq->addrlen;
2387 	w->outnet = sq->outnet;
2388 	w->on_tcp_waiting_list = 0;
2389 	w->next_waiting = NULL;
2390 	w->cb = callback;
2391 	w->cb_arg = callback_arg;
2392 	w->ssl_upstream = sq->ssl_upstream;
2393 	w->tls_auth_name = sq->tls_auth_name;
2394 	w->timeout = timeout;
2395 	w->id_node.key = NULL;
2396 	w->write_wait_prev = NULL;
2397 	w->write_wait_next = NULL;
2398 	w->write_wait_queued = 0;
2399 	w->error_count = 0;
2400 #ifdef USE_DNSTAP
2401 	w->sq = NULL;
2402 #endif
2403 	w->in_cb_and_decommission = 0;
2404 	if(pend) {
2405 		/* we have a buffer available right now */
2406 		if(reuse) {
2407 			log_assert(reuse == &pend->reuse);
2408 			/* reuse existing fd, write query and continue */
2409 			/* store query in tree by id */
2410 			verbose(VERB_CLIENT, "pending_tcp_query: reuse, store");
2411 			w->next_waiting = (void*)pend;
2412 			reuse_tree_by_id_insert(&pend->reuse, w);
2413 			/* can we write right now? */
2414 			if(pend->query == NULL) {
2415 				/* write straight away */
2416 				/* stop the timer on read of the fd */
2417 				comm_point_stop_listening(pend->c);
2418 				pend->query = w;
2419 				outnet_tcp_take_query_setup(pend->c->fd, pend,
2420 					w);
2421 			} else {
2422 				/* put it in the waiting list for
2423 				 * this stream */
2424 				reuse_write_wait_push_back(&pend->reuse, w);
2425 			}
2426 		} else {
2427 			/* create new fd and connect to addr, setup to
2428 			 * write query */
2429 			verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect");
2430 			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
2431 			pend->reuse.pending = pend;
2432 			memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen);
2433 			pend->reuse.addrlen = sq->addrlen;
2434 			if(!outnet_tcp_take_into_use(w)) {
2435 				waiting_tcp_delete(w);
2436 				return NULL;
2437 			}
2438 		}
2439 #ifdef USE_DNSTAP
2440 		if(sq->outnet->dtenv &&
2441 		   (sq->outnet->dtenv->log_resolver_query_messages ||
2442 		    sq->outnet->dtenv->log_forwarder_query_messages)) {
2443 			/* use w->pkt, because it has the ID value */
2444 			sldns_buffer tmp;
2445 			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
2446 			dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr,
2447 				&pend->pi->addr, comm_tcp, sq->zone,
2448 				sq->zonelen, &tmp);
2449 		}
2450 #endif
2451 	} else {
2452 		/* queue up */
2453 		/* waiting for a buffer on the outside network buffer wait
2454 		 * list */
2455 		verbose(VERB_CLIENT, "pending_tcp_query: queue to wait");
2456 #ifdef USE_DNSTAP
2457 		w->sq = sq;
2458 #endif
2459 		outnet_add_tcp_waiting(sq->outnet, w);
2460 	}
2461 	return w;
2462 }
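/*
 * Three outcomes are possible above: an existing reuse stream to the target
 * is found and w is inserted into its tree_by_id (written immediately if the
 * stream is idle, otherwise queued on its write-wait list); a free
 * pending_tcp is available (closing the oldest reuse stream if needed) and a
 * new connection is set up with outnet_tcp_take_into_use(); or neither
 * exists and w is parked on the outnet tcp waiting list.
 */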
2463 
2464 /** create query for serviced queries */
2465 static void
2466 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen,
2467 	uint16_t qtype, uint16_t qclass, uint16_t flags)
2468 {
2469 	sldns_buffer_clear(buff);
2470 	/* skip id */
2471 	sldns_buffer_write_u16(buff, flags);
2472 	sldns_buffer_write_u16(buff, 1); /* qdcount */
2473 	sldns_buffer_write_u16(buff, 0); /* ancount */
2474 	sldns_buffer_write_u16(buff, 0); /* nscount */
2475 	sldns_buffer_write_u16(buff, 0); /* arcount */
2476 	sldns_buffer_write(buff, qname, qnamelen);
2477 	sldns_buffer_write_u16(buff, qtype);
2478 	sldns_buffer_write_u16(buff, qclass);
2479 	sldns_buffer_flip(buff);
2480 }
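/*
 * The buffer written here deliberately omits the 2-byte ID, so the header
 * starts at the flags field; that is why sq->qbuf offsets elsewhere use +10
 * for the qname where full packets use +12.  serviced_encode() later writes
 * a zero ID placeholder in front of this data, and select_id() or
 * tcp_select_id() fill in the real ID just before sending.
 */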
2481 
2482 /** lookup serviced query in serviced query rbtree */
2483 static struct serviced_query*
2484 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2485 	struct sockaddr_storage* addr, socklen_t addrlen,
2486 	struct edns_option* opt_list)
2487 {
2488 	struct serviced_query key;
2489 	key.node.key = &key;
2490 	key.qbuf = sldns_buffer_begin(buff);
2491 	key.qbuflen = sldns_buffer_limit(buff);
2492 	key.dnssec = dnssec;
2493 	memcpy(&key.addr, addr, addrlen);
2494 	key.addrlen = addrlen;
2495 	key.outnet = outnet;
2496 	key.opt_list = opt_list;
2497 	return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
2498 }
2499 
2500 void
2501 serviced_timer_cb(void* arg)
2502 {
2503 	struct serviced_query* sq = (struct serviced_query*)arg;
2504 	struct outside_network* outnet = sq->outnet;
2505 	verbose(VERB_ALGO, "serviced send timer");
2506 	/* By the time this cb is called we may not have any registered
2507 	 * callbacks for this serviced_query anymore; in that case do not send. */
2508 	if(!sq->cblist)
2509 		goto delete;
2510 	/* perform first network action */
2511 	if(outnet->do_udp && !(sq->tcp_upstream || sq->ssl_upstream)) {
2512 		if(!serviced_udp_send(sq, outnet->udp_buff))
2513 			goto delete;
2514 	} else {
2515 		if(!serviced_tcp_send(sq, outnet->udp_buff))
2516 			goto delete;
2517 	}
2518 	/* Maybe by this time we don't have callbacks attached anymore. Don't
2519 	 * proactively try to delete; let it run and maybe another callback
2520 	 * will get attached by the time we get an answer. */
2521 	return;
2522 delete:
2523 	serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
2524 }
2525 
2526 /** Create new serviced entry */
2527 static struct serviced_query*
2528 serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2529 	int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream,
2530 	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
2531 	uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list,
2532 	size_t pad_queries_block_size, struct alloc_cache* alloc,
2533 	struct regional* region)
2534 {
2535 	struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
2536 	struct timeval t;
2537 #ifdef UNBOUND_DEBUG
2538 	rbnode_type* ins;
2539 #endif
2540 	if(!sq)
2541 		return NULL;
2542 	sq->node.key = sq;
2543 	sq->alloc = alloc;
2544 	sq->region = region;
2545 	sq->qbuf = regional_alloc_init(region, sldns_buffer_begin(buff),
2546 		sldns_buffer_limit(buff));
2547 	if(!sq->qbuf) {
2548 		alloc_reg_release(alloc, region);
2549 		free(sq);
2550 		return NULL;
2551 	}
2552 	sq->qbuflen = sldns_buffer_limit(buff);
2553 	sq->zone = regional_alloc_init(region, zone, zonelen);
2554 	if(!sq->zone) {
2555 		alloc_reg_release(alloc, region);
2556 		free(sq);
2557 		return NULL;
2558 	}
2559 	sq->zonelen = zonelen;
2560 	sq->qtype = qtype;
2561 	sq->dnssec = dnssec;
2562 	sq->want_dnssec = want_dnssec;
2563 	sq->nocaps = nocaps;
2564 	sq->tcp_upstream = tcp_upstream;
2565 	sq->ssl_upstream = ssl_upstream;
2566 	if(tls_auth_name) {
2567 		sq->tls_auth_name = regional_strdup(region, tls_auth_name);
2568 		if(!sq->tls_auth_name) {
2569 			alloc_reg_release(alloc, region);
2570 			free(sq);
2571 			return NULL;
2572 		}
2573 	} else {
2574 		sq->tls_auth_name = NULL;
2575 	}
2576 	memcpy(&sq->addr, addr, addrlen);
2577 	sq->addrlen = addrlen;
2578 	sq->opt_list = opt_list;
2579 	sq->busy = 0;
2580 	sq->timer = comm_timer_create(outnet->base, serviced_timer_cb, sq);
2581 	if(!sq->timer) {
2582 		alloc_reg_release(alloc, region);
2583 		free(sq);
2584 		return NULL;
2585 	}
2586 	memset(&t, 0, sizeof(t));
2587 	comm_timer_set(sq->timer, &t);
2588 	sq->outnet = outnet;
2589 	sq->cblist = NULL;
2590 	sq->pending = NULL;
2591 	sq->status = serviced_initial;
2592 	sq->retry = 0;
2593 	sq->to_be_deleted = 0;
2594 	sq->padding_block_size = pad_queries_block_size;
2595 #ifdef UNBOUND_DEBUG
2596 	ins =
2597 #else
2598 	(void)
2599 #endif
2600 	rbtree_insert(outnet->serviced, &sq->node);
2601 	log_assert(ins != NULL); /* must not be already present */
2602 	return sq;
2603 }
2604 
2605 /** remove waiting tcp from the outnet waiting list */
2606 static void
2607 waiting_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
2608 {
2609 	struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
2610 	w->on_tcp_waiting_list = 0;
2611 	while(p) {
2612 		if(p == w) {
2613 			/* remove w */
2614 			if(prev)
2615 				prev->next_waiting = w->next_waiting;
2616 			else	outnet->tcp_wait_first = w->next_waiting;
2617 			if(outnet->tcp_wait_last == w)
2618 				outnet->tcp_wait_last = prev;
2619 			return;
2620 		}
2621 		prev = p;
2622 		p = p->next_waiting;
2623 	}
2624 	/* waiting_list_remove is currently called only with items that are
2625 	 * already in the waiting list. */
2626 	log_assert(0);
2627 }
2628 
2629 /** reuse tcp stream, remove serviced query from stream,
2630  * return true if the stream is kept, false if it is to be closed */
2631 static int
2632 reuse_tcp_remove_serviced_keep(struct waiting_tcp* w,
2633 	struct serviced_query* sq)
2634 {
2635 	struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting;
2636 	verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep");
2637 	/* remove the callback. let the query continue to write, so that the
2638 	 * stream itself is not cancelled.  also keep it as an entry in the
2639 	 * tree_by_id, in case the answer (that we no longer want) returns,
2640 	 * and so the same ID number cannot be picked meanwhile */
2641 	w->cb = NULL;
2642 	/* see if it can be entered in the reuse tree;
2643 	 * for that the FD must not be -1 */
2644 	if(pend_tcp->c->fd == -1) {
2645 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd");
2646 		return 0;
2647 	}
2648 	/* if in tree and used by other queries */
2649 	if(pend_tcp->reuse.node.key) {
2650 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries");
2651 		/* do not reset the keepalive timer, for that
2652 		 * we'd need traffic, and this is where the serviced is
2653 		 * removed due to state machine internal reasons,
2654 		 * eg. iterator no longer interested in this query */
2655 		return 1;
2656 	}
2657 	/* if still open and want to keep it open */
2658 	if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count <
2659 		sq->outnet->tcp_reuse_max) {
2660 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open");
2661 		/* set a keepalive timer on it */
2662 		if(!reuse_tcp_insert(sq->outnet, pend_tcp)) {
2663 			return 0;
2664 		}
2665 		reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout);
2666 		return 1;
2667 	}
2668 	return 0;
2669 }
2670 
2671 /** cleanup serviced query entry */
2672 static void
2673 serviced_delete(struct serviced_query* sq)
2674 {
2675 	verbose(VERB_CLIENT, "serviced_delete");
2676 	if(sq->pending) {
2677 		/* clear up the pending query */
2678 		if(sq->status == serviced_query_UDP_EDNS ||
2679 			sq->status == serviced_query_UDP ||
2680 			sq->status == serviced_query_UDP_EDNS_FRAG ||
2681 			sq->status == serviced_query_UDP_EDNS_fallback) {
2682 			struct pending* p = (struct pending*)sq->pending;
2683 			verbose(VERB_CLIENT, "serviced_delete: UDP");
2684 			if(p->pc)
2685 				portcomm_loweruse(sq->outnet, p->pc);
2686 			pending_delete(sq->outnet, p);
2687 			/* this call can cause reentrant calls back into the
2688 			 * mesh */
2689 			outnet_send_wait_udp(sq->outnet);
2690 		} else {
2691 			struct waiting_tcp* w = (struct waiting_tcp*)
2692 				sq->pending;
2693 			verbose(VERB_CLIENT, "serviced_delete: TCP");
2694 			log_assert(!(w->write_wait_queued && w->on_tcp_waiting_list));
2695 			/* if on stream-write-waiting list then
2696 			 * remove from waiting list and waiting_tcp_delete */
2697 			if(w->write_wait_queued) {
2698 				struct pending_tcp* pend =
2699 					(struct pending_tcp*)w->next_waiting;
2700 				verbose(VERB_CLIENT, "serviced_delete: writewait");
2701 				if(!w->in_cb_and_decommission)
2702 					reuse_tree_by_id_delete(&pend->reuse, w);
2703 				reuse_write_wait_remove(&pend->reuse, w);
2704 				if(!w->in_cb_and_decommission)
2705 					waiting_tcp_delete(w);
2706 			} else if(!w->on_tcp_waiting_list) {
2707 				struct pending_tcp* pend =
2708 					(struct pending_tcp*)w->next_waiting;
2709 				verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep");
2710 				/* w needs to stay on tree_by_id to not assign
2711 				 * the same ID; remove the callback since its
2712 				 * serviced_query will be gone. */
2713 				w->cb = NULL;
2714 				if(!reuse_tcp_remove_serviced_keep(w, sq)) {
2715 					if(!w->in_cb_and_decommission)
2716 						reuse_cb_and_decommission(sq->outnet,
2717 							pend, NETEVENT_CLOSED);
2718 					use_free_buffer(sq->outnet);
2719 				}
2720 				sq->pending = NULL;
2721 			} else {
2722 				verbose(VERB_CLIENT, "serviced_delete: tcpwait");
2723 				waiting_list_remove(sq->outnet, w);
2724 				if(!w->in_cb_and_decommission)
2725 					waiting_tcp_delete(w);
2726 			}
2727 		}
2728 	}
2729 	/* does not delete from tree, caller has to do that */
2730 	serviced_node_del(&sq->node, NULL);
2731 }
2732 
2733 /** perturb a dname capitalization randomly */
2734 static void
2735 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len)
2736 {
2737 	uint8_t lablen;
2738 	uint8_t* d = qbuf + 10;
2739 	long int random = 0;
2740 	int bits = 0;
2741 	log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */);
2742 	(void)len;
2743 	lablen = *d++;
2744 	while(lablen) {
2745 		while(lablen--) {
2746 			/* only perturb A-Z, a-z */
2747 			if(isalpha((unsigned char)*d)) {
2748 				/* get a random bit */
2749 				if(bits == 0) {
2750 					random = ub_random(rnd);
2751 					bits = 30;
2752 				}
2753 				if(random & 0x1) {
2754 					*d = (uint8_t)toupper((unsigned char)*d);
2755 				} else {
2756 					*d = (uint8_t)tolower((unsigned char)*d);
2757 				}
2758 				random >>= 1;
2759 				bits--;
2760 			}
2761 			d++;
2762 		}
2763 		lablen = *d++;
2764 	}
2765 	if(verbosity >= VERB_ALGO) {
2766 		char buf[LDNS_MAX_DOMAINLEN+1];
2767 		dname_str(qbuf+10, buf);
2768 		verbose(VERB_ALGO, "qname perturbed to %s", buf);
2769 	}
2770 }
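/*
 * Example: "example.com" may become "eXamPle.cOm".  The upstream server must
 * echo the question name byte for byte, which serviced_callbacks() verifies
 * via serviced_check_qname() (a case-sensitive memcmp), so each letter adds
 * roughly one bit of spoofing resistance (the dns-0x20 technique).
 */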
2771 
2772 /** put serviced query into a buffer */
2773 static void
2774 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns)
2775 {
2776 	/* if we are using 0x20 bits for ID randomness, perturb them */
2777 	if(sq->outnet->use_caps_for_id && !sq->nocaps) {
2778 		serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen);
2779 	}
2780 	/* generate query */
2781 	sldns_buffer_clear(buff);
2782 	sldns_buffer_write_u16(buff, 0); /* id placeholder */
2783 	sldns_buffer_write(buff, sq->qbuf, sq->qbuflen);
2784 	sldns_buffer_flip(buff);
2785 	if(with_edns) {
2786 		/* add edns section */
2787 		struct edns_data edns;
2788 		struct edns_option padding_option;
2789 		edns.edns_present = 1;
2790 		edns.ext_rcode = 0;
2791 		edns.edns_version = EDNS_ADVERTISED_VERSION;
2792 		edns.opt_list_in = NULL;
2793 		edns.opt_list_out = sq->opt_list;
2794 		edns.opt_list_inplace_cb_out = NULL;
2795 		if(sq->status == serviced_query_UDP_EDNS_FRAG) {
2796 			if(addr_is_ip6(&sq->addr, sq->addrlen)) {
2797 				if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE)
2798 					edns.udp_size = EDNS_FRAG_SIZE_IP6;
2799 				else	edns.udp_size = EDNS_ADVERTISED_SIZE;
2800 			} else {
2801 				if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE)
2802 					edns.udp_size = EDNS_FRAG_SIZE_IP4;
2803 				else	edns.udp_size = EDNS_ADVERTISED_SIZE;
2804 			}
2805 		} else {
2806 			edns.udp_size = EDNS_ADVERTISED_SIZE;
2807 		}
2808 		edns.bits = 0;
2809 		if(sq->dnssec & EDNS_DO)
2810 			edns.bits = EDNS_DO;
2811 		if(sq->dnssec & BIT_CD)
2812 			LDNS_CD_SET(sldns_buffer_begin(buff));
2813 		if (sq->ssl_upstream && sq->padding_block_size) {
2814 			padding_option.opt_code = LDNS_EDNS_PADDING;
2815 			padding_option.opt_len = 0;
2816 			padding_option.opt_data = NULL;
2817 			padding_option.next = edns.opt_list_out;
2818 			edns.opt_list_out = &padding_option;
2819 			edns.padding_block_size = sq->padding_block_size;
2820 		}
2821 		attach_edns_record(buff, &edns);
2822 	}
2823 }
2824 
2825 /**
2826  * Perform serviced query UDP sending operation.
2827  * Sends UDP with EDNS, unless the infra host is marked as non-EDNS.
2828  * @param sq: query to send.
2829  * @param buff: buffer scratch space.
2830  * @return 0 on error.
2831  */
2832 static int
2833 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff)
2834 {
2835 	int rtt, vs;
2836 	uint8_t edns_lame_known;
2837 	time_t now = *sq->outnet->now_secs;
2838 
2839 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
2840 		sq->zonelen, now, &vs, &edns_lame_known, &rtt))
2841 		return 0;
2842 	sq->last_rtt = rtt;
2843 	verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs);
2844 	if(sq->status == serviced_initial) {
2845 		if(vs != -1) {
2846 			sq->status = serviced_query_UDP_EDNS;
2847 		} else {
2848 			sq->status = serviced_query_UDP;
2849 		}
2850 	}
2851 	serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) ||
2852 		(sq->status == serviced_query_UDP_EDNS_FRAG));
2853 	sq->last_sent_time = *sq->outnet->now_tv;
2854 	sq->edns_lame_known = (int)edns_lame_known;
2855 	verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt);
2856 	sq->pending = pending_udp_query(sq, buff, rtt,
2857 		serviced_udp_callback, sq);
2858 	if(!sq->pending)
2859 		return 0;
2860 	return 1;
2861 }
2862 
2863 /** check that perturbed qname is identical */
2864 static int
2865 serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen)
2866 {
2867 	uint8_t* d1 = sldns_buffer_begin(pkt)+12;
2868 	uint8_t* d2 = qbuf+10;
2869 	uint8_t len1, len2;
2870 	int count = 0;
2871 	if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */
2872 		return 0;
2873 	log_assert(qbuflen >= 15 /* 10 header, root, type, class */);
2874 	len1 = *d1++;
2875 	len2 = *d2++;
2876 	while(len1 != 0 || len2 != 0) {
2877 		if(LABEL_IS_PTR(len1)) {
2878 			/* check if we can read *d1 with compression ptr rest */
2879 			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2880 				return 0;
2881 			d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1);
2882 			/* check if we can read the destination *d1 */
2883 			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2884 				return 0;
2885 			len1 = *d1++;
2886 			if(count++ > MAX_COMPRESS_PTRS)
2887 				return 0;
2888 			continue;
2889 		}
2890 		if(d2 > qbuf+qbuflen)
2891 			return 0;
2892 		if(len1 != len2)
2893 			return 0;
2894 		if(len1 > LDNS_MAX_LABELLEN)
2895 			return 0;
2896 		/* check len1 + 1(next length) are okay to read */
2897 		if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2898 			return 0;
2899 		log_assert(len1 <= LDNS_MAX_LABELLEN);
2900 		log_assert(len2 <= LDNS_MAX_LABELLEN);
2901 		log_assert(len1 == len2 && len1 != 0);
2902 		/* compare the labels - bitwise identical */
2903 		if(memcmp(d1, d2, len1) != 0)
2904 			return 0;
2905 		d1 += len1;
2906 		d2 += len2;
2907 		len1 = *d1++;
2908 		len2 = *d2++;
2909 	}
2910 	return 1;
2911 }
2912 
2913 /** call the callbacks for a serviced query */
2914 static void
2915 serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c,
2916 	struct comm_reply* rep)
2917 {
2918 	struct service_callback* p;
2919 	int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb */
2920 	uint8_t *backup_p = NULL;
2921 	size_t backlen = 0;
2922 #ifdef UNBOUND_DEBUG
2923 	rbnode_type* rem =
2924 #else
2925 	(void)
2926 #endif
2927 	/* remove from tree, and schedule for deletion, so that callbacks
2928 	 * can safely deregister themselves and even create new serviced
2929 	 * queries that are identical to this one. */
2930 	rbtree_delete(sq->outnet->serviced, sq);
2931 	log_assert(rem); /* should have been present */
2932 	sq->to_be_deleted = 1;
2933 	verbose(VERB_ALGO, "svcd callbacks start");
2934 	if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c &&
2935 		!sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) {
2936 		/* for type PTR do not check the perturbed name in the answer,
2937 		 * for compatibility with cisco dns guard boxes that mess up
2938 		 * the 0x20 contents of reverse queries */
2939 		/* noerror and nxdomain must have a qname in reply */
2940 		if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 &&
2941 			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
2942 				== LDNS_RCODE_NOERROR ||
2943 			 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
2944 				== LDNS_RCODE_NXDOMAIN)) {
2945 			verbose(VERB_DETAIL, "no qname in reply to check 0x20ID");
2946 			log_addr(VERB_DETAIL, "from server",
2947 				&sq->addr, sq->addrlen);
2948 			log_buf(VERB_DETAIL, "for packet", c->buffer);
2949 			error = NETEVENT_CLOSED;
2950 			c = NULL;
2951 		} else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 &&
2952 			!serviced_check_qname(c->buffer, sq->qbuf,
2953 			sq->qbuflen)) {
2954 			verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname");
2955 			log_addr(VERB_DETAIL, "from server",
2956 				&sq->addr, sq->addrlen);
2957 			log_buf(VERB_DETAIL, "for packet", c->buffer);
2958 			error = NETEVENT_CAPSFAIL;
2959 			/* and cleanup too */
2960 			pkt_dname_tolower(c->buffer,
2961 				sldns_buffer_at(c->buffer, 12));
2962 		} else {
2963 			verbose(VERB_ALGO, "good 0x20-ID in reply qname");
2964 			/* cleanup caps, prettier cache contents. */
2965 			pkt_dname_tolower(c->buffer,
2966 				sldns_buffer_at(c->buffer, 12));
2967 		}
2968 	}
2969 	if(dobackup && c) {
2970 		/* make a backup of the query, since the querystate processing
2971 		 * may send outgoing queries that overwrite the buffer.
2972 		 * use a secondary buffer to store the query.
2973 		 * This is a data copy, but faster than sending the packet to the server again */
2974 		backlen = sldns_buffer_limit(c->buffer);
2975 		backup_p = regional_alloc_init(sq->region,
2976 			sldns_buffer_begin(c->buffer), backlen);
2977 		if(!backup_p) {
2978 			log_err("malloc failure in serviced query callbacks");
2979 			error = NETEVENT_CLOSED;
2980 			c = NULL;
2981 		}
2982 		sq->outnet->svcd_overhead = backlen;
2983 	}
2984 	/* test the actual sq->cblist, because the next elem could be deleted*/
2985 	while((p=sq->cblist) != NULL) {
2986 		sq->cblist = p->next; /* remove this element */
2987 		if(dobackup && c) {
2988 			sldns_buffer_clear(c->buffer);
2989 			sldns_buffer_write(c->buffer, backup_p, backlen);
2990 			sldns_buffer_flip(c->buffer);
2991 		}
2992 		fptr_ok(fptr_whitelist_serviced_query(p->cb));
2993 		(void)(*p->cb)(c, p->cb_arg, error, rep);
2994 	}
2995 	if(backup_p) {
2996 		sq->outnet->svcd_overhead = 0;
2997 	}
2998 	verbose(VERB_ALGO, "svcd callbacks end");
2999 	log_assert(sq->cblist == NULL);
3000 	serviced_delete(sq);
3001 }
3002 
3003 int
3004 serviced_tcp_callback(struct comm_point* c, void* arg, int error,
3005         struct comm_reply* rep)
3006 {
3007 	struct serviced_query* sq = (struct serviced_query*)arg;
3008 	struct comm_reply r2;
3009 #ifdef USE_DNSTAP
3010 	struct waiting_tcp* w = (struct waiting_tcp*)sq->pending;
3011 	struct pending_tcp* pend_tcp = NULL;
3012 	struct port_if* pi = NULL;
3013 	if(w && !w->on_tcp_waiting_list && w->next_waiting) {
3014 		pend_tcp = (struct pending_tcp*)w->next_waiting;
3015 		pi = pend_tcp->pi;
3016 	}
3017 #endif
3018 	sq->pending = NULL; /* removed after this callback */
3019 	if(error != NETEVENT_NOERROR)
3020 		log_addr(VERB_QUERY, "tcp error for address",
3021 			&sq->addr, sq->addrlen);
3022 	if(error==NETEVENT_NOERROR)
3023 		infra_update_tcp_works(sq->outnet->infra, &sq->addr,
3024 			sq->addrlen, sq->zone, sq->zonelen);
3025 #ifdef USE_DNSTAP
3026 	/*
3027 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
3028 	 */
3029 	if(error==NETEVENT_NOERROR && pi && sq->outnet->dtenv &&
3030 	   (sq->outnet->dtenv->log_resolver_response_messages ||
3031 	    sq->outnet->dtenv->log_forwarder_response_messages)) {
3032 		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3033 		log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
3034 		dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr,
3035 			&pi->addr, c->type, sq->zone, sq->zonelen, sq->qbuf,
3036 			sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv,
3037 			c->buffer);
3038 	}
3039 #endif
3040 	if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS &&
3041 		(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3042 		LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin(
3043 		c->buffer)) == LDNS_RCODE_NOTIMPL) ) {
3044 		/* attempt to fallback to nonEDNS */
3045 		sq->status = serviced_query_TCP_EDNS_fallback;
3046 		serviced_tcp_initiate(sq, c->buffer);
3047 		return 0;
3048 	} else if(error==NETEVENT_NOERROR &&
3049 		sq->status == serviced_query_TCP_EDNS_fallback &&
3050 			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3051 			LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE(
3052 			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN
3053 			|| LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3054 			== LDNS_RCODE_YXDOMAIN)) {
3055 		/* the fallback produced a result that looks promising, note
3056 		 * that this server should be approached without EDNS */
3057 		/* only store noEDNS in cache if domain is noDNSSEC */
3058 		if(!sq->want_dnssec)
3059 		  if(!infra_edns_update(sq->outnet->infra, &sq->addr,
3060 			sq->addrlen, sq->zone, sq->zonelen, -1,
3061 			*sq->outnet->now_secs))
3062 			log_err("Out of memory caching no edns for host");
3063 		sq->status = serviced_query_TCP;
3064 	}
3065 	if(sq->tcp_upstream || sq->ssl_upstream) {
3066 	    struct timeval now = *sq->outnet->now_tv;
3067 	    if(error!=NETEVENT_NOERROR) {
3068 	        if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3069 		    sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3070 		    -1, sq->last_rtt, (time_t)now.tv_sec))
3071 		    log_err("out of memory in TCP exponential backoff.");
3072 	    } else if(now.tv_sec > sq->last_sent_time.tv_sec ||
3073 		(now.tv_sec == sq->last_sent_time.tv_sec &&
3074 		now.tv_usec > sq->last_sent_time.tv_usec)) {
3075 		/* convert from microseconds to milliseconds */
3076 		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3077 		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3078 		verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime);
3079 		log_assert(roundtime >= 0);
3080 		/* only store if less than AUTH_TIMEOUT seconds; it could be
3081 		 * huge if the system hibernated and we just woke up */
3082 		if(roundtime < 60000) {
3083 		    if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3084 			sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3085 			roundtime, sq->last_rtt, (time_t)now.tv_sec))
3086 			log_err("out of memory noting rtt.");
3087 		}
3088 	    }
3089 	}
3090 	/* insert address into reply info */
3091 	if(!rep) {
3092 		/* create one if there isn't (on errors) */
3093 		rep = &r2;
3094 		r2.c = c;
3095 	}
3096 	memcpy(&rep->addr, &sq->addr, sq->addrlen);
3097 	rep->addrlen = sq->addrlen;
3098 	serviced_callbacks(sq, error, c, rep);
3099 	return 0;
3100 }
3101 
3102 static void
3103 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff)
3104 {
3105 	verbose(VERB_ALGO, "initiate TCP query %s",
3106 		sq->status==serviced_query_TCP_EDNS?"EDNS":"");
3107 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3108 	sq->last_sent_time = *sq->outnet->now_tv;
3109 	log_assert(!sq->busy);
3110 	sq->busy = 1;
3111 	sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout,
3112 		serviced_tcp_callback, sq);
3113 	sq->busy = 0;
3114 	if(!sq->pending) {
3115 		/* delete from tree so that a retry by above layer does not
3116 		 * clash with this entry */
3117 		verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query");
3118 		serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
3119 	}
3120 }
3121 
3122 /** Send serviced query over TCP; return false on initial failure */
3123 static int
3124 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff)
3125 {
3126 	int vs, rtt, timeout;
3127 	uint8_t edns_lame_known;
3128 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
3129 		sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known,
3130 		&rtt))
3131 		return 0;
3132 	sq->last_rtt = rtt;
3133 	if(vs != -1)
3134 		sq->status = serviced_query_TCP_EDNS;
3135 	else 	sq->status = serviced_query_TCP;
3136 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3137 	sq->last_sent_time = *sq->outnet->now_tv;
3138 	if(sq->tcp_upstream || sq->ssl_upstream) {
3139 		timeout = rtt;
3140 		if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout)
3141 			timeout = sq->outnet->tcp_auth_query_timeout;
3142 	} else {
3143 		timeout = sq->outnet->tcp_auth_query_timeout;
3144 	}
3145 	log_assert(!sq->busy);
3146 	sq->busy = 1;
3147 	sq->pending = pending_tcp_query(sq, buff, timeout,
3148 		serviced_tcp_callback, sq);
3149 	sq->busy = 0;
3150 	return sq->pending != NULL;
3151 }
3152 
3153 /* see if the packet is EDNS-malformed; it has zeroes at the start of the
3154  * answer section.  This comes from servers that return malformed packets
3155  * to EDNS0 queries, but return good packets for non-EDNS0 queries.
3156  * We try to detect their output without resorting to a full parse or a
3157  * check for too many bytes after the end of the packet. */
3158 static int
3159 packet_edns_malformed(struct sldns_buffer* buf, int qtype)
3160 {
3161 	size_t len;
3162 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE)
3163 		return 1; /* malformed */
3164 	/* they have NOERROR rcode, 1 answer. */
3165 	if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR)
3166 		return 0;
3167 	/* one query (to skip) and answer records */
3168 	if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 ||
3169 		LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0)
3170 		return 0;
3171 	/* skip qname */
3172 	len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE),
3173 		sldns_buffer_limit(buf)-LDNS_HEADER_SIZE);
3174 	if(len == 0)
3175 		return 0;
3176 	if(len == 1 && qtype == 0)
3177 		return 0; /* we asked for '.' and type 0 */
3178 	/* and then 4 bytes (type and class of query) */
3179 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3)
3180 		return 0;
3181 
3182 	/* and start with 11 zeroes as the answer RR */
3183 	/* so check the qtype of the answer record, qname=0, type=0 */
3184 	if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 &&
3185 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 &&
3186 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0)
3187 		return 1;
3188 	return 0;
3189 }
3190 
3191 int
3192 serviced_udp_callback(struct comm_point* c, void* arg, int error,
3193         struct comm_reply* rep)
3194 {
3195 	struct serviced_query* sq = (struct serviced_query*)arg;
3196 	struct outside_network* outnet = sq->outnet;
3197 	struct timeval now = *sq->outnet->now_tv;
3198 #ifdef USE_DNSTAP
3199 	struct pending* p = (struct pending*)sq->pending;
3200 #endif
3201 
3202 	sq->pending = NULL; /* removed after callback */
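	/* Timeout handling below: first retry the query advertising a smaller
	 * EDNS buffer size (the _FRAG state), then back off the cached RTT
	 * and retry up to OUTBOUND_UDP_RETRY times; only after that does the
	 * timeout reach the serviced callbacks as an error. */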
3203 	if(error == NETEVENT_TIMEOUT) {
3204 		if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000) {
3205 			/* fallback to 1480/1280 */
3206 			sq->status = serviced_query_UDP_EDNS_FRAG;
3207 			log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10,
3208 				&sq->addr, sq->addrlen);
3209 			if(!serviced_udp_send(sq, c->buffer)) {
3210 				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3211 			}
3212 			return 0;
3213 		}
3214 		if(sq->status == serviced_query_UDP_EDNS_FRAG) {
3215 			/* fragmentation size did not fix it */
3216 			sq->status = serviced_query_UDP_EDNS;
3217 		}
3218 		sq->retry++;
3219 		if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3220 			sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt,
3221 			(time_t)now.tv_sec))
3222 			log_err("out of memory in UDP exponential backoff");
3223 		if(sq->retry < OUTBOUND_UDP_RETRY) {
3224 			log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10,
3225 				&sq->addr, sq->addrlen);
3226 			if(!serviced_udp_send(sq, c->buffer)) {
3227 				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3228 			}
3229 			return 0;
3230 		}
3231 	}
3232 	if(error != NETEVENT_NOERROR) {
3233 		/* UDP returned an error (because no ID or no interface was available) */
3234 		serviced_callbacks(sq, error, c, rep);
3235 		return 0;
3236 	}
3237 #ifdef USE_DNSTAP
3238 	/*
3239 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
3240 	 */
3241 	if(error == NETEVENT_NOERROR && outnet->dtenv && p->pc &&
3242 		(outnet->dtenv->log_resolver_response_messages ||
3243 		outnet->dtenv->log_forwarder_response_messages)) {
3244 		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3245 		log_addr(VERB_ALGO, "to local addr", &p->pc->pif->addr,
3246 			p->pc->pif->addrlen);
3247 		dt_msg_send_outside_response(outnet->dtenv, &sq->addr,
3248 			&p->pc->pif->addr, c->type, sq->zone, sq->zonelen,
3249 			sq->qbuf, sq->qbuflen, &sq->last_sent_time,
3250 			sq->outnet->now_tv, c->buffer);
3251 	}
3252 #endif
3253 	if( (sq->status == serviced_query_UDP_EDNS
3254 		||sq->status == serviced_query_UDP_EDNS_FRAG)
3255 		&& (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3256 			== LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(
3257 			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL
3258 		    || packet_edns_malformed(c->buffer, sq->qtype)
3259 			)) {
3260 		/* try to get an answer by falling back without EDNS */
3261 		verbose(VERB_ALGO, "serviced query: attempt without EDNS");
3262 		sq->status = serviced_query_UDP_EDNS_fallback;
3263 		sq->retry = 0;
3264 		if(!serviced_udp_send(sq, c->buffer)) {
3265 			serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3266 		}
3267 		return 0;
3268 	} else if(sq->status == serviced_query_UDP_EDNS &&
3269 		!sq->edns_lame_known) {
3270 		/* now we know that EDNS queries receive answers; store that */
3271 		log_addr(VERB_ALGO, "serviced query: EDNS works for",
3272 			&sq->addr, sq->addrlen);
3273 		if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3274 			sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) {
3275 			log_err("Out of memory caching edns works");
3276 		}
3277 		sq->edns_lame_known = 1;
3278 	} else if(sq->status == serviced_query_UDP_EDNS_fallback &&
3279 		!sq->edns_lame_known && (LDNS_RCODE_WIRE(
3280 		sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR ||
3281 		LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3282 		LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin(
3283 		c->buffer)) == LDNS_RCODE_YXDOMAIN)) {
3284 		/* the fallback produced a result that looks promising, note
3285 		 * that this server should be approached without EDNS */
3286 		/* only store no-EDNS in the cache if DNSSEC is not wanted for the domain */
3287 		if(!sq->want_dnssec) {
3288 		  log_addr(VERB_ALGO, "serviced query: EDNS fails for",
3289 			&sq->addr, sq->addrlen);
3290 		  if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3291 			sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
3292 			log_err("Out of memory caching no edns for host");
3293 		  }
3294 		} else {
3295 		  log_addr(VERB_ALGO, "serviced query: EDNS fails, but "
3296 			"not stored because need DNSSEC for", &sq->addr,
3297 			sq->addrlen);
3298 		}
3299 		sq->status = serviced_query_UDP;
3300 	}
3301 	if(now.tv_sec > sq->last_sent_time.tv_sec ||
3302 		(now.tv_sec == sq->last_sent_time.tv_sec &&
3303 		now.tv_usec > sq->last_sent_time.tv_usec)) {
3304 		/* compute the round trip time in milliseconds */
3305 		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3306 		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
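		/* For example, last_sent_time 10.250000s and now 10.287000s
		 * gives (10-10)*1000 + (287000-250000)/1000 = 37 msec. */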
3307 		verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime);
3308 		log_assert(roundtime >= 0);
3309 		/* in case the system hibernated, do not enter a huge value;
3310 		 * values above this give trouble with server selection */
3311 		if(roundtime < 60000) {
3312 		    if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3313 			sq->zone, sq->zonelen, sq->qtype, roundtime,
3314 			sq->last_rtt, (time_t)now.tv_sec))
3315 			log_err("out of memory noting rtt.");
3316 		}
3317 	}
3318 	/* perform TC flag check and TCP fallback after updating our
3319 	 * cache entries for EDNS status and RTT times */
3320 	if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) {
3321 		/* fallback to TCP */
3322 		/* this discards partial UDP contents */
3323 		if(sq->status == serviced_query_UDP_EDNS ||
3324 			sq->status == serviced_query_UDP_EDNS_FRAG ||
3325 			sq->status == serviced_query_UDP_EDNS_fallback)
3326 			/* if we have unfinished EDNS_fallback, start again */
3327 			sq->status = serviced_query_TCP_EDNS;
3328 		else	sq->status = serviced_query_TCP;
3329 		serviced_tcp_initiate(sq, c->buffer);
3330 		return 0;
3331 	}
3332 	/* yay! an answer */
3333 	serviced_callbacks(sq, error, c, rep);
3334 	return 0;
3335 }
3336 
3337 struct serviced_query*
3338 outnet_serviced_query(struct outside_network* outnet,
3339 	struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
3340 	int nocaps, int check_ratelimit, int tcp_upstream, int ssl_upstream,
3341 	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
3342 	uint8_t* zone, size_t zonelen, struct module_qstate* qstate,
3343 	comm_point_callback_type* callback, void* callback_arg,
3344 	sldns_buffer* buff, struct module_env* env, int* was_ratelimited)
3345 {
3346 	struct serviced_query* sq;
3347 	struct service_callback* cb;
3348 	struct edns_string_addr* client_string_addr;
3349 	struct regional* region;
3350 	struct edns_option* backed_up_opt_list = qstate->edns_opts_back_out;
3351 	struct edns_option* per_upstream_opt_list = NULL;
3352 	time_t timenow = 0;
3353 
3354 	/* If we have an already populated EDNS option list, make a copy since
3355 	 * we may now add upstream-specific EDNS options. */
3356 	/* Use a region that can be attached to a serviced_query, if one needs
3357 	 * to be created. If an existing serviced_query is found, this region
3358 	 * is released in this function instead. */
3359 	region = alloc_reg_obtain(env->alloc);
3360 	if(!region) return NULL;
3361 	if(qstate->edns_opts_back_out) {
3362 		per_upstream_opt_list = edns_opt_copy_region(
3363 			qstate->edns_opts_back_out, region);
3364 		if(!per_upstream_opt_list) {
3365 			alloc_reg_release(env->alloc, region);
3366 			return NULL;
3367 		}
3368 		qstate->edns_opts_back_out = per_upstream_opt_list;
3369 	}
3370 
3371 	if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone,
3372 		zonelen, qstate, region)) {
3373 		alloc_reg_release(env->alloc, region);
3374 		return NULL;
3375 	}
3376 	/* Restore the option list; we can explicitly use the copied one from
3377 	 * now on. */
3378 	per_upstream_opt_list = qstate->edns_opts_back_out;
3379 	qstate->edns_opts_back_out = backed_up_opt_list;
3380 
3381 	if((client_string_addr = edns_string_addr_lookup(
3382 		&env->edns_strings->client_strings, addr, addrlen))) {
3383 		edns_opt_list_append(&per_upstream_opt_list,
3384 			env->edns_strings->client_string_opcode,
3385 			client_string_addr->string_len,
3386 			client_string_addr->string, region);
3387 	}
3388 
3389 	serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
3390 		qinfo->qclass, flags);
3391 	sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
3392 		per_upstream_opt_list);
3393 	if(!sq) {
3394 		/* Check ratelimit only for new serviced_query */
3395 		if(check_ratelimit) {
3396 			timenow = *env->now;
3397 			if(!infra_ratelimit_inc(env->infra_cache, zone,
3398 				zonelen, timenow, env->cfg->ratelimit_backoff,
3399 				&qstate->qinfo, qstate->reply)) {
3400 				/* Can we pass through with slip factor? */
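				/* With ratelimit_factor N this lets roughly
				 * one in N ratelimited queries slip through
				 * as a probe (when ub_random_max(rnd, N)
				 * equals 1); a factor of 0 disables the
				 * pass-through. */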
3401 				if(env->cfg->ratelimit_factor == 0 ||
3402 					ub_random_max(env->rnd,
3403 					env->cfg->ratelimit_factor) != 1) {
3404 					*was_ratelimited = 1;
3405 					alloc_reg_release(env->alloc, region);
3406 					return NULL;
3407 				}
3408 				log_nametypeclass(VERB_ALGO,
3409 					"ratelimit allowed through for "
3410 					"delegation point", zone,
3411 					LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN);
3412 			}
3413 		}
3414 		/* make new serviced query entry */
3415 		sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
3416 			tcp_upstream, ssl_upstream, tls_auth_name, addr,
3417 			addrlen, zone, zonelen, (int)qinfo->qtype,
3418 			per_upstream_opt_list,
3419 			( ssl_upstream && env->cfg->pad_queries
3420 			? env->cfg->pad_queries_block_size : 0 ),
3421 			env->alloc, region);
3422 		if(!sq) {
3423 			if(check_ratelimit) {
3424 				infra_ratelimit_dec(env->infra_cache,
3425 					zone, zonelen, timenow);
3426 			}
3427 			alloc_reg_release(env->alloc, region);
3428 			return NULL;
3429 		}
3430 		if(!(cb = (struct service_callback*)regional_alloc(
3431 			sq->region, sizeof(*cb)))) {
3432 			if(check_ratelimit) {
3433 				infra_ratelimit_dec(env->infra_cache,
3434 					zone, zonelen, timenow);
3435 			}
3436 			(void)rbtree_delete(outnet->serviced, sq);
3437 			serviced_node_del(&sq->node, NULL);
3438 			return NULL;
3439 		}
3440 		/* No network action at this point; it will be invoked with the
3441 		 * serviced_query timer instead to run outside of the mesh. */
3442 	} else {
3443 		/* We don't need this region anymore. */
3444 		alloc_reg_release(env->alloc, region);
3445 		/* duplicate entries are included in the callback list, because
3446 		 * each has a counterpart registration by our caller that will
3447 		 * be removed separately (with callbacks, perhaps). */
3448 		if(!(cb = (struct service_callback*)regional_alloc(
3449 			sq->region, sizeof(*cb)))) {
3450 			return NULL;
3451 		}
3452 	}
3453 	/* add callback to list of callbacks */
3454 	cb->cb = callback;
3455 	cb->cb_arg = callback_arg;
3456 	cb->next = sq->cblist;
3457 	sq->cblist = cb;
3458 	return sq;
3459 }
3460 
3461 /** remove callback from list */
3462 static void
3463 callback_list_remove(struct serviced_query* sq, void* cb_arg)
3464 {
3465 	struct service_callback** pp = &sq->cblist;
3466 	while(*pp) {
3467 		if((*pp)->cb_arg == cb_arg) {
3468 			struct service_callback* del = *pp;
3469 			*pp = del->next;
3470 			return;
3471 		}
3472 		pp = &(*pp)->next;
3473 	}
3474 }
3475 
3476 void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
3477 {
3478 	if(!sq)
3479 		return;
3480 	callback_list_remove(sq, cb_arg);
3481 	/* if the callbacks() routine scheduled deletion, or the query is busy, let that code delete it */
3482 	if(!sq->cblist && !sq->busy && !sq->to_be_deleted) {
3483 		(void)rbtree_delete(sq->outnet->serviced, sq);
3484 		serviced_delete(sq);
3485 	}
3486 }
3487 
3488 /** create fd to send to this destination */
3489 static int
3490 fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
3491 	socklen_t to_addrlen)
3492 {
3493 	struct sockaddr_storage* addr;
3494 	socklen_t addrlen;
3495 	int i, try, pnum, dscp;
3496 	struct port_if* pif;
3497 
3498 	/* create fd */
3499 	dscp = outnet->ip_dscp;
3500 	for(try = 0; try<1000; try++) {
3501 		int port = 0;
3502 		int freebind = 0;
3503 		int noproto = 0;
3504 		int inuse = 0;
3505 		int fd = -1;
3506 
3507 		/* select interface */
3508 		if(addr_is_ip6(to_addr, to_addrlen)) {
3509 			if(outnet->num_ip6 == 0) {
3510 				char to[64];
3511 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3512 				verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
3513 				return -1;
3514 			}
3515 			i = ub_random_max(outnet->rnd, outnet->num_ip6);
3516 			pif = &outnet->ip6_ifs[i];
3517 		} else {
3518 			if(outnet->num_ip4 == 0) {
3519 				char to[64];
3520 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3521 				verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
3522 				return -1;
3523 			}
3524 			i = ub_random_max(outnet->rnd, outnet->num_ip4);
3525 			pif = &outnet->ip4_ifs[i];
3526 		}
3527 		addr = &pif->addr;
3528 		addrlen = pif->addrlen;
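		/* Pick a random source port for the probe: an index below
		 * pif->inuse selects a port that already has an open socket,
		 * the remaining indices pick a number from the array of still
		 * unused ports. (With explicit port randomisation compiled
		 * out, port 0 lets the kernel choose an ephemeral port.) */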
3529 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3530 		pnum = ub_random_max(outnet->rnd, pif->avail_total);
3531 		if(pnum < pif->inuse) {
3532 			/* port already open */
3533 			port = pif->out[pnum]->number;
3534 		} else {
3535 			/* unused ports in start part of array */
3536 			port = pif->avail_ports[pnum - pif->inuse];
3537 		}
3538 #else
3539 		pnum = port = 0;
3540 #endif
3541 		if(addr_is_ip6(to_addr, to_addrlen)) {
3542 			struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
3543 			sa.sin6_port = (in_port_t)htons((uint16_t)port);
3544 			fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
3545 				(struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
3546 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3547 		} else {
3548 			struct sockaddr_in* sa = (struct sockaddr_in*)addr;
3549 			sa->sin_port = (in_port_t)htons((uint16_t)port);
3550 			fd = create_udp_sock(AF_INET, SOCK_DGRAM,
3551 				(struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
3552 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3553 		}
3554 		if(fd != -1) {
3555 			return fd;
3556 		}
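		/* only retry when the bind failed because the port was in
		 * use; other socket errors are returned to the caller */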
3557 		if(!inuse) {
3558 			return -1;
3559 		}
3560 	}
3561 	/* too many tries */
3562 	log_err("cannot send probe, ports are in use");
3563 	return -1;
3564 }
3565 
3566 struct comm_point*
3567 outnet_comm_point_for_udp(struct outside_network* outnet,
3568 	comm_point_callback_type* cb, void* cb_arg,
3569 	struct sockaddr_storage* to_addr, socklen_t to_addrlen)
3570 {
3571 	struct comm_point* cp;
3572 	int fd = fd_for_dest(outnet, to_addr, to_addrlen);
3573 	if(fd == -1) {
3574 		return NULL;
3575 	}
3576 	cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff,
3577 		cb, cb_arg, NULL);
3578 	if(!cp) {
3579 		log_err("malloc failure");
3580 		close(fd);
3581 		return NULL;
3582 	}
3583 	return cp;
3584 }
3585 
3586 /** setup SSL for comm point */
3587 static int
3588 setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
3589 	int fd, char* host)
3590 {
3591 	cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
3592 	if(!cp->ssl) {
3593 		log_err("cannot create SSL object");
3594 		return 0;
3595 	}
3596 #ifdef USE_WINSOCK
3597 	comm_point_tcp_win_bio_cb(cp, cp->ssl);
3598 #endif
3599 	cp->ssl_shake_state = comm_ssl_shake_write;
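	/* as the outgoing (client) side, the handshake presumably begins
	 * with us writing (the ClientHello), hence the write shake state */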
3600 	/* TLS setup: SNI and certificate host name verification */
3601 #ifdef HAVE_SSL
3602 	if(outnet->tls_use_sni) {
3603 		(void)SSL_set_tlsext_host_name(cp->ssl, host);
3604 	}
3605 #endif
3606 #ifdef HAVE_SSL_SET1_HOST
3607 	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3608 		/* because we set SSL_VERIFY_PEER, in netevent in
3609 		 * ssl_handshake, it'll check if the certificate
3610 		 * verification has succeeded */
3611 		/* SSL_VERIFY_PEER is set on the sslctx */
3612 		/* and the certificates to verify with are loaded into
3613 		 * it with SSL_load_verify_locations or
3614 		 * SSL_CTX_set_default_verify_paths */
3615 		/* setting the hostname makes openssl verify the
3616 		 * host name in the x509 certificate in the
3617 		 * SSL connection */
3618 		if(!SSL_set1_host(cp->ssl, host)) {
3619 			log_err("SSL_set1_host failed");
3620 			return 0;
3621 		}
3622 	}
3623 #elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
3624 	/* OpenSSL 1.0.2 has this function, which can be used for
3625 	 * set1_host-like verification */
3626 	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3627 		X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl);
3628 #  ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
3629 		X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
3630 #  endif
3631 		if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
3632 			log_err("X509_VERIFY_PARAM_set1_host failed");
3633 			return 0;
3634 		}
3635 	}
3636 #else
3637 	(void)host;
3638 #endif /* HAVE_SSL_SET1_HOST */
3639 	return 1;
3640 }
3641 
3642 struct comm_point*
3643 outnet_comm_point_for_tcp(struct outside_network* outnet,
3644 	comm_point_callback_type* cb, void* cb_arg,
3645 	struct sockaddr_storage* to_addr, socklen_t to_addrlen,
3646 	sldns_buffer* query, int timeout, int ssl, char* host)
3647 {
3648 	struct comm_point* cp;
3649 	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3650 	if(fd == -1) {
3651 		return 0;
3652 	}
3653 	fd_set_nonblock(fd);
3654 	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3655 		/* outnet_tcp_connect has closed fd on error for us */
3656 		return 0;
3657 	}
3658 	cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
3659 	if(!cp) {
3660 		log_err("malloc failure");
3661 		close(fd);
3662 		return 0;
3663 	}
3664 	cp->repinfo.addrlen = to_addrlen;
3665 	memcpy(&cp->repinfo.addr, to_addr, to_addrlen);
3666 
3667 	/* setup for SSL (if needed) */
3668 	if(ssl) {
3669 		if(!setup_comm_ssl(cp, outnet, fd, host)) {
3670 			log_err("cannot setup XoT");
3671 			comm_point_delete(cp);
3672 			return NULL;
3673 		}
3674 	}
3675 
3676 	/* set timeout on TCP connection */
3677 	comm_point_start_listening(cp, fd, timeout);
3678 	/* copy scratch buffer to cp->buffer */
3679 	sldns_buffer_copy(cp->buffer, query);
3680 	return cp;
3681 }
3682 
3683 /** setup the User-Agent HTTP header based on http-user-agent configuration */
3684 static void
3685 setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
3686 {
3687 	if(cfg->hide_http_user_agent) return;
3688 	if(cfg->http_user_agent==NULL || cfg->http_user_agent[0] == 0) {
3689 		sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME,
3690 			PACKAGE_VERSION);
3691 	} else {
3692 		sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
3693 	}
3694 }
3695 
3696 /** setup http request headers in buffer for sending query to destination */
3697 static int
3698 setup_http_request(sldns_buffer* buf, char* host, char* path,
3699 	struct config_file* cfg)
3700 {
3701 	sldns_buffer_clear(buf);
3702 	sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
3703 	sldns_buffer_printf(buf, "Host: %s\r\n", host);
3704 	setup_http_user_agent(buf, cfg);
3705 	/* We do not really do multiple queries per connection, but the
3706 	 * Connection: close header is not needed either:
3707 	 * sldns_buffer_printf(buf, "Connection: close\r\n") */
3708 	sldns_buffer_printf(buf, "\r\n");
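	/* The buffer now holds a minimal GET request, for example (host,
	 * path and version values here are only illustrative):
	 *   GET /dns-query HTTP/1.1
	 *   Host: example.net
	 *   User-Agent: unbound/1.x.x   (omitted if hide-http-user-agent)
	 *   (empty line)
	 */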
3709 	if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
3710 		return 0; /* somehow buffer too short, but it is about 60K
3711 		and the request is only a couple hundred bytes long. */
3712 	sldns_buffer_flip(buf);
3713 	return 1;
3714 }
3715 
3716 struct comm_point*
3717 outnet_comm_point_for_http(struct outside_network* outnet,
3718 	comm_point_callback_type* cb, void* cb_arg,
3719 	struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
3720 	int ssl, char* host, char* path, struct config_file* cfg)
3721 {
3722 	/* cp calls cb with err=NETEVENT_DONE when transfer is done */
3723 	struct comm_point* cp;
3724 	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3725 	if(fd == -1) {
3726 		return 0;
3727 	}
3728 	fd_set_nonblock(fd);
3729 	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3730 		/* outnet_tcp_connect has closed fd on error for us */
3731 		return 0;
3732 	}
3733 	cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
3734 		outnet->udp_buff);
3735 	if(!cp) {
3736 		log_err("malloc failure");
3737 		close(fd);
3738 		return 0;
3739 	}
3740 	cp->repinfo.addrlen = to_addrlen;
3741 	memcpy(&cp->repinfo.addr, to_addr, to_addrlen);
3742 
3743 	/* setup for SSL (if needed) */
3744 	if(ssl) {
3745 		if(!setup_comm_ssl(cp, outnet, fd, host)) {
3746 			log_err("cannot setup https");
3747 			comm_point_delete(cp);
3748 			return NULL;
3749 		}
3750 	}
3751 
3752 	/* set timeout on TCP connection */
3753 	comm_point_start_listening(cp, fd, timeout);
3754 
3755 	/* setup http request in cp->buffer */
3756 	if(!setup_http_request(cp->buffer, host, path, cfg)) {
3757 		log_err("error setting up http request");
3758 		comm_point_delete(cp);
3759 		return NULL;
3760 	}
3761 	return cp;
3762 }
3763 
3764 /** get memory used by waiting tcp entry (in use or not) */
3765 static size_t
3766 waiting_tcp_get_mem(struct waiting_tcp* w)
3767 {
3768 	size_t s;
3769 	if(!w) return 0;
3770 	s = sizeof(*w) + w->pkt_len;
3771 	if(w->timer)
3772 		s += comm_timer_get_mem(w->timer);
3773 	return s;
3774 }
3775 
3776 /** get memory used by a port interface (struct port_if) */
3777 static size_t
3778 if_get_mem(struct port_if* pif)
3779 {
3780 	size_t s;
3781 	int i;
3782 	s = sizeof(*pif) +
3783 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3784 	    sizeof(int)*pif->avail_total +
3785 #endif
3786 		sizeof(struct port_comm*)*pif->maxout;
3787 	for(i=0; i<pif->inuse; i++)
3788 		s += sizeof(*pif->out[i]) +
3789 			comm_point_get_mem(pif->out[i]->cp);
3790 	return s;
3791 }
3792 
3793 /** get memory used by waiting udp */
3794 static size_t
3795 waiting_udp_get_mem(struct pending* w)
3796 {
3797 	size_t s;
3798 	s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
3799 	return s;
3800 }
3801 
3802 size_t outnet_get_mem(struct outside_network* outnet)
3803 {
3804 	size_t i;
3805 	int k;
3806 	struct waiting_tcp* w;
3807 	struct pending* u;
3808 	struct serviced_query* sq;
3809 	struct service_callback* sb;
3810 	struct port_comm* pc;
3811 	size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
3812 		sizeof(*outnet->udp_buff) +
3813 		sldns_buffer_capacity(outnet->udp_buff);
3814 	/* second buffer is not ours */
3815 	for(pc = outnet->unused_fds; pc; pc = pc->next) {
3816 		s += sizeof(*pc) + comm_point_get_mem(pc->cp);
3817 	}
3818 	for(k=0; k<outnet->num_ip4; k++)
3819 		s += if_get_mem(&outnet->ip4_ifs[k]);
3820 	for(k=0; k<outnet->num_ip6; k++)
3821 		s += if_get_mem(&outnet->ip6_ifs[k]);
3822 	for(u=outnet->udp_wait_first; u; u=u->next_waiting)
3823 		s += waiting_udp_get_mem(u);
3824 
3825 	s += sizeof(struct pending_tcp*)*outnet->num_tcp;
3826 	for(i=0; i<outnet->num_tcp; i++) {
3827 		s += sizeof(struct pending_tcp);
3828 		s += comm_point_get_mem(outnet->tcp_conns[i]->c);
3829 		if(outnet->tcp_conns[i]->query)
3830 			s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
3831 	}
3832 	for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
3833 		s += waiting_tcp_get_mem(w);
3834 	s += sizeof(*outnet->pending);
3835 	s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
3836 		outnet->pending->count;
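	/* pending UDP queries are accounted with a flat per-entry estimate
	 * (struct pending plus the baseline timer size) rather than by
	 * walking the pending tree */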
3837 	s += sizeof(*outnet->serviced);
3838 	s += outnet->svcd_overhead;
3839 	RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
3840 		s += sizeof(*sq) + sq->qbuflen;
3841 		for(sb = sq->cblist; sb; sb = sb->next)
3842 			s += sizeof(*sb);
3843 	}
3844 	return s;
3845 }
3846 
3847 size_t
3848 serviced_get_mem(struct serviced_query* sq)
3849 {
3850 	struct service_callback* sb;
3851 	size_t s;
3852 	s = sizeof(*sq) + sq->qbuflen;
3853 	for(sb = sq->cblist; sb; sb = sb->next)
3854 		s += sizeof(*sb);
3855 	if(sq->status == serviced_query_UDP_EDNS ||
3856 		sq->status == serviced_query_UDP ||
3857 		sq->status == serviced_query_UDP_EDNS_FRAG ||
3858 		sq->status == serviced_query_UDP_EDNS_fallback) {
3859 		s += sizeof(struct pending);
3860 		s += comm_timer_get_mem(NULL);
3861 	} else {
3862 		/* does not have size of the pkt pointer */
3863 		/* always has a timer except on malloc failures */
3864 
3865 		/* these sizes are part of the main outside network mem */
3866 		/*
3867 		s += sizeof(struct waiting_tcp);
3868 		s += comm_timer_get_mem(NULL);
3869 		*/
3870 	}
3871 	return s;
3872 }
3873 
3874