xref: /freebsd/contrib/unbound/services/outside_network.c (revision 4f0c9b76cf75724ef0b9c59bb8c182be24361d7c)
1 /*
2  * services/outside_network.c - implement sending of queries and wait answer.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file has functions to send queries to authoritative servers and
40  * wait for the pending answer events.
41  */
42 #include "config.h"
43 #include <ctype.h>
44 #ifdef HAVE_SYS_TYPES_H
45 #  include <sys/types.h>
46 #endif
47 #include <sys/time.h>
48 #include "services/outside_network.h"
49 #include "services/listen_dnsport.h"
50 #include "services/cache/infra.h"
51 #include "iterator/iterator.h"
52 #include "util/data/msgparse.h"
53 #include "util/data/msgreply.h"
54 #include "util/data/msgencode.h"
55 #include "util/data/dname.h"
56 #include "util/netevent.h"
57 #include "util/log.h"
58 #include "util/net_help.h"
59 #include "util/random.h"
60 #include "util/fptr_wlist.h"
61 #include "util/edns.h"
62 #include "sldns/sbuffer.h"
63 #include "dnstap/dnstap.h"
64 #ifdef HAVE_OPENSSL_SSL_H
65 #include <openssl/ssl.h>
66 #endif
67 #ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
68 #include <openssl/x509v3.h>
69 #endif
70 
71 #ifdef HAVE_NETDB_H
72 #include <netdb.h>
73 #endif
74 #include <fcntl.h>
75 
76 /** number of times to retry making a random ID that is unique. */
77 #define MAX_ID_RETRY 1000
78 /** number of times to retry finding interface, port that can be opened. */
79 #define MAX_PORT_RETRY 10000
80 /** number of retries on outgoing UDP queries */
81 #define OUTBOUND_UDP_RETRY 1
82 
83 /** initiate TCP transaction for serviced query */
84 static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
85 /** with a fd available, randomize and send UDP */
86 static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
87 	int timeout);
88 
89 /** remove waiting tcp from the outnet waiting list */
90 static void waiting_list_remove(struct outside_network* outnet,
91 	struct waiting_tcp* w);
92 
93 /** select a DNS ID for a TCP stream */
94 static uint16_t tcp_select_id(struct outside_network* outnet,
95 	struct reuse_tcp* reuse);
96 
97 /** Perform serviced query UDP sending operation */
98 static int serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff);
99 
100 /** Send serviced query over TCP; returns false on initial failure */
101 static int serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff);
102 
103 /** call the callbacks for a serviced query */
104 static void serviced_callbacks(struct serviced_query* sq, int error,
105 	struct comm_point* c, struct comm_reply* rep);
106 
107 int
108 pending_cmp(const void* key1, const void* key2)
109 {
110 	struct pending *p1 = (struct pending*)key1;
111 	struct pending *p2 = (struct pending*)key2;
112 	if(p1->id < p2->id)
113 		return -1;
114 	if(p1->id > p2->id)
115 		return 1;
116 	log_assert(p1->id == p2->id);
117 	return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
118 }
119 
int
serviced_cmp(const void* key1, const void* key2)
{
	struct serviced_query* q1 = (struct serviced_query*)key1;
	struct serviced_query* q2 = (struct serviced_query*)key2;
	int r;
	/* shorter stored query packets sort before longer ones */
	if(q1->qbuflen < q2->qbuflen)
		return -1;
	if(q1->qbuflen > q2->qbuflen)
		return 1;
	log_assert(q1->qbuflen == q2->qbuflen);
	log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
	/* alternate casing of qname is still the same query */
	/* compare the first 10 bytes of the stored query (the header part,
	 * per the layout asserted above) */
	if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
		return r;
	/* compare the trailing 4 bytes (qtype and qclass) */
	if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
		return r;
	/* distinguish queries by their dnssec flag */
	if(q1->dnssec != q2->dnssec) {
		if(q1->dnssec < q2->dnssec)
			return -1;
		return 1;
	}
	/* case-insensitive compare of the query name at offset 10 */
	if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
		return r;
	/* same query to the same server with different EDNS options differs */
	if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
		return r;
	/* finally, order by destination server address */
	return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
}
148 
149 /** compare if the reuse element has the same address, port and same ssl-is
150  * used-for-it characteristic */
151 static int
152 reuse_cmp_addrportssl(const void* key1, const void* key2)
153 {
154 	struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
155 	struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
156 	int r;
157 	/* compare address and port */
158 	r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
159 	if(r != 0)
160 		return r;
161 
162 	/* compare if SSL-enabled */
163 	if(r1->is_ssl && !r2->is_ssl)
164 		return 1;
165 	if(!r1->is_ssl && r2->is_ssl)
166 		return -1;
167 	return 0;
168 }
169 
int
reuse_cmp(const void* key1, const void* key2)
{
	/* primary ordering: destination address, port, ssl-or-not */
	int c = reuse_cmp_addrportssl(key1, key2);
	if(c != 0)
		return c;
	/* tie-break on pointer identity, so several distinct streams to
	 * the same destination can coexist in the tree */
	if(key1 == key2)
		return 0;
	return (key1 < key2) ? -1 : 1;
}
183 
184 int reuse_id_cmp(const void* key1, const void* key2)
185 {
186 	struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
187 	struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
188 	if(w1->id < w2->id)
189 		return -1;
190 	if(w1->id > w2->id)
191 		return 1;
192 	return 0;
193 }
194 
195 /** delete waiting_tcp entry. Does not unlink from waiting list.
196  * @param w: to delete.
197  */
198 static void
199 waiting_tcp_delete(struct waiting_tcp* w)
200 {
201 	if(!w) return;
202 	if(w->timer)
203 		comm_timer_delete(w->timer);
204 	free(w);
205 }
206 
/**
 * Pick random outgoing-interface of that family, and bind it.
 * port set to 0 so OS picks a port number for us.
 * if it is the ANY address, do not bind.
 * @param pend: pending tcp structure, for storing the local address choice.
 * @param w: tcp structure with destination address.
 * @param s: socket fd.
 * @return false on error, socket closed.
 */
static int
pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
{
	struct port_if* pi = NULL;
	int num;
	pend->pi = NULL;
	/* count the configured outgoing interfaces of the destination's
	 * address family */
#ifdef INET6
	if(addr_is_ip6(&w->addr, w->addrlen))
		num = w->outnet->num_ip6;
	else
#endif
		num = w->outnet->num_ip4;
	if(num == 0) {
		log_err("no TCP outgoing interfaces of family");
		log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
		sock_close(s);
		return 0;
	}
	/* pick one of them at random */
#ifdef INET6
	if(addr_is_ip6(&w->addr, w->addrlen))
		pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
	else
#endif
		pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
	log_assert(pi);
	pend->pi = pi;
	if(addr_is_any(&pi->addr, pi->addrlen)) {
		/* binding to the ANY interface is for listening sockets */
		return 1;
	}
	/* set port to 0 */
	if(addr_is_ip6(&pi->addr, pi->addrlen))
		((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
	else	((struct sockaddr_in*)&pi->addr)->sin_port = 0;
	if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
		/* EADDRNOTAVAIL is only logged at verbosity >= 4; at lower
		 * verbosity it is suppressed, other errors always log */
#ifndef USE_WINSOCK
#ifdef EADDRNOTAVAIL
		if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
#endif
#else /* USE_WINSOCK */
		if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
#endif
		    log_err("outgoing tcp: bind: %s", sock_strerror(errno));
		sock_close(s);
		return 0;
	}
	log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
	return 1;
}
265 
/** get TCP file descriptor for address, returns -1 on failure,
 * tcp_mss is 0 or maxseg size to set for TCP packets.
 * dscp is the IP DiffServ codepoint to set on the socket. */
int
outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
{
	int s;
	int af;
	char* err;
#if defined(SO_REUSEADDR) || defined(IP_BIND_ADDRESS_NO_PORT)
	int on = 1;
#endif
	/* create a TCP socket of the same family as the destination */
#ifdef INET6
	if(addr_is_ip6(addr, addrlen)){
		s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
		af = AF_INET6;
	} else {
#else
	{
#endif
		af = AF_INET;
		s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
	}
	if(s == -1) {
		log_err_addr("outgoing tcp: socket", sock_strerror(errno),
			addr, addrlen);
		return -1;
	}

	/* the setsockopt failures below are logged but nonfatal; the
	 * socket remains usable without the options */
#ifdef SO_REUSEADDR
	if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
		(socklen_t)sizeof(on)) < 0) {
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(.. SO_REUSEADDR ..) failed");
	}
#endif

	err = set_ip_dscp(s, af, dscp);
	if(err != NULL) {
		verbose(VERB_ALGO, "outgoing tcp:"
			"error setting IP DiffServ codepoint on socket");
	}

	/* clamp the TCP maximum segment size if configured */
	if(tcp_mss > 0) {
#if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
		if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
			(void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
			verbose(VERB_ALGO, "outgoing tcp:"
				" setsockopt(.. TCP_MAXSEG ..) failed");
		}
#else
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(TCP_MAXSEG) unsupported");
#endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
	}
	/* delay local port allocation until connect() time; reduces
	 * ephemeral port exhaustion when binding a source address */
#ifdef IP_BIND_ADDRESS_NO_PORT
	if(setsockopt(s, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, (void*)&on,
		(socklen_t)sizeof(on)) < 0) {
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(.. IP_BIND_ADDRESS_NO_PORT ..) failed");
	}
#endif /* IP_BIND_ADDRESS_NO_PORT */
	return s;
}
329 
/** connect tcp connection to addr, 0 on failure (socket closed),
 * 1 on success or when the nonblocking connect is still in progress */
int
outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
{
	if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
		/* a nonblocking connect still in progress is fine; any
		 * other error closes the socket and fails */
#ifndef USE_WINSOCK
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#endif
			if(tcp_connect_errno_needs_log(
				(struct sockaddr*)addr, addrlen))
				log_err_addr("outgoing tcp: connect",
					strerror(errno), addr, addrlen);
			close(s);
			return 0;
#ifdef EINPROGRESS
		}
#endif
#else /* USE_WINSOCK */
		if(WSAGetLastError() != WSAEINPROGRESS &&
			WSAGetLastError() != WSAEWOULDBLOCK) {
			closesocket(s);
			return 0;
		}
#endif
	}
	return 1;
}
358 
/** log reuse item addr and ptr with message, at the given verbosity;
 * does nothing when verbosity is too low or the item has no open fd */
static void
log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
{
	uint16_t port;
	char addrbuf[128];
	if(verbosity < v) return;
	/* nothing useful to print without an open pending connection */
	if(!reuse || !reuse->pending || !reuse->pending->c)
		return;
	addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
	/* NOTE(review): reads the port through a sockaddr_in cast even for
	 * IPv6 addresses; presumably relies on sin_port and sin6_port being
	 * at the same offset — confirm on target platforms */
	port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
	verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
		reuse->pending->c->fd);
}
373 
374 /** pop the first element from the writewait list */
375 static struct waiting_tcp* reuse_write_wait_pop(struct reuse_tcp* reuse)
376 {
377 	struct waiting_tcp* w = reuse->write_wait_first;
378 	if(!w)
379 		return NULL;
380 	log_assert(w->write_wait_queued);
381 	log_assert(!w->write_wait_prev);
382 	reuse->write_wait_first = w->write_wait_next;
383 	if(w->write_wait_next)
384 		w->write_wait_next->write_wait_prev = NULL;
385 	else	reuse->write_wait_last = NULL;
386 	w->write_wait_queued = 0;
387 	w->write_wait_next = NULL;
388 	w->write_wait_prev = NULL;
389 	return w;
390 }
391 
/** remove the element from the writewait list; no-op when the element
 * is NULL or not queued (in release builds; asserts in debug builds) */
static void reuse_write_wait_remove(struct reuse_tcp* reuse,
	struct waiting_tcp* w)
{
	log_assert(w);
	log_assert(w->write_wait_queued);
	/* tolerate a NULL or unqueued element defensively in release builds */
	if(!w)
		return;
	if(!w->write_wait_queued)
		return;
	/* unlink from the doubly linked list, fixing up first/last */
	if(w->write_wait_prev)
		w->write_wait_prev->write_wait_next = w->write_wait_next;
	else	reuse->write_wait_first = w->write_wait_next;
	log_assert(!w->write_wait_prev ||
		w->write_wait_prev->write_wait_next != w->write_wait_prev);
	if(w->write_wait_next)
		w->write_wait_next->write_wait_prev = w->write_wait_prev;
	else	reuse->write_wait_last = w->write_wait_prev;
	log_assert(!w->write_wait_next
		|| w->write_wait_next->write_wait_prev != w->write_wait_next);
	/* mark as no longer queued and clear the links */
	w->write_wait_queued = 0;
	w->write_wait_next = NULL;
	w->write_wait_prev = NULL;
}
416 
417 /** push the element after the last on the writewait list */
418 static void reuse_write_wait_push_back(struct reuse_tcp* reuse,
419 	struct waiting_tcp* w)
420 {
421 	if(!w) return;
422 	log_assert(!w->write_wait_queued);
423 	if(reuse->write_wait_last) {
424 		reuse->write_wait_last->write_wait_next = w;
425 		log_assert(reuse->write_wait_last->write_wait_next !=
426 			reuse->write_wait_last);
427 		w->write_wait_prev = reuse->write_wait_last;
428 	} else {
429 		reuse->write_wait_first = w;
430 	}
431 	reuse->write_wait_last = w;
432 	w->write_wait_queued = 1;
433 }
434 
/** insert element in tree by id; the element must not already be in a tree */
void
reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
#ifdef UNBOUND_DEBUG
	rbnode_type* added;
#endif
	/* must not already be a member of a tree */
	log_assert(w->id_node.key == NULL);
	w->id_node.key = w;
	/* in debug builds keep the insert result so the assert below can
	 * check the node was really added; otherwise discard it */
#ifdef UNBOUND_DEBUG
	added =
#else
	(void)
#endif
	rbtree_insert(&reuse->tree_by_id, &w->id_node);
	log_assert(added);  /* should have been added */
}
452 
453 /** find element in tree by id */
454 struct waiting_tcp*
455 reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
456 {
457 	struct waiting_tcp key_w;
458 	rbnode_type* n;
459 	memset(&key_w, 0, sizeof(key_w));
460 	key_w.id_node.key = &key_w;
461 	key_w.id = id;
462 	n = rbtree_search(&reuse->tree_by_id, &key_w);
463 	if(!n) return NULL;
464 	return (struct waiting_tcp*)n->key;
465 }
466 
467 /** return ID value of rbnode in tree_by_id */
468 static uint16_t
469 tree_by_id_get_id(rbnode_type* node)
470 {
471 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
472 	return w->id;
473 }
474 
/** insert into reuse tcp tree and LRU, false on failure (duplicate).
 * In practice always returns 1; internal inconsistencies are logged. */
int
reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
	/* already on the LRU means it should already be in the tree too */
	if(pend_tcp->reuse.item_on_lru_list) {
		if(!pend_tcp->reuse.node.key)
			log_err("internal error: reuse_tcp_insert: "
				"in lru list without key");
		return 1;
	}
	pend_tcp->reuse.node.key = &pend_tcp->reuse;
	pend_tcp->reuse.pending = pend_tcp;
	if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
		/* We are not in the LRU list but we are already in the
		 * tcp_reuse tree, strange.
		 * Continue to add ourselves to the LRU list. */
		log_err("internal error: reuse_tcp_insert: in lru list but "
			"not in the tree");
	}
	/* insert into LRU, first is newest */
	pend_tcp->reuse.lru_prev = NULL;
	if(outnet->tcp_reuse_first) {
		pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
		log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
		outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
		log_assert(outnet->tcp_reuse_first->lru_prev !=
			outnet->tcp_reuse_first);
	} else {
		/* empty LRU; this element is also the last */
		pend_tcp->reuse.lru_next = NULL;
		outnet->tcp_reuse_last = &pend_tcp->reuse;
	}
	outnet->tcp_reuse_first = &pend_tcp->reuse;
	pend_tcp->reuse.item_on_lru_list = 1;
	/* invariants: first/last are both NULL or both set, and no
	 * element links to itself */
	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
	return 1;
}
517 
/** find reuse tcp stream to destination for query, or NULL if none.
 * Picks a stream to the same address/port with the same ssl setting that
 * still has room for another outstanding query. */
static struct reuse_tcp*
reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
	socklen_t addrlen, int use_ssl)
{
	/* stack-allocated lookup key; only address, port and is_ssl
	 * matter to reuse_cmp_addrportssl */
	struct waiting_tcp key_w;
	struct pending_tcp key_p;
	struct comm_point c;
	rbnode_type* result = NULL, *prev;
	verbose(VERB_CLIENT, "reuse_tcp_find");
	memset(&key_w, 0, sizeof(key_w));
	memset(&key_p, 0, sizeof(key_p));
	memset(&c, 0, sizeof(c));
	key_p.query = &key_w;
	key_p.c = &c;
	key_p.reuse.pending = &key_p;
	key_p.reuse.node.key = &key_p.reuse;
	if(use_ssl)
		key_p.reuse.is_ssl = 1;
	if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
		return NULL;
	memmove(&key_p.reuse.addr, addr, addrlen);
	key_p.reuse.addrlen = addrlen;

	verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
		(unsigned)outnet->tcp_reuse.count);
	if(outnet->tcp_reuse.root == NULL ||
		outnet->tcp_reuse.root == RBTREE_NULL)
		return NULL;
	if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
		&result)) {
		/* exact match */
		/* but the key is on stack, and ptr is compared, impossible */
		log_assert(&key_p.reuse != (struct reuse_tcp*)result);
		log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
	}
	/* not found, return null */
	if(!result || result == RBTREE_NULL)
		return NULL;
	verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
	/* inexact match, find one of possibly several connections to the
	 * same destination address, with the correct port, ssl, and
	 * also less than max number of open queries, or else, fail to open
	 * a new one */
	/* rewind to start of sequence of same address,port,ssl */
	prev = rbtree_previous(result);
	while(prev && prev != RBTREE_NULL &&
		reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
		result = prev;
		prev = rbtree_previous(result);
	}

	/* loop to find first one that has correct characteristics */
	while(result && result != RBTREE_NULL &&
		reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
		if(((struct reuse_tcp*)result)->tree_by_id.count <
			outnet->max_reuse_tcp_queries) {
			/* same address, port, ssl-yes-or-no, and has
			 * space for another query */
			return (struct reuse_tcp*)result;
		}
		result = rbtree_next(result);
	}
	/* all matching streams are full; caller must open a new one */
	return NULL;
}
583 
/** use the buffer to setup writing the query.
 * @param s: the socket fd to start listening on.
 * @param pend: the pending_tcp stream to write on.
 * @param w: the query to write; its timer is (re)armed with w->timeout. */
static void
outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
	struct waiting_tcp* w)
{
	struct timeval tv;
	verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
		"len %d timeout %d msec",
		(int)w->pkt_len, w->timeout);
	/* point the comm point at the query packet and put it in
	 * write-then-read mode */
	pend->c->tcp_write_pkt = w->pkt;
	pend->c->tcp_write_pkt_len = w->pkt_len;
	pend->c->tcp_write_and_read = 1;
	pend->c->tcp_write_byte_count = 0;
	pend->c->tcp_is_reading = 0;
	comm_point_start_listening(pend->c, s, -1);
	/* set timer on the waiting_tcp entry, this is the write timeout
	 * for the written packet.  The timer on pend->c is the timer
	 * for when there is no written packet and we have readtimeouts */
#ifndef S_SPLINT_S
	/* convert milliseconds to seconds/microseconds */
	tv.tv_sec = w->timeout/1000;
	tv.tv_usec = (w->timeout%1000)*1000;
#endif
	/* if the waiting_tcp was previously waiting for a buffer in the
	 * outside_network.tcpwaitlist, then the timer is reset now that
	 * we start writing it */
	comm_timer_set(w->timer, &tv);
}
611 
/** use next free buffer to service a tcp query.
 * Opens and connects the socket (with platform-specific TCP fast open
 * variants), optionally wraps it in SSL, links the query into the
 * pending_tcp slot and the reuse trees, and starts the write.
 * @param w: the waiting query; w->outnet->tcp_free must be non-NULL.
 * @return 0 on failure (socket closed, slot still free), 1 on success. */
static int
outnet_tcp_take_into_use(struct waiting_tcp* w)
{
	struct pending_tcp* pend = w->outnet->tcp_free;
	int s;
	log_assert(pend);
	log_assert(w->pkt);
	log_assert(w->pkt_len > 0);
	log_assert(w->addrlen > 0);
	pend->c->tcp_do_toggle_rw = 0;
	pend->c->tcp_do_close = 0;
	/* open socket */
	s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);

	if(s == -1)
		return 0;

	/* pick and bind a random source interface of the right family */
	if(!pick_outgoing_tcp(pend, w, s))
		return 0;

	fd_set_nonblock(s);
	/* connect; three platform variants below share the trailing
	 * error-handling block, hence the unusual ifdef bracing */
#ifdef USE_OSX_MSG_FASTOPEN
	/* API for fast open is different here. We use a connectx() function and
	   then writes can happen as normal even using SSL.*/
	/* connectx requires that the len be set in the sockaddr struct*/
	struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
	addr_in->sin_len = w->addrlen;
	sa_endpoints_t endpoints;
	endpoints.sae_srcif = 0;
	endpoints.sae_srcaddr = NULL;
	endpoints.sae_srcaddrlen = 0;
	endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
	endpoints.sae_dstaddrlen = w->addrlen;
	if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
	             CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
	             NULL, 0, NULL, NULL) == -1) {
		/* if fails, failover to connect for OSX 10.10 */
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#else
		if(1) {
#endif
			if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#else /* USE_OSX_MSG_FASTOPEN*/
#ifdef USE_MSG_FASTOPEN
	pend->c->tcp_do_fastopen = 1;
	/* Only do TFO for TCP in which case no connect() is required here.
	   Don't combine client TFO with SSL, since OpenSSL can't
	   currently support doing a handshake on fd that already isn't connected*/
	if (w->outnet->sslctx && w->ssl_upstream) {
		if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#else /* USE_MSG_FASTOPEN*/
	if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#endif /* USE_MSG_FASTOPEN*/
#endif /* USE_OSX_MSG_FASTOPEN*/
#ifndef USE_WINSOCK
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#else
		if(1) {
#endif
			if(tcp_connect_errno_needs_log(
				(struct sockaddr*)&w->addr, w->addrlen))
				log_err_addr("outgoing tcp: connect",
					strerror(errno), &w->addr, w->addrlen);
			close(s);
#else /* USE_WINSOCK */
		if(WSAGetLastError() != WSAEINPROGRESS &&
			WSAGetLastError() != WSAEWOULDBLOCK) {
			closesocket(s);
#endif
			return 0;
		}
	}
#ifdef USE_MSG_FASTOPEN
	}
#endif /* USE_MSG_FASTOPEN */
#ifdef USE_OSX_MSG_FASTOPEN
		}
	}
#endif /* USE_OSX_MSG_FASTOPEN */
	/* wrap the fd in an SSL connection when upstream TLS is in use */
	if(w->outnet->sslctx && w->ssl_upstream) {
		pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
		if(!pend->c->ssl) {
			pend->c->fd = s;
			comm_point_close(pend->c);
			return 0;
		}
		verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
			(w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
#ifdef USE_WINSOCK
		comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
#endif
		pend->c->ssl_shake_state = comm_ssl_shake_write;
		if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
			w->outnet->tls_use_sni)) {
			pend->c->fd = s;
#ifdef HAVE_SSL
			SSL_free(pend->c->ssl);
#endif
			pend->c->ssl = NULL;
			comm_point_close(pend->c);
			return 0;
		}
	}
	/* link the query to the now-busy pending_tcp slot and take the
	 * slot off the free list */
	w->next_waiting = (void*)pend;
	w->outnet->num_tcp_outgoing++;
	w->outnet->tcp_free = pend->next_free;
	pend->next_free = NULL;
	pend->query = w;
	pend->reuse.outnet = w->outnet;
	pend->c->repinfo.addrlen = w->addrlen;
	pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
	pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
	pend->reuse.cp_more_read_again = 0;
	pend->reuse.cp_more_write_again = 0;
	memcpy(&pend->c->repinfo.addr, &w->addr, w->addrlen);
	pend->reuse.pending = pend;

	/* Remove from tree in case the is_ssl will be different and causes the
	 * identity of the reuse_tcp to change; could result in nodes not being
	 * deleted from the tree (because the new identity does not match the
	 * previous node) but their ->key would be changed to NULL. */
	if(pend->reuse.node.key)
		reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);

	if(pend->c->ssl)
		pend->reuse.is_ssl = 1;
	else	pend->reuse.is_ssl = 0;
	/* insert in reuse by address tree if not already inserted there */
	(void)reuse_tcp_insert(w->outnet, pend);
	reuse_tree_by_id_insert(&pend->reuse, w);
	outnet_tcp_take_query_setup(s, pend, w);
	return 1;
}
748 
/** Touch the lru of a reuse_tcp element, it is in use.
 * This moves it to the front of the list, where it is not likely to
 * be closed.  Items at the back of the list are closed to make space. */
void
reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
{
	if(!reuse->item_on_lru_list) {
		log_err("internal error: we need to touch the lru_list but item not in list");
		return; /* not on the list, no lru to modify */
	}
	/* either it has a previous element, or it is already the first */
	log_assert(reuse->lru_prev ||
		(!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
	if(!reuse->lru_prev)
		return; /* already first in the list */
	/* remove at current position */
	/* since it is not first, there is a previous element */
	reuse->lru_prev->lru_next = reuse->lru_next;
	log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
	if(reuse->lru_next)
		reuse->lru_next->lru_prev = reuse->lru_prev;
	else	outnet->tcp_reuse_last = reuse->lru_prev;
	log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
	/* insert at the front */
	reuse->lru_prev = NULL;
	reuse->lru_next = outnet->tcp_reuse_first;
	if(outnet->tcp_reuse_first) {
		outnet->tcp_reuse_first->lru_prev = reuse;
	}
	log_assert(reuse->lru_next != reuse);
	/* since it is not first, it is not the only element and
	 * lru_next is thus not NULL and thus reuse is now not the last in
	 * the list, so outnet->tcp_reuse_last does not need to be modified */
	outnet->tcp_reuse_first = reuse;
	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
}
789 
790 /** Snip the last reuse_tcp element off of the LRU list */
791 struct reuse_tcp*
792 reuse_tcp_lru_snip(struct outside_network* outnet)
793 {
794 	struct reuse_tcp* reuse = outnet->tcp_reuse_last;
795 	if(!reuse) return NULL;
796 	/* snip off of LRU */
797 	log_assert(reuse->lru_next == NULL);
798 	if(reuse->lru_prev) {
799 		outnet->tcp_reuse_last = reuse->lru_prev;
800 		reuse->lru_prev->lru_next = NULL;
801 	} else {
802 		outnet->tcp_reuse_last = NULL;
803 		outnet->tcp_reuse_first = NULL;
804 	}
805 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
806 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
807 	reuse->item_on_lru_list = 0;
808 	reuse->lru_next = NULL;
809 	reuse->lru_prev = NULL;
810 	return reuse;
811 }
812 
813 /** call callback on waiting_tcp, if not NULL */
814 static void
815 waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
816 	struct comm_reply* reply_info)
817 {
818 	if(w && w->cb) {
819 		fptr_ok(fptr_whitelist_pending_tcp(w->cb));
820 		(void)(*w->cb)(c, w->cb_arg, error, reply_info);
821 	}
822 }
823 
824 /** add waiting_tcp element to the outnet tcp waiting list */
825 static void
826 outnet_add_tcp_waiting(struct outside_network* outnet, struct waiting_tcp* w)
827 {
828 	struct timeval tv;
829 	log_assert(!w->on_tcp_waiting_list);
830 	if(w->on_tcp_waiting_list)
831 		return;
832 	w->next_waiting = NULL;
833 	if(outnet->tcp_wait_last)
834 		outnet->tcp_wait_last->next_waiting = w;
835 	else	outnet->tcp_wait_first = w;
836 	outnet->tcp_wait_last = w;
837 	w->on_tcp_waiting_list = 1;
838 #ifndef S_SPLINT_S
839 	tv.tv_sec = w->timeout/1000;
840 	tv.tv_usec = (w->timeout%1000)*1000;
841 #endif
842 	comm_timer_set(w->timer, &tv);
843 }
844 
845 /** add waiting_tcp element as first to the outnet tcp waiting list */
846 static void
847 outnet_add_tcp_waiting_first(struct outside_network* outnet,
848 	struct waiting_tcp* w, int reset_timer)
849 {
850 	struct timeval tv;
851 	log_assert(!w->on_tcp_waiting_list);
852 	if(w->on_tcp_waiting_list)
853 		return;
854 	w->next_waiting = outnet->tcp_wait_first;
855 	log_assert(w->next_waiting != w);
856 	if(!outnet->tcp_wait_last)
857 		outnet->tcp_wait_last = w;
858 	outnet->tcp_wait_first = w;
859 	w->on_tcp_waiting_list = 1;
860 	if(reset_timer) {
861 #ifndef S_SPLINT_S
862 		tv.tv_sec = w->timeout/1000;
863 		tv.tv_usec = (w->timeout%1000)*1000;
864 #endif
865 		comm_timer_set(w->timer, &tv);
866 	}
867 	log_assert(
868 		(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
869 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
870 }
871 
/** see if buffers can be used to service TCP queries.
 * Drains the tcp wait list: each waiting query is given an existing
 * reusable stream, a free pending_tcp buffer, or is put back at the
 * head of the wait list when neither is available. */
static void
use_free_buffer(struct outside_network* outnet)
{
	struct waiting_tcp* w;
	while(outnet->tcp_wait_first && !outnet->want_to_quit) {
#ifdef USE_DNSTAP
		struct pending_tcp* pend_tcp = NULL;
#endif
		struct reuse_tcp* reuse = NULL;
		/* pop the first element off the wait list */
		w = outnet->tcp_wait_first;
		log_assert(w->on_tcp_waiting_list);
		outnet->tcp_wait_first = w->next_waiting;
		if(outnet->tcp_wait_last == w)
			outnet->tcp_wait_last = NULL;
		log_assert(
			(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
		w->on_tcp_waiting_list = 0;
		reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
			w->ssl_upstream);
		/* re-select an ID when moving to a new TCP buffer */
		w->id = tcp_select_id(outnet, reuse);
		LDNS_ID_SET(w->pkt, w->id);
		if(reuse) {
			/* found an existing stream to the same destination */
			log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
				"found reuse", reuse);
#ifdef USE_DNSTAP
			pend_tcp = reuse->pending;
#endif
			reuse_tcp_lru_touch(outnet, reuse);
			comm_timer_disable(w->timer);
			w->next_waiting = (void*)reuse->pending;
			reuse_tree_by_id_insert(reuse, w);
			if(reuse->pending->query) {
				/* on the write wait list */
				reuse_write_wait_push_back(reuse, w);
			} else {
				/* write straight away */
				/* stop the timer on read of the fd */
				comm_point_stop_listening(reuse->pending->c);
				reuse->pending->query = w;
				outnet_tcp_take_query_setup(
					reuse->pending->c->fd, reuse->pending,
					w);
			}
		} else if(outnet->tcp_free) {
			/* no reusable stream, but a free buffer: open a new
			 * connection for this query */
			struct pending_tcp* pend = w->outnet->tcp_free;
			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
			pend->reuse.pending = pend;
			memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
			pend->reuse.addrlen = w->addrlen;
			if(!outnet_tcp_take_into_use(w)) {
				waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
					NULL);
				waiting_tcp_delete(w);
#ifdef USE_DNSTAP
				w = NULL;
#endif
			}
#ifdef USE_DNSTAP
			pend_tcp = pend;
#endif
		} else {
			/* no reuse and no free buffer, put back at the start */
			outnet_add_tcp_waiting_first(outnet, w, 0);
			break;
		}
#ifdef USE_DNSTAP
		/* log the outgoing query to dnstap if configured */
		if(outnet->dtenv && pend_tcp && w && w->sq &&
			(outnet->dtenv->log_resolver_query_messages ||
			outnet->dtenv->log_forwarder_query_messages)) {
			sldns_buffer tmp;
			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
			dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
				&pend_tcp->pi->addr, comm_tcp, w->sq->zone,
				w->sq->zonelen, &tmp);
		}
#endif
	}
}
953 
/** delete element from tree by id.
 * The element must be in the tree (asserted in debug compiles).
 * Clears w->id_node.key so the element is known to be out of the tree. */
static void
reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
#ifdef UNBOUND_DEBUG
	rbnode_type* rem;
#endif
	log_assert(w->id_node.key != NULL);
	/* in debug compiles capture the returned node so the assert below
	 * can check it was present; otherwise discard the return value */
#ifdef UNBOUND_DEBUG
	rem =
#else
	(void)
#endif
	rbtree_delete(&reuse->tree_by_id, w);
	log_assert(rem);  /* should have been there */
	w->id_node.key = NULL;
}
971 
/** move writewait list to go for another connection. */
static void
reuse_move_writewait_away(struct outside_network* outnet,
	struct pending_tcp* pend)
{
	/* the writewait list has not been written yet, so if the
	 * stream was closed, they have not actually been failed, only
	 * the queries written.  Other queries can get written to another
	 * stream.  For upstreams that do not support multiple queries
	 * and answers, the stream can get closed, and then the queries
	 * can get written on a new socket */
	struct waiting_tcp* w;
	/* if the current query is still the unwritten packet on the
	 * commpoint, it has not gone out yet and can move too */
	if(pend->query && pend->query->error_count == 0 &&
		pend->c->tcp_write_pkt == pend->query->pkt &&
		pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
		/* since the current query is not written, it can also
		 * move to a free buffer */
		if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
			LDNS_QDCOUNT(pend->query->pkt) > 0 &&
			dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
			char buf[LDNS_MAX_DOMAINLEN+1];
			dname_str(pend->query->pkt+12, buf);
			verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
				buf, (int)pend->c->tcp_write_byte_count);
		}
		/* detach the packet from the commpoint's write state and
		 * put the commpoint back in read mode */
		pend->c->tcp_write_pkt = NULL;
		pend->c->tcp_write_pkt_len = 0;
		pend->c->tcp_write_and_read = 0;
		pend->reuse.cp_more_read_again = 0;
		pend->reuse.cp_more_write_again = 0;
		pend->c->tcp_is_reading = 1;
		w = pend->query;
		pend->query = NULL;
		/* increase error count, so that if the next socket fails too
		 * the server selection is run again with this query failed
		 * and it can select a different server (if possible), or
		 * fail the query */
		w->error_count ++;
		reuse_tree_by_id_delete(&pend->reuse, w);
		outnet_add_tcp_waiting(outnet, w);
	}
	/* move all unwritten queries back onto the tcp waiting list, so
	 * they can be sent on another stream */
	while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
		if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
			LDNS_QDCOUNT(w->pkt) > 0 &&
			dname_valid(w->pkt+12, w->pkt_len-12)) {
			char buf[LDNS_MAX_DOMAINLEN+1];
			dname_str(w->pkt+12, buf);
			verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
		}
		reuse_tree_by_id_delete(&pend->reuse, w);
		outnet_add_tcp_waiting(outnet, w);
	}
}
1025 
/** remove reused element from tree and lru list.
 * After this the reuse struct is no longer findable for new queries;
 * reuse->pending is cleared. */
void
reuse_tcp_remove_tree_list(struct outside_network* outnet,
	struct reuse_tcp* reuse)
{
	verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
	if(reuse->node.key) {
		/* delete it from reuse tree */
		if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
			/* should not be possible, it should be there */
			char buf[256];
			addr_to_str(&reuse->addr, reuse->addrlen, buf,
				sizeof(buf));
			log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
		}
		reuse->node.key = NULL;
		/* defend against loops on broken tree by zeroing the
		 * rbnode structure */
		memset(&reuse->node, 0, sizeof(reuse->node));
	}
	/* delete from reuse list */
	if(reuse->item_on_lru_list) {
		/* unlink from the doubly linked lru list; fix up the
		 * list head/tail pointers when at either end */
		if(reuse->lru_prev) {
			/* assert that members of the lru list are waiting
			 * and thus have a pending pointer to the struct */
			log_assert(reuse->lru_prev->pending);
			reuse->lru_prev->lru_next = reuse->lru_next;
			log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
		} else {
			log_assert(!reuse->lru_next || reuse->lru_next->pending);
			outnet->tcp_reuse_first = reuse->lru_next;
			log_assert(!outnet->tcp_reuse_first ||
				(outnet->tcp_reuse_first !=
				 outnet->tcp_reuse_first->lru_next &&
				 outnet->tcp_reuse_first !=
				 outnet->tcp_reuse_first->lru_prev));
		}
		if(reuse->lru_next) {
			/* assert that members of the lru list are waiting
			 * and thus have a pending pointer to the struct */
			log_assert(reuse->lru_next->pending);
			reuse->lru_next->lru_prev = reuse->lru_prev;
			log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
		} else {
			log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
			outnet->tcp_reuse_last = reuse->lru_prev;
			log_assert(!outnet->tcp_reuse_last ||
				(outnet->tcp_reuse_last !=
				 outnet->tcp_reuse_last->lru_next &&
				 outnet->tcp_reuse_last !=
				 outnet->tcp_reuse_last->lru_prev));
		}
		/* the first/last pointers must be both set or both NULL */
		log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
		reuse->item_on_lru_list = 0;
		reuse->lru_next = NULL;
		reuse->lru_prev = NULL;
	}
	reuse->pending = NULL;
}
1086 
1087 /** helper function that deletes an element from the tree of readwait
1088  * elements in tcp reuse structure */
1089 static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
1090 {
1091 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1092 	waiting_tcp_delete(w);
1093 }
1094 
1095 /** delete readwait waiting_tcp elements, deletes the elements in the list */
1096 void reuse_del_readwait(rbtree_type* tree_by_id)
1097 {
1098 	if(tree_by_id->root == NULL ||
1099 		tree_by_id->root == RBTREE_NULL)
1100 		return;
1101 	traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
1102 	rbtree_init(tree_by_id, reuse_id_cmp);
1103 }
1104 
/** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry.
 * Puts the pending_tcp back on the free list, unlinks it from the reuse
 * tree/lru, frees SSL state, closes the socket and deletes any queries
 * still waiting for a read on this stream. */
static void
decommission_pending_tcp(struct outside_network* outnet,
	struct pending_tcp* pend)
{
	verbose(VERB_CLIENT, "decommission_pending_tcp");
	/* A certain code path can lead here twice for the same pending_tcp
	 * creating a loop in the free pending_tcp list. */
	if(outnet->tcp_free != pend) {
		pend->next_free = outnet->tcp_free;
		outnet->tcp_free = pend;
	}
	if(pend->reuse.node.key) {
		/* needs unlink from the reuse tree to get deleted */
		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
	}
	/* free SSL structure after remove from outnet tcp reuse tree,
	 * because the c->ssl null or not is used for sorting in the tree */
	if(pend->c->ssl) {
#ifdef HAVE_SSL
		SSL_shutdown(pend->c->ssl);
		SSL_free(pend->c->ssl);
		pend->c->ssl = NULL;
#endif
	}
	comm_point_close(pend->c);
	pend->reuse.cp_more_read_again = 0;
	pend->reuse.cp_more_write_again = 0;
	/* unlink the query and writewait list, it is part of the tree
	 * nodes and is deleted */
	pend->query = NULL;
	pend->reuse.write_wait_first = NULL;
	pend->reuse.write_wait_last = NULL;
	reuse_del_readwait(&pend->reuse.tree_by_id);
}
1140 
1141 /** perform failure callbacks for waiting queries in reuse read rbtree */
1142 static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
1143 {
1144 	rbnode_type* node;
1145 	if(tree_by_id->root == NULL ||
1146 		tree_by_id->root == RBTREE_NULL)
1147 		return;
1148 	node = rbtree_first(tree_by_id);
1149 	while(node && node != RBTREE_NULL) {
1150 		struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1151 		waiting_tcp_callback(w, NULL, err, NULL);
1152 		node = rbtree_next(node);
1153 	}
1154 }
1155 
1156 /** mark the entry for being in the cb_and_decommission stage */
1157 static void mark_for_cb_and_decommission(rbnode_type* node,
1158 	void* ATTR_UNUSED(arg))
1159 {
1160 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1161 	/* Mark the waiting_tcp to signal later code (serviced_delete) that
1162 	 * this item is part of the backed up tree_by_id and will be deleted
1163 	 * later. */
1164 	w->in_cb_and_decommission = 1;
1165 	/* Mark the serviced_query for deletion so that later code through
1166 	 * callbacks (iter_clear .. outnet_serviced_query_stop) won't
1167 	 * prematurely delete it. */
1168 	if(w->cb)
1169 		((struct serviced_query*)w->cb_arg)->to_be_deleted = 1;
1170 }
1171 
/** perform callbacks for failure and also decommission pending tcp.
 * the callbacks remove references in sq->pending to the waiting_tcp
 * members of the tree_by_id in the pending tcp.  The pending_tcp is
 * removed before the callbacks, so that the callbacks do not modify
 * the pending_tcp due to its reference in the outside_network reuse tree */
static void reuse_cb_and_decommission(struct outside_network* outnet,
	struct pending_tcp* pend, int error)
{
	rbtree_type store;
	/* take the tree of outstanding queries aside, then clear out the
	 * pending_tcp before running any callbacks */
	store = pend->reuse.tree_by_id;
	pend->query = NULL;
	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
	pend->reuse.write_wait_first = NULL;
	pend->reuse.write_wait_last = NULL;
	decommission_pending_tcp(outnet, pend);
	/* mark the stored queries so later code knows they are part of
	 * this teardown and will be deleted below */
	if(store.root != NULL && store.root != RBTREE_NULL) {
		traverse_postorder(&store, &mark_for_cb_and_decommission, NULL);
	}
	reuse_cb_readwait_for_failure(&store, error);
	reuse_del_readwait(&store);
}
1193 
/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
static void
reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
	/* fd of -1 keeps the current fd; only the timeout is (re)armed */
	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
}
1201 
1202 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1203 static void
1204 reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1205 {
1206 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
1207 	sldns_buffer_clear(pend_tcp->c->buffer);
1208 	pend_tcp->c->tcp_is_reading = 1;
1209 	pend_tcp->c->tcp_byte_count = 0;
1210 	comm_point_stop_listening(pend_tcp->c);
1211 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1212 }
1213 
/** callback for tcp commpoint events on outgoing streams.
 * Handles write-completion, timeouts, errors and incoming replies;
 * decides whether the stream is kept for reuse or decommissioned. */
int
outnet_tcp_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply *reply_info)
{
	struct pending_tcp* pend = (struct pending_tcp*)arg;
	struct outside_network* outnet = pend->reuse.outnet;
	struct waiting_tcp* w = NULL;
	log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
	verbose(VERB_ALGO, "outnettcp cb");
	if(error == NETEVENT_TIMEOUT) {
		if(pend->c->tcp_write_and_read) {
			verbose(VERB_QUERY, "outnettcp got tcp timeout "
				"for read, ignored because write underway");
			/* if we are writing, ignore readtimer, wait for write timer
			 * or write is done */
			return 0;
		} else {
			verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
				(pend->reuse.tree_by_id.count?"for reading pkt":
				"for keepalive for reuse"));
		}
		/* must be timeout for reading or keepalive reuse,
		 * close it. */
		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
	} else if(error == NETEVENT_PKT_WRITTEN) {
		/* the packet we want to write has been written. */
		verbose(VERB_ALGO, "outnet tcp pkt was written event");
		log_assert(c == pend->c);
		log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
		log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
		pend->c->tcp_write_pkt = NULL;
		pend->c->tcp_write_pkt_len = 0;
		/* the pend.query is already in tree_by_id */
		log_assert(pend->query->id_node.key);
		pend->query = NULL;
		/* setup to write next packet or setup read timeout */
		if(pend->reuse.write_wait_first) {
			verbose(VERB_ALGO, "outnet tcp setup next pkt");
			/* we can write it straight away perhaps, set flag
			 * because this callback called after a tcp write
			 * succeeded and likely more buffer space is available
			 * and we can write some more. */
			pend->reuse.cp_more_write_again = 1;
			pend->query = reuse_write_wait_pop(&pend->reuse);
			comm_point_stop_listening(pend->c);
			outnet_tcp_take_query_setup(pend->c->fd, pend,
				pend->query);
		} else {
			verbose(VERB_ALGO, "outnet tcp writes done, wait");
			/* nothing more to write; switch the stream to read
			 * mode and arm the reuse timeout */
			pend->c->tcp_write_and_read = 0;
			pend->reuse.cp_more_read_again = 0;
			pend->reuse.cp_more_write_again = 0;
			pend->c->tcp_is_reading = 1;
			comm_point_stop_listening(pend->c);
			reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
		}
		return 0;
	} else if(error != NETEVENT_NOERROR) {
		verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
		/* salvage the still-unwritten queries for another stream */
		reuse_move_writewait_away(outnet, pend);
		/* pass error below and exit */
	} else {
		/* check ID */
		if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
			log_addr(VERB_QUERY,
				"outnettcp: bad ID in reply, too short, from:",
				&pend->reuse.addr, pend->reuse.addrlen);
			error = NETEVENT_CLOSED;
		} else {
			uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
				c->buffer));
			/* find the query the reply is for */
			w = reuse_tcp_by_id_find(&pend->reuse, id);
			/* Make sure that the reply we got is at least for a
			 * sent query with the same ID; the waiting_tcp that
			 * gets a reply is assumed to not be waiting to be
			 * sent. */
			if(w && (w->on_tcp_waiting_list || w->write_wait_queued))
				w = NULL;
		}
	}
	if(error == NETEVENT_NOERROR && !w) {
		/* no struct waiting found in tree, no reply to call */
		log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
			&pend->reuse.addr, pend->reuse.addrlen);
		error = NETEVENT_CLOSED;
	}
	if(error == NETEVENT_NOERROR) {
		/* add to reuse tree so it can be reused, if not a failure.
		 * This is possible if the state machine wants to make a tcp
		 * query again to the same destination. */
		if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
			(void)reuse_tcp_insert(outnet, pend);
		}
	}
	if(w) {
		/* deliver the reply (or the error) to the waiting query */
		log_assert(!w->on_tcp_waiting_list);
		log_assert(!w->write_wait_queued);
		reuse_tree_by_id_delete(&pend->reuse, w);
		verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
			error, (int)sldns_buffer_limit(c->buffer));
		waiting_tcp_callback(w, c, error, reply_info);
		waiting_tcp_delete(w);
	}
	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
	if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
		verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
		/* it is in the reuse_tcp tree, with other queries, or
		 * on the empty list. do not decommission it */
		/* if there are more outstanding queries, we could try to
		 * read again, to see if it is on the input,
		 * because this callback called after a successful read
		 * and there could be more bytes to read on the input */
		if(pend->reuse.tree_by_id.count != 0)
			pend->reuse.cp_more_read_again = 1;
		reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
		return 0;
	}
	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
	/* no queries on it, no space to keep it. or timeout or closed due
	 * to error.  Close it */
	reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
		NETEVENT_TIMEOUT:NETEVENT_CLOSED));
	use_free_buffer(outnet);
	return 0;
}
1340 
/** lower use count on pc, see if it can be closed.
 * When no queries are outstanding on the port anymore, the socket is
 * closed, its port number returned to the interface's available pool,
 * and the port_comm placed on the unused_fds list. */
static void
portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
{
	struct port_if* pif;
	pc->num_outstanding--;
	if(pc->num_outstanding > 0) {
		return;
	}
	/* close it and replace in unused list */
	verbose(VERB_ALGO, "close of port %d", pc->number);
	comm_point_close(pc->cp);
	pif = pc->pif;
	log_assert(pif->inuse > 0);
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
	/* put the port number back into the free part of avail_ports */
	pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
#endif
	pif->inuse--;
	/* keep the out array dense: move the last in-use entry into the
	 * slot this port_comm occupied */
	pif->out[pc->index] = pif->out[pif->inuse];
	pif->out[pc->index]->index = pc->index;
	pc->next = outnet->unused_fds;
	outnet->unused_fds = pc;
}
1364 
/** try to send waiting UDP queries.
 * Pops queries off the udp wait list while unused fds are available,
 * copies the stored packet into the shared udp buffer and sends it. */
static void
outnet_send_wait_udp(struct outside_network* outnet)
{
	struct pending* pend;
	/* process waiting queries */
	while(outnet->udp_wait_first && outnet->unused_fds
		&& !outnet->want_to_quit) {
		/* unlink from the waiting list */
		pend = outnet->udp_wait_first;
		outnet->udp_wait_first = pend->next_waiting;
		if(!pend->next_waiting) outnet->udp_wait_last = NULL;
		/* copy the stored packet into the send buffer */
		sldns_buffer_clear(outnet->udp_buff);
		sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
		sldns_buffer_flip(outnet->udp_buff);
		free(pend->pkt); /* freeing now makes get_mem correct */
		pend->pkt = NULL;
		pend->pkt_len = 0;
		/* guard against reentrant deletion of the serviced query
		 * from the callback while we are sending it */
		log_assert(!pend->sq->busy);
		pend->sq->busy = 1;
		if(!randomize_and_send_udp(pend, outnet->udp_buff,
			pend->timeout)) {
			/* callback error on pending */
			if(pend->cb) {
				fptr_ok(fptr_whitelist_pending_udp(pend->cb));
				(void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
					NETEVENT_CLOSED, NULL);
			}
			pending_delete(outnet, pend);
		} else {
			pend->sq->busy = 0;
		}
	}
}
1398 
1399 int
1400 outnet_udp_cb(struct comm_point* c, void* arg, int error,
1401 	struct comm_reply *reply_info)
1402 {
1403 	struct outside_network* outnet = (struct outside_network*)arg;
1404 	struct pending key;
1405 	struct pending* p;
1406 	verbose(VERB_ALGO, "answer cb");
1407 
1408 	if(error != NETEVENT_NOERROR) {
1409 		verbose(VERB_QUERY, "outnetudp got udp error %d", error);
1410 		return 0;
1411 	}
1412 	if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1413 		verbose(VERB_QUERY, "outnetudp udp too short");
1414 		return 0;
1415 	}
1416 	log_assert(reply_info);
1417 
1418 	/* setup lookup key */
1419 	key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
1420 	memcpy(&key.addr, &reply_info->addr, reply_info->addrlen);
1421 	key.addrlen = reply_info->addrlen;
1422 	verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
1423 	log_addr(VERB_ALGO, "Incoming reply addr =",
1424 		&reply_info->addr, reply_info->addrlen);
1425 
1426 	/* find it, see if this thing is a valid query response */
1427 	verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
1428 	p = (struct pending*)rbtree_search(outnet->pending, &key);
1429 	if(!p) {
1430 		verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
1431 		log_buf(VERB_ALGO, "dropped message", c->buffer);
1432 		outnet->unwanted_replies++;
1433 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1434 			>= outnet->unwanted_threshold) {
1435 			log_warn("unwanted reply total reached threshold (%u)"
1436 				" you may be under attack."
1437 				" defensive action: clearing the cache",
1438 				(unsigned)outnet->unwanted_threshold);
1439 			fptr_ok(fptr_whitelist_alloc_cleanup(
1440 				outnet->unwanted_action));
1441 			(*outnet->unwanted_action)(outnet->unwanted_param);
1442 			outnet->unwanted_total = 0;
1443 		}
1444 		return 0;
1445 	}
1446 
1447 	verbose(VERB_ALGO, "received udp reply.");
1448 	log_buf(VERB_ALGO, "udp message", c->buffer);
1449 	if(p->pc->cp != c) {
1450 		verbose(VERB_QUERY, "received reply id,addr on wrong port. "
1451 			"dropped.");
1452 		outnet->unwanted_replies++;
1453 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1454 			>= outnet->unwanted_threshold) {
1455 			log_warn("unwanted reply total reached threshold (%u)"
1456 				" you may be under attack."
1457 				" defensive action: clearing the cache",
1458 				(unsigned)outnet->unwanted_threshold);
1459 			fptr_ok(fptr_whitelist_alloc_cleanup(
1460 				outnet->unwanted_action));
1461 			(*outnet->unwanted_action)(outnet->unwanted_param);
1462 			outnet->unwanted_total = 0;
1463 		}
1464 		return 0;
1465 	}
1466 	comm_timer_disable(p->timer);
1467 	verbose(VERB_ALGO, "outnet handle udp reply");
1468 	/* delete from tree first in case callback creates a retry */
1469 	(void)rbtree_delete(outnet->pending, p->node.key);
1470 	if(p->cb) {
1471 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1472 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info);
1473 	}
1474 	portcomm_loweruse(outnet, p->pc);
1475 	pending_delete(NULL, p);
1476 	outnet_send_wait_udp(outnet);
1477 	return 0;
1478 }
1479 
/** calculate number of ip4 and ip6 interfaces*/
static void
calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
	int* num_ip4, int* num_ip6)
{
	int idx;
	*num_ip4 = 0;
	*num_ip6 = 0;
	/* with no explicit interfaces, one wildcard per enabled family */
	if(num_ifs <= 0) {
		*num_ip4 = do_ip4?1:0;
		*num_ip6 = do_ip6?1:0;
		return;
	}
	/* count the listed interfaces per address family; families that
	 * are not enabled do not count */
	for(idx=0; idx<num_ifs; idx++) {
		if(str_is_ip6(ifs[idx])) {
			if(do_ip6)
				(*num_ip6)++;
		} else if(do_ip4) {
			(*num_ip4)++;
		}
	}
}
1506 
1507 void
1508 pending_udp_timer_delay_cb(void* arg)
1509 {
1510 	struct pending* p = (struct pending*)arg;
1511 	struct outside_network* outnet = p->outnet;
1512 	verbose(VERB_ALGO, "timeout udp with delay");
1513 	portcomm_loweruse(outnet, p->pc);
1514 	pending_delete(outnet, p);
1515 	outnet_send_wait_udp(outnet);
1516 }
1517 
/** timer callback for a udp query that timed out: report the timeout to
 * the query's callback, then close the port now or, with delayclose,
 * after an extra delay. */
void
pending_udp_timer_cb(void *arg)
{
	struct pending* p = (struct pending*)arg;
	struct outside_network* outnet = p->outnet;
	/* it timed out */
	verbose(VERB_ALGO, "timeout udp");
	if(p->cb) {
		fptr_ok(fptr_whitelist_pending_udp(p->cb));
		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL);
	}
	/* if delayclose, keep port open for a longer time.
	 * But if the udpwaitlist exists, then we are struggling to
	 * keep up with demand for sockets, so do not wait, but service
	 * the customer (customer service more important than portICMPs) */
	if(outnet->delayclose && !outnet->udp_wait_first) {
		/* re-arm the timer with the delay callback; the query's
		 * own callback has already been called, so clear it */
		p->cb = NULL;
		p->timer->callback = &pending_udp_timer_delay_cb;
		comm_timer_set(p->timer, &outnet->delay_tv);
		return;
	}
	portcomm_loweruse(outnet, p->pc);
	pending_delete(outnet, p);
	outnet_send_wait_udp(outnet);
}
1543 
/** create pending_tcp buffers.
 * Allocates num_tcp pending_tcp structures with their commpoints and
 * links them all onto the free list.
 * @return 1 on success, 0 on allocation failure (partially allocated
 *	structures are left for the caller's cleanup path to free). */
static int
create_pending_tcp(struct outside_network* outnet, size_t bufsize)
{
	size_t i;
	if(outnet->num_tcp == 0)
		return 1; /* no tcp needed, nothing to do */
	if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
			outnet->num_tcp, sizeof(struct pending_tcp*))))
		return 0;
	for(i=0; i<outnet->num_tcp; i++) {
		if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
			sizeof(struct pending_tcp))))
			return 0;
		/* push onto the free list */
		outnet->tcp_conns[i]->next_free = outnet->tcp_free;
		outnet->tcp_free = outnet->tcp_conns[i];
		outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
			outnet->base, bufsize, outnet_tcp_cb,
			outnet->tcp_conns[i]);
		if(!outnet->tcp_conns[i]->c)
			return 0;
	}
	return 1;
}
1568 
/** setup an outgoing interface, ready address.
 * Copies the available port list, parses the address (plain IP or
 * netblock notation) and allocates the out array of port_comms.
 * @return 1 on success, 0 on parse or allocation failure (allocated
 *	members are left for the caller's cleanup path to free). */
static int setup_if(struct port_if* pif, const char* addrstr,
	int* avail, int numavail, size_t numfd)
{
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
	pif->avail_total = numavail;
	/* private copy of the available outgoing port numbers */
	pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
	if(!pif->avail_ports)
		return 0;
#endif
	/* accept a plain IP address, or else a netblock (addr/prefixlen) */
	if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
	   !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
			      &pif->addr, &pif->addrlen, &pif->pfxlen))
		return 0;
	pif->maxout = (int)numfd;
	pif->inuse = 0;
	pif->out = (struct port_comm**)calloc(numfd,
		sizeof(struct port_comm*));
	if(!pif->out)
		return 0;
	return 1;
}
1591 
/** create the outside_network: allocate the structure, the pending and
 * serviced trees, the udp port_comms and the per-interface state.
 * Returns NULL on failure (partially built state is freed). */
struct outside_network*
outside_network_create(struct comm_base *base, size_t bufsize,
	size_t num_ports, char** ifs, int num_ifs, int do_ip4,
	int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
	struct ub_randstate* rnd, int use_caps_for_id, int* availports,
	int numavailports, size_t unwanted_threshold, int tcp_mss,
	void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
	void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
	int udp_connect, int max_reuse_tcp_queries, int tcp_reuse_timeout,
	int tcp_auth_query_timeout)
{
	struct outside_network* outnet = (struct outside_network*)
		calloc(1, sizeof(struct outside_network));
	size_t k;
	if(!outnet) {
		log_err("malloc failed");
		return NULL;
	}
	/* store the configuration parameters */
	comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
	outnet->base = base;
	outnet->num_tcp = num_tcp;
	outnet->max_reuse_tcp_queries = max_reuse_tcp_queries;
	outnet->tcp_reuse_timeout= tcp_reuse_timeout;
	outnet->tcp_auth_query_timeout = tcp_auth_query_timeout;
	outnet->num_tcp_outgoing = 0;
	outnet->num_udp_outgoing = 0;
	outnet->infra = infra;
	outnet->rnd = rnd;
	outnet->sslctx = sslctx;
	outnet->tls_use_sni = tls_use_sni;
#ifdef USE_DNSTAP
	outnet->dtenv = dtenv;
#else
	(void)dtenv;
#endif
	outnet->svcd_overhead = 0;
	outnet->want_to_quit = 0;
	outnet->unwanted_threshold = unwanted_threshold;
	outnet->unwanted_action = unwanted_action;
	outnet->unwanted_param = unwanted_param;
	outnet->use_caps_for_id = use_caps_for_id;
	outnet->do_udp = do_udp;
	outnet->tcp_mss = tcp_mss;
	outnet->ip_dscp = dscp;
#ifndef S_SPLINT_S
	if(delayclose) {
		/* delayclose is in milliseconds; split into sec/usec */
		outnet->delayclose = 1;
		outnet->delay_tv.tv_sec = delayclose/1000;
		outnet->delay_tv.tv_usec = (delayclose%1000)*1000;
	}
#endif
	if(udp_connect) {
		outnet->udp_connect = 1;
	}
	if(numavailports == 0 || num_ports == 0) {
		log_err("no outgoing ports available");
		outside_network_delete(outnet);
		return NULL;
	}
#ifndef INET6
	do_ip6 = 0;
#endif
	/* allocate the per-family interface arrays */
	calc_num46(ifs, num_ifs, do_ip4, do_ip6,
		&outnet->num_ip4, &outnet->num_ip6);
	if(outnet->num_ip4 != 0) {
		if(!(outnet->ip4_ifs = (struct port_if*)calloc(
			(size_t)outnet->num_ip4, sizeof(struct port_if)))) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
	}
	if(outnet->num_ip6 != 0) {
		if(!(outnet->ip6_ifs = (struct port_if*)calloc(
			(size_t)outnet->num_ip6, sizeof(struct port_if)))) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
	}
	/* the shared udp buffer, lookup trees and tcp buffers */
	if(	!(outnet->udp_buff = sldns_buffer_new(bufsize)) ||
		!(outnet->pending = rbtree_create(pending_cmp)) ||
		!(outnet->serviced = rbtree_create(serviced_cmp)) ||
		!create_pending_tcp(outnet, bufsize)) {
		log_err("malloc failed");
		outside_network_delete(outnet);
		return NULL;
	}
	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
	outnet->tcp_reuse_max = num_tcp;

	/* allocate commpoints */
	for(k=0; k<num_ports; k++) {
		struct port_comm* pc;
		pc = (struct port_comm*)calloc(1, sizeof(*pc));
		if(!pc) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
		/* fd of -1: the socket is opened when the port is used */
		pc->cp = comm_point_create_udp(outnet->base, -1,
			outnet->udp_buff, outnet_udp_cb, outnet, NULL);
		if(!pc->cp) {
			log_err("malloc failed");
			free(pc);
			outside_network_delete(outnet);
			return NULL;
		}
		pc->next = outnet->unused_fds;
		outnet->unused_fds = pc;
	}

	/* allocate interfaces */
	if(num_ifs == 0) {
		/* no interfaces listed; use the wildcard addresses */
		if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0",
			availports, numavailports, num_ports)) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
		if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::",
			availports, numavailports, num_ports)) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
	} else {
		size_t done_4 = 0, done_6 = 0;
		int i;
		for(i=0; i<num_ifs; i++) {
			if(str_is_ip6(ifs[i]) && do_ip6) {
				if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i],
					availports, numavailports, num_ports)){
					log_err("malloc failed");
					outside_network_delete(outnet);
					return NULL;
				}
				done_6++;
			}
			if(!str_is_ip6(ifs[i]) && do_ip4) {
				if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i],
					availports, numavailports, num_ports)){
					log_err("malloc failed");
					outside_network_delete(outnet);
					return NULL;
				}
				done_4++;
			}
		}
	}
	return outnet;
}
1744 
1745 /** helper pending delete */
1746 static void
1747 pending_node_del(rbnode_type* node, void* arg)
1748 {
1749 	struct pending* pend = (struct pending*)node;
1750 	struct outside_network* outnet = (struct outside_network*)arg;
1751 	pending_delete(outnet, pend);
1752 }
1753 
1754 /** helper serviced delete */
1755 static void
1756 serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg))
1757 {
1758 	struct serviced_query* sq = (struct serviced_query*)node;
1759 	alloc_reg_release(sq->alloc, sq->region);
1760 	if(sq->timer)
1761 		comm_timer_delete(sq->timer);
1762 	free(sq);
1763 }
1764 
1765 void
1766 outside_network_quit_prepare(struct outside_network* outnet)
1767 {
1768 	if(!outnet)
1769 		return;
1770 	/* prevent queued items from being sent */
1771 	outnet->want_to_quit = 1;
1772 }
1773 
/** Delete the outside_network and everything it owns.  Must be safe to
 * call on a partially constructed structure (outside_network_create calls
 * this on malloc failure), so every member is tested before use. */
void
outside_network_delete(struct outside_network* outnet)
{
	if(!outnet)
		return;
	outnet->want_to_quit = 1;
	/* check every element, since we can be called on malloc error */
	if(outnet->pending) {
		/* free pending elements, but do no unlink from tree. */
		traverse_postorder(outnet->pending, pending_node_del, NULL);
		free(outnet->pending);
	}
	if(outnet->serviced) {
		/* likewise: free the elements, then the tree wholesale */
		traverse_postorder(outnet->serviced, serviced_node_del, NULL);
		free(outnet->serviced);
	}
	if(outnet->udp_buff)
		sldns_buffer_free(outnet->udp_buff);
	if(outnet->unused_fds) {
		/* pool of pre-created, currently unused port commpoints */
		struct port_comm* p = outnet->unused_fds, *np;
		while(p) {
			np = p->next;
			comm_point_delete(p->cp);
			free(p);
			p = np;
		}
		outnet->unused_fds = NULL;
	}
	if(outnet->ip4_ifs) {
		/* close the ports still open on every IPv4 interface */
		int i, k;
		for(i=0; i<outnet->num_ip4; i++) {
			for(k=0; k<outnet->ip4_ifs[i].inuse; k++) {
				struct port_comm* pc = outnet->ip4_ifs[i].
					out[k];
				comm_point_delete(pc->cp);
				free(pc);
			}
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
			free(outnet->ip4_ifs[i].avail_ports);
#endif
			free(outnet->ip4_ifs[i].out);
		}
		free(outnet->ip4_ifs);
	}
	if(outnet->ip6_ifs) {
		/* same for the IPv6 interfaces */
		int i, k;
		for(i=0; i<outnet->num_ip6; i++) {
			for(k=0; k<outnet->ip6_ifs[i].inuse; k++) {
				struct port_comm* pc = outnet->ip6_ifs[i].
					out[k];
				comm_point_delete(pc->cp);
				free(pc);
			}
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
			free(outnet->ip6_ifs[i].avail_ports);
#endif
			free(outnet->ip6_ifs[i].out);
		}
		free(outnet->ip6_ifs);
	}
	if(outnet->tcp_conns) {
		size_t i;
		for(i=0; i<outnet->num_tcp; i++)
			if(outnet->tcp_conns[i]) {
				struct pending_tcp* pend;
				pend = outnet->tcp_conns[i];
				if(pend->reuse.item_on_lru_list) {
					/* delete waiting_tcp elements that
					 * the tcp conn is working on */
					decommission_pending_tcp(outnet, pend);
				}
				comm_point_delete(outnet->tcp_conns[i]->c);
				free(outnet->tcp_conns[i]);
				outnet->tcp_conns[i] = NULL;
			}
		free(outnet->tcp_conns);
		outnet->tcp_conns = NULL;
	}
	if(outnet->tcp_wait_first) {
		/* queries still waiting for a tcp buffer to come free */
		struct waiting_tcp* p = outnet->tcp_wait_first, *np;
		while(p) {
			np = p->next_waiting;
			waiting_tcp_delete(p);
			p = np;
		}
	}
	/* was allocated in struct pending that was deleted above */
	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
	outnet->tcp_reuse_first = NULL;
	outnet->tcp_reuse_last = NULL;
	if(outnet->udp_wait_first) {
		/* queries waiting for a udp fd; pass NULL outnet because
		 * the pending tree was already freed above */
		struct pending* p = outnet->udp_wait_first, *np;
		while(p) {
			np = p->next_waiting;
			pending_delete(NULL, p);
			p = np;
		}
	}
	free(outnet);
}
1874 
/** Delete a pending UDP query; unlinks it from the outnet udp wait list
 * and the pending rbtree when outnet is given.  Pass NULL for outnet
 * when those structures are already gone (see outside_network_delete). */
void
pending_delete(struct outside_network* outnet, struct pending* p)
{
	if(!p)
		return;
	/* cheap pre-test: p can only be on the waiting list when it has a
	 * successor or is the tail element; otherwise skip the list walk */
	if(outnet && outnet->udp_wait_first &&
		(p->next_waiting || p == outnet->udp_wait_last) ) {
		/* delete from waiting list, if it is in the waiting list */
		struct pending* prev = NULL, *x = outnet->udp_wait_first;
		while(x && x != p) {
			prev = x;
			x = x->next_waiting;
		}
		if(x) {
			log_assert(x == p);
			if(prev)
				prev->next_waiting = p->next_waiting;
			else	outnet->udp_wait_first = p->next_waiting;
			if(outnet->udp_wait_last == p)
				outnet->udp_wait_last = prev;
		}
	}
	if(outnet) {
		/* result ignored; p may never have been inserted (e.g. when
		 * select_id failed before insertion) */
		(void)rbtree_delete(outnet->pending, p->node.key);
	}
	if(p->timer)
		comm_timer_delete(p->timer);
	/* pkt is the queued-for-send copy; may be NULL when not queued */
	free(p->pkt);
	free(p);
}
1905 
/** Randomise the host part of an IPv6 address in place.
 * The low (128 - pfxlen) bits get random values; the network prefix is
 * left untouched.  Does nothing unless 0 < pfxlen < 128.
 * @param sa: address to scramble (modified in place).
 * @param pfxlen: network prefix length in bits.
 * @param rnd: random state. */
static void
sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd)
{
	int b, whole;
	if(pfxlen <= 0 || pfxlen >= 128)
		return;
	/* whole random bytes at the tail of the address */
	whole = (128 - pfxlen) / 8;
	for(b = 0; b < whole; b++) {
		sa->sin6_addr.s6_addr[15-b] = (uint8_t)ub_random_max(rnd, 256);
	}
	/* partial byte: only the (8 - pfxlen%8) host bits are randomised */
	if((pfxlen & 7) != 0) {
		sa->sin6_addr.s6_addr[15-whole] |=
			((0xFF >> (pfxlen & 7)) & ub_random_max(rnd, 256));
	}
}
1921 
1922 /**
1923  * Try to open a UDP socket for outgoing communication.
1924  * Sets sockets options as needed.
1925  * @param addr: socket address.
1926  * @param addrlen: length of address.
1927  * @param pfxlen: length of network prefix (for address randomisation).
1928  * @param port: port override for addr.
1929  * @param inuse: if -1 is returned, this bool means the port was in use.
1930  * @param rnd: random state (for address randomisation).
1931  * @param dscp: DSCP to use.
1932  * @return fd or -1
1933  */
1934 static int
1935 udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen,
1936 	int port, int* inuse, struct ub_randstate* rnd, int dscp)
1937 {
1938 	int fd, noproto;
1939 	if(addr_is_ip6(addr, addrlen)) {
1940 		int freebind = 0;
1941 		struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
1942 		sa.sin6_port = (in_port_t)htons((uint16_t)port);
1943 		sa.sin6_flowinfo = 0;
1944 		sa.sin6_scope_id = 0;
1945 		if(pfxlen != 0) {
1946 			freebind = 1;
1947 			sai6_putrandom(&sa, pfxlen, rnd);
1948 		}
1949 		fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
1950 			(struct sockaddr*)&sa, addrlen, 1, inuse, &noproto,
1951 			0, 0, 0, NULL, 0, freebind, 0, dscp);
1952 	} else {
1953 		struct sockaddr_in* sa = (struct sockaddr_in*)addr;
1954 		sa->sin_port = (in_port_t)htons((uint16_t)port);
1955 		fd = create_udp_sock(AF_INET, SOCK_DGRAM,
1956 			(struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
1957 			0, 0, 0, NULL, 0, 0, 0, dscp);
1958 	}
1959 	return fd;
1960 }
1961 
1962 /** Select random ID */
1963 static int
1964 select_id(struct outside_network* outnet, struct pending* pend,
1965 	sldns_buffer* packet)
1966 {
1967 	int id_tries = 0;
1968 	pend->id = GET_RANDOM_ID(outnet->rnd);
1969 	LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
1970 
1971 	/* insert in tree */
1972 	pend->node.key = pend;
1973 	while(!rbtree_insert(outnet->pending, &pend->node)) {
1974 		/* change ID to avoid collision */
1975 		pend->id = GET_RANDOM_ID(outnet->rnd);
1976 		LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
1977 		id_tries++;
1978 		if(id_tries == MAX_ID_RETRY) {
1979 			pend->id=99999; /* non existent ID */
1980 			log_err("failed to generate unique ID, drop msg");
1981 			return 0;
1982 		}
1983 	}
1984 	verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id);
1985 	return 1;
1986 }
1987 
1988 /** return true is UDP connect error needs to be logged */
1989 static int udp_connect_needs_log(int err)
1990 {
1991 	switch(err) {
1992 	case ECONNREFUSED:
1993 #  ifdef ENETUNREACH
1994 	case ENETUNREACH:
1995 #  endif
1996 #  ifdef EHOSTDOWN
1997 	case EHOSTDOWN:
1998 #  endif
1999 #  ifdef EHOSTUNREACH
2000 	case EHOSTUNREACH:
2001 #  endif
2002 #  ifdef ENETDOWN
2003 	case ENETDOWN:
2004 #  endif
2005 #  ifdef EADDRNOTAVAIL
2006 	case EADDRNOTAVAIL:
2007 #  endif
2008 	case EPERM:
2009 	case EACCES:
2010 		if(verbosity >= VERB_ALGO)
2011 			return 1;
2012 		return 0;
2013 	default:
2014 		break;
2015 	}
2016 	return 1;
2017 }
2018 
2019 
/** Select a random outgoing interface and source port for a pending UDP
 * query, opening (or reusing) the socket for it.
 * @param outnet: outside network state (rnd, unused_fds pool, options).
 * @param pend: the pending query; on success pend->pc is set and its
 *	outstanding counter is incremented.
 * @param num_if: number of interfaces in ifs (one address family).
 * @param ifs: interface array to pick from.
 * @return 1 on success, 0 on failure (drop the message). */
static int
select_ifport(struct outside_network* outnet, struct pending* pend,
	int num_if, struct port_if* ifs)
{
	int my_if, my_port, fd, portno, inuse, tries=0;
	struct port_if* pif;
	/* randomly select interface and port */
	if(num_if == 0) {
		verbose(VERB_QUERY, "Need to send query but have no "
			"outgoing interfaces of that family");
		return 0;
	}
	log_assert(outnet->unused_fds);
	tries = 0;
	while(1) {
		my_if = ub_random_max(outnet->rnd, num_if);
		pif = &ifs[my_if];
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
		if(outnet->udp_connect) {
			/* if we connect() we cannot reuse fds for a port */
			if(pif->inuse >= pif->avail_total) {
				tries++;
				if(tries < MAX_PORT_RETRY)
					continue;
				log_err("failed to find an open port, drop msg");
				return 0;
			}
			/* pick from the not-yet-used ports only */
			my_port = pif->inuse + ub_random_max(outnet->rnd,
				pif->avail_total - pif->inuse);
		} else  {
			/* index < inuse means an already-open port */
			my_port = ub_random_max(outnet->rnd, pif->avail_total);
			if(my_port < pif->inuse) {
				/* port already open */
				pend->pc = pif->out[my_port];
				verbose(VERB_ALGO, "using UDP if=%d port=%d",
					my_if, pend->pc->number);
				break;
			}
		}
		/* try to open new port, if fails, loop to try again */
		log_assert(pif->inuse < pif->maxout);
		portno = pif->avail_ports[my_port - pif->inuse];
#else
		/* port randomisation disabled: let the kernel pick */
		my_port = portno = 0;
#endif
		fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
			portno, &inuse, outnet->rnd, outnet->ip_dscp);
		if(fd == -1 && !inuse) {
			/* nonrecoverable error making socket */
			return 0;
		}
		if(fd != -1) {
			verbose(VERB_ALGO, "opened UDP if=%d port=%d",
				my_if, portno);
			if(outnet->udp_connect) {
				/* connect() to the destination */
				if(connect(fd, (struct sockaddr*)&pend->addr,
					pend->addrlen) < 0) {
					if(udp_connect_needs_log(errno)) {
						log_err_addr("udp connect failed",
							strerror(errno), &pend->addr,
							pend->addrlen);
					}
					sock_close(fd);
					return 0;
				}
			}
			/* grab fd */
			pend->pc = outnet->unused_fds;
			outnet->unused_fds = pend->pc->next;

			/* setup portcomm */
			pend->pc->next = NULL;
			pend->pc->number = portno;
			pend->pc->pif = pif;
			pend->pc->index = pif->inuse;
			pend->pc->num_outstanding = 0;
			comm_point_start_listening(pend->pc->cp, fd, -1);

			/* grab port in interface */
			pif->out[pif->inuse] = pend->pc;
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
			/* remove the chosen port from the available list by
			 * swapping the last available port into its slot */
			pif->avail_ports[my_port - pif->inuse] =
				pif->avail_ports[pif->avail_total-pif->inuse-1];
#endif
			pif->inuse++;
			break;
		}
		/* failed, already in use */
		verbose(VERB_QUERY, "port %d in use, trying another", portno);
		tries++;
		if(tries == MAX_PORT_RETRY) {
			log_err("failed to find an open port, drop msg");
			return 0;
		}
	}
	log_assert(pend->pc);
	pend->pc->num_outstanding++;

	return 1;
}
2122 
/** Select a random ID and source if/port for a pending UDP query, send
 * it, and arm its timeout timer.
 * @param pend: pending query with addr, timer and sq already set.
 * @param packet: wireformat query (ID is patched in here).
 * @param timeout: in milliseconds.
 * @return 1 on success, 0 on failure (caller deletes pend). */
static int
randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout)
{
	struct timeval tv;
	struct outside_network* outnet = pend->sq->outnet;

	/* select id */
	if(!select_id(outnet, pend, packet)) {
		return 0;
	}

	/* select src_if, port */
	if(addr_is_ip6(&pend->addr, pend->addrlen)) {
		if(!select_ifport(outnet, pend,
			outnet->num_ip6, outnet->ip6_ifs))
			return 0;
	} else {
		if(!select_ifport(outnet, pend,
			outnet->num_ip4, outnet->ip4_ifs))
			return 0;
	}
	log_assert(pend->pc && pend->pc->cp);

	/* send it over the commlink */
	if(!comm_point_send_udp_msg(pend->pc->cp, packet,
		(struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) {
		/* send failed: release our claim on the port commpoint */
		portcomm_loweruse(outnet, pend->pc);
		return 0;
	}
	outnet->num_udp_outgoing++;

	/* system calls to set timeout after sending UDP to make roundtrip
	   smaller. */
#ifndef S_SPLINT_S
	tv.tv_sec = timeout/1000;
	tv.tv_usec = (timeout%1000)*1000;
#endif
	comm_timer_set(pend->timer, &tv);

#ifdef USE_DNSTAP
	/*
	 * sending src (local service)/dst (upstream) addresses over DNSTAP
	 * There are no chances to get the src (local service) addr if unbound
	 * is not configured with specific outgoing IP-addresses. So we will
	 * pass 0.0.0.0 (::) to argument for
	 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls.
	 */
	if(outnet->dtenv &&
	   (outnet->dtenv->log_resolver_query_messages ||
		outnet->dtenv->log_forwarder_query_messages)) {
			log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen);
			log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen);
			dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp,
				pend->sq->zone, pend->sq->zonelen, packet);
	}
#endif
	return 1;
}
2181 
/** Start a pending UDP query.  If no unused fd is available the query
 * (with a copy of the packet) is put on the udp wait list instead of
 * being sent now.
 * @return the pending entry, or NULL on (allocation) failure. */
struct pending*
pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet,
	int timeout, comm_point_callback_type* cb, void* cb_arg)
{
	/* calloc: all list pointers and pkt start out NULL */
	struct pending* pend = (struct pending*)calloc(1, sizeof(*pend));
	if(!pend) return NULL;
	pend->outnet = sq->outnet;
	pend->sq = sq;
	pend->addrlen = sq->addrlen;
	memmove(&pend->addr, &sq->addr, sq->addrlen);
	pend->cb = cb;
	pend->cb_arg = cb_arg;
	pend->node.key = pend;
	pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb,
		pend);
	if(!pend->timer) {
		free(pend);
		return NULL;
	}

	if(sq->outnet->unused_fds == NULL) {
		/* no unused fd, cannot create a new port (randomly) */
		verbose(VERB_ALGO, "no fds available, udp query waiting");
		pend->timeout = timeout;
		pend->pkt_len = sldns_buffer_limit(packet);
		/* keep a private copy of the packet for the later send */
		pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet),
			pend->pkt_len);
		if(!pend->pkt) {
			comm_timer_delete(pend->timer);
			free(pend);
			return NULL;
		}
		/* put at end of waiting list */
		if(sq->outnet->udp_wait_last)
			sq->outnet->udp_wait_last->next_waiting = pend;
		else
			sq->outnet->udp_wait_first = pend;
		sq->outnet->udp_wait_last = pend;
		return pend;
	}
	/* busy flag set around the send; NOTE(review): presumably guards
	 * against reentrant deletion of sq from send callbacks */
	log_assert(!sq->busy);
	sq->busy = 1;
	if(!randomize_and_send_udp(pend, packet, timeout)) {
		pending_delete(sq->outnet, pend);
		return NULL;
	}
	sq->busy = 0;
	return pend;
}
2231 
/** Timeout callback for a tcp query (waiting_tcp).  Either the query was
 * still waiting for a buffer, or it was in use on a stream. */
void
outnet_tcptimer(void* arg)
{
	struct waiting_tcp* w = (struct waiting_tcp*)arg;
	struct outside_network* outnet = w->outnet;
	verbose(VERB_CLIENT, "outnet_tcptimer");
	if(w->on_tcp_waiting_list) {
		/* it is on the waiting list */
		waiting_list_remove(outnet, w);
		waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL);
		waiting_tcp_delete(w);
	} else {
		/* it was in use; next_waiting then points at the
		 * pending_tcp stream it runs on, not at a list element */
		struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting;
		/* report timeout to all queries on the stream and tear
		 * the stream down */
		reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT);
	}
	/* a buffer/fd may have come free; serve queued tcp queries */
	use_free_buffer(outnet);
}
2250 
2251 /** close the oldest reuse_tcp connection to make a fd and struct pend
2252  * available for a new stream connection */
2253 static void
2254 reuse_tcp_close_oldest(struct outside_network* outnet)
2255 {
2256 	struct reuse_tcp* reuse;
2257 	verbose(VERB_CLIENT, "reuse_tcp_close_oldest");
2258 	reuse = reuse_tcp_lru_snip(outnet);
2259 	if(!reuse) return;
2260 	/* free up */
2261 	reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED);
2262 }
2263 
2264 static uint16_t
2265 tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse)
2266 {
2267 	if(reuse)
2268 		return reuse_tcp_select_id(reuse, outnet);
2269 	return GET_RANDOM_ID(outnet->rnd);
2270 }
2271 
/** find spare ID value for reuse tcp stream.  That is random and also does
 * not collide with an existing query ID that is in use or waiting */
uint16_t
reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet)
{
	uint16_t id = 0, curid, nextid;
	const int try_random = 2000;
	int i;
	unsigned select, count, space;
	rbnode_type* node;

	/* make really sure the tree is not empty */
	if(reuse->tree_by_id.count == 0) {
		id = GET_RANDOM_ID(outnet->rnd);
		return id;
	}

	/* try to find random empty spots by picking them */
	for(i = 0; i<try_random; i++) {
		id = GET_RANDOM_ID(outnet->rnd);
		if(!reuse_tcp_by_id_find(reuse, id)) {
			return id;
		}
	}

	/* equally pick a random unused element from the tree that is
	 * not in use.  Pick a the n-th index of an unused number,
	 * then loop over the empty spaces in the tree and find it */
	log_assert(reuse->tree_by_id.count < 0xffff);
	select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count);
	/* select value now in 0 .. num free - 1 */

	count = 0; /* number of free spaces passed by */
	node = rbtree_first(&reuse->tree_by_id);
	log_assert(node && node != RBTREE_NULL); /* tree not empty */
	/* see if select is before first node; the gap [0, first_id) has
	 * exactly first_id free values */
	if(select < (unsigned)tree_by_id_get_id(node))
		return select;
	count += tree_by_id_get_id(node);
	/* perhaps select is between nodes */
	while(node && node != RBTREE_NULL) {
		rbnode_type* next = rbtree_next(node);
		if(next && next != RBTREE_NULL) {
			curid = tree_by_id_get_id(node);
			nextid = tree_by_id_get_id(next);
			log_assert(curid < nextid);
			if(curid != 0xffff && curid + 1 < nextid) {
				/* space between nodes */
				space = nextid - curid - 1;
				log_assert(select >= count);
				if(select < count + space) {
					/* here it is */
					return curid + 1 + (select - count);
				}
				count += space;
			}
		}
		node = next;
	}

	/* select is after the last node */
	/* count is the number of free positions before the nodes in the
	 * tree */
	node = rbtree_last(&reuse->tree_by_id);
	log_assert(node && node != RBTREE_NULL); /* tree not empty */
	curid = tree_by_id_get_id(node);
	/* sanity: free-before + free-after + used must cover 0..0xffff */
	log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff);
	return curid + 1 + (select - count);
}
2341 
/** Start a tcp query: reuse an existing stream to the target if one is
 * available, otherwise take (or free up) a pending_tcp slot and connect,
 * or queue the query on the outnet tcp wait list when no slot is free.
 * @return the waiting_tcp entry, or NULL on failure. */
struct waiting_tcp*
pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
	int timeout, comm_point_callback_type* callback, void* callback_arg)
{
	struct pending_tcp* pend = sq->outnet->tcp_free;
	struct reuse_tcp* reuse = NULL;
	struct waiting_tcp* w;

	verbose(VERB_CLIENT, "pending_tcp_query");
	/* need at least the 16bit ID in the packet */
	if(sldns_buffer_limit(packet) < sizeof(uint16_t)) {
		verbose(VERB_ALGO, "pending tcp query with too short buffer < 2");
		return NULL;
	}

	/* find out if a reused stream to the target exists */
	/* if so, take it into use */
	reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen,
		sq->ssl_upstream);
	if(reuse) {
		log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse);
		log_assert(reuse->pending);
		pend = reuse->pending;
		reuse_tcp_lru_touch(sq->outnet, reuse);
	}

	log_assert(!reuse || (reuse && pend));
	/* if !pend but we have reuse streams, close a reuse stream
	 * to be able to open a new one to this target, no use waiting
	 * to reuse a file descriptor while another query needs to use
	 * that buffer and file descriptor now. */
	if(!pend) {
		reuse_tcp_close_oldest(sq->outnet);
		pend = sq->outnet->tcp_free;
		log_assert(!reuse || (pend == reuse->pending));
	}

	/* allocate space to store query; the packet bytes follow the
	 * struct in the same allocation */
	w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp)
		+ sldns_buffer_limit(packet));
	if(!w) {
		return NULL;
	}
	if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) {
		free(w);
		return NULL;
	}
	w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp);
	w->pkt_len = sldns_buffer_limit(packet);
	memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len);
	/* the ID must not collide with other queries on a reused stream */
	w->id = tcp_select_id(sq->outnet, reuse);
	LDNS_ID_SET(w->pkt, w->id);
	memcpy(&w->addr, &sq->addr, sq->addrlen);
	w->addrlen = sq->addrlen;
	w->outnet = sq->outnet;
	w->on_tcp_waiting_list = 0;
	w->next_waiting = NULL;
	w->cb = callback;
	w->cb_arg = callback_arg;
	w->ssl_upstream = sq->ssl_upstream;
	/* pointer copy: shares the string owned by sq's region;
	 * NOTE(review): assumes sq outlives w — confirm with callers */
	w->tls_auth_name = sq->tls_auth_name;
	w->timeout = timeout;
	w->id_node.key = NULL;
	w->write_wait_prev = NULL;
	w->write_wait_next = NULL;
	w->write_wait_queued = 0;
	w->error_count = 0;
#ifdef USE_DNSTAP
	w->sq = NULL;
#endif
	w->in_cb_and_decommission = 0;
	if(pend) {
		/* we have a buffer available right now */
		if(reuse) {
			log_assert(reuse == &pend->reuse);
			/* reuse existing fd, write query and continue */
			/* store query in tree by id */
			verbose(VERB_CLIENT, "pending_tcp_query: reuse, store");
			/* next_waiting points at the stream while in use */
			w->next_waiting = (void*)pend;
			reuse_tree_by_id_insert(&pend->reuse, w);
			/* can we write right now? */
			if(pend->query == NULL) {
				/* write straight away */
				/* stop the timer on read of the fd */
				comm_point_stop_listening(pend->c);
				pend->query = w;
				outnet_tcp_take_query_setup(pend->c->fd, pend,
					w);
			} else {
				/* put it in the waiting list for
				 * this stream */
				reuse_write_wait_push_back(&pend->reuse, w);
			}
		} else {
			/* create new fd and connect to addr, setup to
			 * write query */
			verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect");
			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
			pend->reuse.pending = pend;
			memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen);
			pend->reuse.addrlen = sq->addrlen;
			if(!outnet_tcp_take_into_use(w)) {
				waiting_tcp_delete(w);
				return NULL;
			}
		}
#ifdef USE_DNSTAP
		if(sq->outnet->dtenv &&
		   (sq->outnet->dtenv->log_resolver_query_messages ||
		    sq->outnet->dtenv->log_forwarder_query_messages)) {
			/* use w->pkt, because it has the ID value */
			sldns_buffer tmp;
			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
			dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr,
				&pend->pi->addr, comm_tcp, sq->zone,
				sq->zonelen, &tmp);
		}
#endif
	} else {
		/* queue up */
		/* waiting for a buffer on the outside network buffer wait
		 * list */
		verbose(VERB_CLIENT, "pending_tcp_query: queue to wait");
#ifdef USE_DNSTAP
		w->sq = sq;
#endif
		outnet_add_tcp_waiting(sq->outnet, w);
	}
	return w;
}
2471 
2472 /** create query for serviced queries */
2473 static void
2474 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen,
2475 	uint16_t qtype, uint16_t qclass, uint16_t flags)
2476 {
2477 	sldns_buffer_clear(buff);
2478 	/* skip id */
2479 	sldns_buffer_write_u16(buff, flags);
2480 	sldns_buffer_write_u16(buff, 1); /* qdcount */
2481 	sldns_buffer_write_u16(buff, 0); /* ancount */
2482 	sldns_buffer_write_u16(buff, 0); /* nscount */
2483 	sldns_buffer_write_u16(buff, 0); /* arcount */
2484 	sldns_buffer_write(buff, qname, qnamelen);
2485 	sldns_buffer_write_u16(buff, qtype);
2486 	sldns_buffer_write_u16(buff, qclass);
2487 	sldns_buffer_flip(buff);
2488 }
2489 
2490 /** lookup serviced query in serviced query rbtree */
2491 static struct serviced_query*
2492 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2493 	struct sockaddr_storage* addr, socklen_t addrlen,
2494 	struct edns_option* opt_list)
2495 {
2496 	struct serviced_query key;
2497 	key.node.key = &key;
2498 	key.qbuf = sldns_buffer_begin(buff);
2499 	key.qbuflen = sldns_buffer_limit(buff);
2500 	key.dnssec = dnssec;
2501 	memcpy(&key.addr, addr, addrlen);
2502 	key.addrlen = addrlen;
2503 	key.outnet = outnet;
2504 	key.opt_list = opt_list;
2505 	return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
2506 }
2507 
2508 void
2509 serviced_timer_cb(void* arg)
2510 {
2511 	struct serviced_query* sq = (struct serviced_query*)arg;
2512 	struct outside_network* outnet = sq->outnet;
2513 	verbose(VERB_ALGO, "serviced send timer");
2514 	/* By the time this cb is called, if we don't have any registered
2515 	 * callbacks for this serviced_query anymore; do not send. */
2516 	if(!sq->cblist)
2517 		goto delete;
2518 	/* perform first network action */
2519 	if(outnet->do_udp && !(sq->tcp_upstream || sq->ssl_upstream)) {
2520 		if(!serviced_udp_send(sq, outnet->udp_buff))
2521 			goto delete;
2522 	} else {
2523 		if(!serviced_tcp_send(sq, outnet->udp_buff))
2524 			goto delete;
2525 	}
2526 	/* Maybe by this time we don't have callbacks attached anymore. Don't
2527 	 * proactively try to delete; let it run and maybe another callback
2528 	 * will get attached by the time we get an answer. */
2529 	return;
2530 delete:
2531 	serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
2532 }
2533 
/** Create new serviced entry.  The qbuf, zone and tls_auth_name are
 * copied into the given region; on any failure the region is returned
 * to the alloc cache and NULL is returned.  The entry is inserted into
 * outnet->serviced and its send timer is armed to fire immediately. */
static struct serviced_query*
serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
	int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream,
	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list,
	size_t pad_queries_block_size, struct alloc_cache* alloc,
	struct regional* region)
{
	struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
	struct timeval t;
#ifdef UNBOUND_DEBUG
	rbnode_type* ins;
#endif
	if(!sq)
		return NULL;
	sq->node.key = sq;
	sq->alloc = alloc;
	sq->region = region;
	/* copy the query packet (sans ID) into the region */
	sq->qbuf = regional_alloc_init(region, sldns_buffer_begin(buff),
		sldns_buffer_limit(buff));
	if(!sq->qbuf) {
		/* sq does not own the region yet: hand it back */
		alloc_reg_release(alloc, region);
		free(sq);
		return NULL;
	}
	sq->qbuflen = sldns_buffer_limit(buff);
	sq->zone = regional_alloc_init(region, zone, zonelen);
	if(!sq->zone) {
		alloc_reg_release(alloc, region);
		free(sq);
		return NULL;
	}
	sq->zonelen = zonelen;
	sq->qtype = qtype;
	sq->dnssec = dnssec;
	sq->want_dnssec = want_dnssec;
	sq->nocaps = nocaps;
	sq->tcp_upstream = tcp_upstream;
	sq->ssl_upstream = ssl_upstream;
	if(tls_auth_name) {
		sq->tls_auth_name = regional_strdup(region, tls_auth_name);
		if(!sq->tls_auth_name) {
			alloc_reg_release(alloc, region);
			free(sq);
			return NULL;
		}
	} else {
		sq->tls_auth_name = NULL;
	}
	memcpy(&sq->addr, addr, addrlen);
	sq->addrlen = addrlen;
	sq->opt_list = opt_list;
	sq->busy = 0;
	sq->timer = comm_timer_create(outnet->base, serviced_timer_cb, sq);
	if(!sq->timer) {
		alloc_reg_release(alloc, region);
		free(sq);
		return NULL;
	}
	/* zero timeval: fire the send callback as soon as the event
	 * loop runs */
	memset(&t, 0, sizeof(t));
	comm_timer_set(sq->timer, &t);
	sq->outnet = outnet;
	sq->cblist = NULL;
	sq->pending = NULL;
	sq->status = serviced_initial;
	sq->retry = 0;
	sq->to_be_deleted = 0;
	sq->padding_block_size = pad_queries_block_size;
	/* insert result only checked in debug builds */
#ifdef UNBOUND_DEBUG
	ins =
#else
	(void)
#endif
	rbtree_insert(outnet->serviced, &sq->node);
	log_assert(ins != NULL); /* must not be already present */
	return sq;
}
2612 
2613 /** remove waiting tcp from the outnet waiting list */
2614 static void
2615 waiting_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
2616 {
2617 	struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
2618 	w->on_tcp_waiting_list = 0;
2619 	while(p) {
2620 		if(p == w) {
2621 			/* remove w */
2622 			if(prev)
2623 				prev->next_waiting = w->next_waiting;
2624 			else	outnet->tcp_wait_first = w->next_waiting;
2625 			if(outnet->tcp_wait_last == w)
2626 				outnet->tcp_wait_last = prev;
2627 			return;
2628 		}
2629 		prev = p;
2630 		p = p->next_waiting;
2631 	}
2632 	/* waiting_list_remove is currently called only with items that are
2633 	 * already in the waiting list. */
2634 	log_assert(0);
2635 }
2636 
/** reuse tcp stream, remove serviced query from stream,
 * return true if the stream is kept, false if it is to be closed */
static int
reuse_tcp_remove_serviced_keep(struct waiting_tcp* w,
	struct serviced_query* sq)
{
	/* while in use, w->next_waiting points at the stream */
	struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting;
	verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep");
	/* remove the callback. let query continue to write to not cancel
	 * the stream itself.  also keep it as an entry in the tree_by_id,
	 * in case the answer returns (that we no longer want), but we cannot
	 * pick the same ID number meanwhile */
	w->cb = NULL;
	/* see if can be entered in reuse tree
	 * for that the FD has to be non-1 */
	if(pend_tcp->c->fd == -1) {
		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd");
		return 0;
	}
	/* if in tree and used by other queries */
	if(pend_tcp->reuse.node.key) {
		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries");
		/* do not reset the keepalive timer, for that
		 * we'd need traffic, and this is where the serviced is
		 * removed due to state machine internal reasons,
		 * eg. iterator no longer interested in this query */
		return 1;
	}
	/* if still open and want to keep it open; also respect the
	 * configured maximum number of reuse streams */
	if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count <
		sq->outnet->tcp_reuse_max) {
		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open");
		/* set a keepalive timer on it */
		if(!reuse_tcp_insert(sq->outnet, pend_tcp)) {
			return 0;
		}
		reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout);
		return 1;
	}
	return 0;
}
2678 
/** cleanup serviced query entry; tears down whatever UDP or TCP pending
 * state the query has before freeing the entry itself.  The caller is
 * responsible for removing sq from the serviced rbtree. */
static void
serviced_delete(struct serviced_query* sq)
{
	verbose(VERB_CLIENT, "serviced_delete");
	if(sq->pending) {
		/* clear up the pending query */
		if(sq->status == serviced_query_UDP_EDNS ||
			sq->status == serviced_query_UDP ||
			sq->status == serviced_query_UDP_EDNS_FRAG ||
			sq->status == serviced_query_UDP_EDNS_fallback) {
			struct pending* p = (struct pending*)sq->pending;
			verbose(VERB_CLIENT, "serviced_delete: UDP");
			if(p->pc)
				portcomm_loweruse(sq->outnet, p->pc);
			pending_delete(sq->outnet, p);
			/* this call can cause reentrant calls back into the
			 * mesh */
			outnet_send_wait_udp(sq->outnet);
		} else {
			struct waiting_tcp* w = (struct waiting_tcp*)
				sq->pending;
			verbose(VERB_CLIENT, "serviced_delete: TCP");
			log_assert(!(w->write_wait_queued && w->on_tcp_waiting_list));
			/* if on stream-write-waiting list then
			 * remove from waiting list and waiting_tcp_delete */
			if(w->write_wait_queued) {
				struct pending_tcp* pend =
					(struct pending_tcp*)w->next_waiting;
				verbose(VERB_CLIENT, "serviced_delete: writewait");
				/* in_cb_and_decommission: the stream teardown
				 * code owns w then; do not free it here */
				if(!w->in_cb_and_decommission)
					reuse_tree_by_id_delete(&pend->reuse, w);
				reuse_write_wait_remove(&pend->reuse, w);
				if(!w->in_cb_and_decommission)
					waiting_tcp_delete(w);
			} else if(!w->on_tcp_waiting_list) {
				/* query is in use on a stream */
				struct pending_tcp* pend =
					(struct pending_tcp*)w->next_waiting;
				verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep");
				/* w needs to stay on tree_by_id to not assign
				 * the same ID; remove the callback since its
				 * serviced_query will be gone. */
				w->cb = NULL;
				if(!reuse_tcp_remove_serviced_keep(w, sq)) {
					/* stream cannot be kept: tear it down
					 * and serve the tcp wait list */
					if(!w->in_cb_and_decommission)
						reuse_cb_and_decommission(sq->outnet,
							pend, NETEVENT_CLOSED);
					use_free_buffer(sq->outnet);
				}
				sq->pending = NULL;
			} else {
				/* query is still on the outnet wait list */
				verbose(VERB_CLIENT, "serviced_delete: tcpwait");
				waiting_list_remove(sq->outnet, w);
				if(!w->in_cb_and_decommission)
					waiting_tcp_delete(w);
			}
		}
	}
	/* does not delete from tree, caller has to do that */
	serviced_node_del(&sq->node, NULL);
}
2740 
2741 /** perturb a dname capitalization randomly */
2742 static void
2743 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len)
2744 {
2745 	uint8_t lablen;
2746 	uint8_t* d = qbuf + 10;
2747 	long int random = 0;
2748 	int bits = 0;
2749 	log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */);
2750 	(void)len;
2751 	lablen = *d++;
2752 	while(lablen) {
2753 		while(lablen--) {
2754 			/* only perturb A-Z, a-z */
2755 			if(isalpha((unsigned char)*d)) {
2756 				/* get a random bit */
2757 				if(bits == 0) {
2758 					random = ub_random(rnd);
2759 					bits = 30;
2760 				}
2761 				if(random & 0x1) {
2762 					*d = (uint8_t)toupper((unsigned char)*d);
2763 				} else {
2764 					*d = (uint8_t)tolower((unsigned char)*d);
2765 				}
2766 				random >>= 1;
2767 				bits--;
2768 			}
2769 			d++;
2770 		}
2771 		lablen = *d++;
2772 	}
2773 	if(verbosity >= VERB_ALGO) {
2774 		char buf[LDNS_MAX_DOMAINLEN+1];
2775 		dname_str(qbuf+10, buf);
2776 		verbose(VERB_ALGO, "qname perturbed to %s", buf);
2777 	}
2778 }
2779 
2780 /** put serviced query into a buffer */
2781 static void
2782 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns)
2783 {
2784 	/* if we are using 0x20 bits for ID randomness, perturb them */
2785 	if(sq->outnet->use_caps_for_id && !sq->nocaps) {
2786 		serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen);
2787 	}
2788 	/* generate query */
2789 	sldns_buffer_clear(buff);
2790 	sldns_buffer_write_u16(buff, 0); /* id placeholder */
2791 	sldns_buffer_write(buff, sq->qbuf, sq->qbuflen);
2792 	sldns_buffer_flip(buff);
2793 	if(with_edns) {
2794 		/* add edns section */
2795 		struct edns_data edns;
2796 		struct edns_option padding_option;
2797 		edns.edns_present = 1;
2798 		edns.ext_rcode = 0;
2799 		edns.edns_version = EDNS_ADVERTISED_VERSION;
2800 		edns.opt_list_in = NULL;
2801 		edns.opt_list_out = sq->opt_list;
2802 		edns.opt_list_inplace_cb_out = NULL;
2803 		if(sq->status == serviced_query_UDP_EDNS_FRAG) {
2804 			if(addr_is_ip6(&sq->addr, sq->addrlen)) {
2805 				if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE)
2806 					edns.udp_size = EDNS_FRAG_SIZE_IP6;
2807 				else	edns.udp_size = EDNS_ADVERTISED_SIZE;
2808 			} else {
2809 				if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE)
2810 					edns.udp_size = EDNS_FRAG_SIZE_IP4;
2811 				else	edns.udp_size = EDNS_ADVERTISED_SIZE;
2812 			}
2813 		} else {
2814 			edns.udp_size = EDNS_ADVERTISED_SIZE;
2815 		}
2816 		edns.bits = 0;
2817 		if(sq->dnssec & EDNS_DO)
2818 			edns.bits = EDNS_DO;
2819 		if(sq->dnssec & BIT_CD)
2820 			LDNS_CD_SET(sldns_buffer_begin(buff));
2821 		if (sq->ssl_upstream && sq->padding_block_size) {
2822 			padding_option.opt_code = LDNS_EDNS_PADDING;
2823 			padding_option.opt_len = 0;
2824 			padding_option.opt_data = NULL;
2825 			padding_option.next = edns.opt_list_out;
2826 			edns.opt_list_out = &padding_option;
2827 			edns.padding_block_size = sq->padding_block_size;
2828 		}
2829 		attach_edns_record(buff, &edns);
2830 	}
2831 }
2832 
2833 /**
2834  * Perform serviced query UDP sending operation.
2835  * Sends UDP with EDNS, unless infra host marked non EDNS.
2836  * @param sq: query to send.
2837  * @param buff: buffer scratch space.
2838  * @return 0 on error.
2839  */
2840 static int
2841 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff)
2842 {
2843 	int rtt, vs;
2844 	uint8_t edns_lame_known;
2845 	time_t now = *sq->outnet->now_secs;
2846 
2847 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
2848 		sq->zonelen, now, &vs, &edns_lame_known, &rtt))
2849 		return 0;
2850 	sq->last_rtt = rtt;
2851 	verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs);
2852 	if(sq->status == serviced_initial) {
2853 		if(vs != -1) {
2854 			sq->status = serviced_query_UDP_EDNS;
2855 		} else {
2856 			sq->status = serviced_query_UDP;
2857 		}
2858 	}
2859 	serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) ||
2860 		(sq->status == serviced_query_UDP_EDNS_FRAG));
2861 	sq->last_sent_time = *sq->outnet->now_tv;
2862 	sq->edns_lame_known = (int)edns_lame_known;
2863 	verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt);
2864 	sq->pending = pending_udp_query(sq, buff, rtt,
2865 		serviced_udp_callback, sq);
2866 	if(!sq->pending)
2867 		return 0;
2868 	return 1;
2869 }
2870 
/** Check that the (possibly 0x20-perturbed) qname in a reply packet is
 * bitwise identical to the qname that was sent.
 * Follows compression pointers in the reply, with bounds checks on every
 * read and a loop limit of MAX_COMPRESS_PTRS.
 * @param pkt: reply packet; qname is read at offset 12 (after the header).
 * @param qbuf: the query as sent, with a 10 byte header before the qname.
 * @param qbuflen: length of qbuf; asserted to hold header+root+type+class.
 * @return 1 if identical, 0 on mismatch or any malformed/out-of-bounds
 *	content in the reply. */
static int
serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen)
{
	uint8_t* d1 = sldns_buffer_begin(pkt)+12;
	uint8_t* d2 = qbuf+10;
	uint8_t len1, len2;
	int count = 0;
	if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */
		return 0;
	log_assert(qbuflen >= 15 /* 10 header, root, type, class */);
	len1 = *d1++;
	len2 = *d2++;
	/* walk both names label by label until both hit the root label */
	while(len1 != 0 || len2 != 0) {
		if(LABEL_IS_PTR(len1)) {
			/* check if we can read *d1 with compression ptr rest */
			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
				return 0;
			d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1);
			/* check if we can read the destination *d1 */
			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
				return 0;
			len1 = *d1++;
			/* bound the number of pointer hops to stop loops */
			if(count++ > MAX_COMPRESS_PTRS)
				return 0;
			continue;
		}
		/* the sent qname must not be overrun */
		if(d2 > qbuf+qbuflen)
			return 0;
		if(len1 != len2)
			return 0;
		if(len1 > LDNS_MAX_LABELLEN)
			return 0;
		/* check len1 + 1(next length) are okay to read */
		if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
			return 0;
		log_assert(len1 <= LDNS_MAX_LABELLEN);
		log_assert(len2 <= LDNS_MAX_LABELLEN);
		log_assert(len1 == len2 && len1 != 0);
		/* compare the labels - bitwise identical */
		if(memcmp(d1, d2, len1) != 0)
			return 0;
		d1 += len1;
		d2 += len2;
		len1 = *d1++;
		len2 = *d2++;
	}
	return 1;
}
2920 
/** Call the callbacks for a serviced query and delete it.
 * Removes sq from the serviced tree first so that callbacks can safely
 * deregister themselves or create new identical serviced queries.
 * For 0x20 (caps-for-id) queries, verifies the reply qname first and may
 * downgrade the result to NETEVENT_CLOSED or NETEVENT_CAPSFAIL.
 * @param sq: the serviced query; deleted at the end of this routine.
 * @param error: netevent error code passed to the callbacks.
 * @param c: comm point with reply buffer, or NULL on error.
 * @param rep: comm reply info, passed through to the callbacks. */
static void
serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c,
	struct comm_reply* rep)
{
	struct service_callback* p;
	int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/
	uint8_t *backup_p = NULL;
	size_t backlen = 0;
	/* the #ifdef below splices either a debug-only capture of the
	 * return value, or a (void) cast, onto the rbtree_delete call */
#ifdef UNBOUND_DEBUG
	rbnode_type* rem =
#else
	(void)
#endif
	/* remove from tree, and schedule for deletion, so that callbacks
	 * can safely deregister themselves and even create new serviced
	 * queries that are identical to this one. */
	rbtree_delete(sq->outnet->serviced, sq);
	log_assert(rem); /* should have been present */
	sq->to_be_deleted = 1;
	verbose(VERB_ALGO, "svcd callbacks start");
	if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c &&
		!sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) {
		/* for type PTR do not check perturbed name in answer,
		 * compatibility with cisco dns guard boxes that mess up
		 * reverse queries 0x20 contents */
		/* noerror and nxdomain must have a qname in reply */
		if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 &&
			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
				== LDNS_RCODE_NOERROR ||
			 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
				== LDNS_RCODE_NXDOMAIN)) {
			/* qdcount 0 where a qname was required: treat the
			 * reply as unusable */
			verbose(VERB_DETAIL, "no qname in reply to check 0x20ID");
			log_addr(VERB_DETAIL, "from server",
				&sq->addr, sq->addrlen);
			log_buf(VERB_DETAIL, "for packet", c->buffer);
			error = NETEVENT_CLOSED;
			c = NULL;
		} else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 &&
			!serviced_check_qname(c->buffer, sq->qbuf,
			sq->qbuflen)) {
			/* reply qname case does not match what was sent */
			verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname");
			log_addr(VERB_DETAIL, "from server",
				&sq->addr, sq->addrlen);
			log_buf(VERB_DETAIL, "for packet", c->buffer);
			error = NETEVENT_CAPSFAIL;
			/* and cleanup too */
			pkt_dname_tolower(c->buffer,
				sldns_buffer_at(c->buffer, 12));
		} else {
			verbose(VERB_ALGO, "good 0x20-ID in reply qname");
			/* cleanup caps, prettier cache contents. */
			pkt_dname_tolower(c->buffer,
				sldns_buffer_at(c->buffer, 12));
		}
	}
	if(dobackup && c) {
		/* make a backup of the query, since the querystate processing
		 * may send outgoing queries that overwrite the buffer.
		 * use secondary buffer to store the query.
		 * This is a data copy, but faster than packet to server */
		backlen = sldns_buffer_limit(c->buffer);
		backup_p = regional_alloc_init(sq->region,
			sldns_buffer_begin(c->buffer), backlen);
		if(!backup_p) {
			log_err("malloc failure in serviced query callbacks");
			error = NETEVENT_CLOSED;
			c = NULL;
		}
		/* NOTE(review): svcd_overhead is set even when the backup
		 * allocation failed, and the reset at the end is guarded by
		 * if(backup_p), so it stays nonzero in that error path --
		 * confirm whether that is intended */
		sq->outnet->svcd_overhead = backlen;
	}
	/* test the actual sq->cblist, because the next elem could be deleted*/
	while((p=sq->cblist) != NULL) {
		sq->cblist = p->next; /* remove this element */
		if(dobackup && c) {
			/* restore the reply buffer for each callback */
			sldns_buffer_clear(c->buffer);
			sldns_buffer_write(c->buffer, backup_p, backlen);
			sldns_buffer_flip(c->buffer);
		}
		fptr_ok(fptr_whitelist_serviced_query(p->cb));
		(void)(*p->cb)(c, p->cb_arg, error, rep);
	}
	if(backup_p) {
		sq->outnet->svcd_overhead = 0;
	}
	verbose(VERB_ALGO, "svcd callbacks end");
	log_assert(sq->cblist == NULL);
	serviced_delete(sq);
}
3010 
/** Callback for a TCP (or TLS) serviced query reply or error.
 * Handles EDNS fallback over TCP, infra cache updates (tcp-works, noEDNS,
 * rtt for stream upstreams) and then invokes the serviced callbacks.
 * @param c: comm point with the reply buffer.
 * @param arg: the serviced_query.
 * @param error: NETEVENT_NOERROR or an error code.
 * @param rep: reply info, or NULL (a local one is created on errors).
 * @return 0 always (comm point continues). */
int
serviced_tcp_callback(struct comm_point* c, void* arg, int error,
        struct comm_reply* rep)
{
	struct serviced_query* sq = (struct serviced_query*)arg;
	struct comm_reply r2;
#ifdef USE_DNSTAP
	/* dig up the outgoing interface for dnstap logging; only valid
	 * when the query is in use on an open stream */
	struct waiting_tcp* w = (struct waiting_tcp*)sq->pending;
	struct pending_tcp* pend_tcp = NULL;
	struct port_if* pi = NULL;
	if(w && !w->on_tcp_waiting_list && w->next_waiting) {
		pend_tcp = (struct pending_tcp*)w->next_waiting;
		pi = pend_tcp->pi;
	}
#endif
	sq->pending = NULL; /* removed after this callback */
	if(error != NETEVENT_NOERROR)
		log_addr(VERB_QUERY, "tcp error for address",
			&sq->addr, sq->addrlen);
	if(error==NETEVENT_NOERROR)
		/* remember in the infra cache that TCP works here */
		infra_update_tcp_works(sq->outnet->infra, &sq->addr,
			sq->addrlen, sq->zone, sq->zonelen);
#ifdef USE_DNSTAP
	/*
	 * sending src (local service)/dst (upstream) addresses over DNSTAP
	 */
	if(error==NETEVENT_NOERROR && pi && sq->outnet->dtenv &&
	   (sq->outnet->dtenv->log_resolver_response_messages ||
	    sq->outnet->dtenv->log_forwarder_response_messages)) {
		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
		log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
		dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr,
			&pi->addr, c->type, sq->zone, sq->zonelen, sq->qbuf,
			sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv,
			c->buffer);
	}
#endif
	/* FORMERR/NOTIMPL on an EDNS query: retry this query without EDNS */
	if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS &&
		(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
		LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin(
		c->buffer)) == LDNS_RCODE_NOTIMPL) ) {
		/* attempt to fallback to nonEDNS */
		sq->status = serviced_query_TCP_EDNS_fallback;
		serviced_tcp_initiate(sq, c->buffer);
		return 0;
	} else if(error==NETEVENT_NOERROR &&
		sq->status == serviced_query_TCP_EDNS_fallback &&
			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
			LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE(
			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN
			|| LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
			== LDNS_RCODE_YXDOMAIN)) {
		/* the fallback produced a result that looks promising, note
		 * that this server should be approached without EDNS */
		/* only store noEDNS in cache if domain is noDNSSEC */
		if(!sq->want_dnssec)
		  if(!infra_edns_update(sq->outnet->infra, &sq->addr,
			sq->addrlen, sq->zone, sq->zonelen, -1,
			*sq->outnet->now_secs))
			log_err("Out of memory caching no edns for host");
		sq->status = serviced_query_TCP;
	}
	/* for permanent stream upstreams, keep the rtt estimate current */
	if(sq->tcp_upstream || sq->ssl_upstream) {
	    struct timeval now = *sq->outnet->now_tv;
	    if(error!=NETEVENT_NOERROR) {
	        if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
		    sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
		    -1, sq->last_rtt, (time_t)now.tv_sec))
		    log_err("out of memory in TCP exponential backoff.");
	    } else if(now.tv_sec > sq->last_sent_time.tv_sec ||
		(now.tv_sec == sq->last_sent_time.tv_sec &&
		now.tv_usec > sq->last_sent_time.tv_usec)) {
		/* convert from microseconds to milliseconds */
		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
		verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime);
		log_assert(roundtime >= 0);
		/* only store if less then AUTH_TIMEOUT seconds, it could be
		 * huge due to system-hibernated and we woke up */
		if(roundtime < 60000) {
		    if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
			sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
			roundtime, sq->last_rtt, (time_t)now.tv_sec))
			log_err("out of memory noting rtt.");
		}
	    }
	}
	/* insert address into reply info */
	if(!rep) {
		/* create one if there isn't (on errors) */
		rep = &r2;
		r2.c = c;
	}
	memcpy(&rep->addr, &sq->addr, sq->addrlen);
	rep->addrlen = sq->addrlen;
	serviced_callbacks(sq, error, c, rep);
	return 0;
}
3109 
3110 static void
3111 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff)
3112 {
3113 	verbose(VERB_ALGO, "initiate TCP query %s",
3114 		sq->status==serviced_query_TCP_EDNS?"EDNS":"");
3115 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3116 	sq->last_sent_time = *sq->outnet->now_tv;
3117 	log_assert(!sq->busy);
3118 	sq->busy = 1;
3119 	sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout,
3120 		serviced_tcp_callback, sq);
3121 	sq->busy = 0;
3122 	if(!sq->pending) {
3123 		/* delete from tree so that a retry by above layer does not
3124 		 * clash with this entry */
3125 		verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query");
3126 		serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
3127 	}
3128 }
3129 
3130 /** Send serviced query over TCP return false on initial failure */
3131 static int
3132 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff)
3133 {
3134 	int vs, rtt, timeout;
3135 	uint8_t edns_lame_known;
3136 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
3137 		sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known,
3138 		&rtt))
3139 		return 0;
3140 	sq->last_rtt = rtt;
3141 	if(vs != -1)
3142 		sq->status = serviced_query_TCP_EDNS;
3143 	else 	sq->status = serviced_query_TCP;
3144 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3145 	sq->last_sent_time = *sq->outnet->now_tv;
3146 	if(sq->tcp_upstream || sq->ssl_upstream) {
3147 		timeout = rtt;
3148 		if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout)
3149 			timeout = sq->outnet->tcp_auth_query_timeout;
3150 	} else {
3151 		timeout = sq->outnet->tcp_auth_query_timeout;
3152 	}
3153 	log_assert(!sq->busy);
3154 	sq->busy = 1;
3155 	sq->pending = pending_tcp_query(sq, buff, timeout,
3156 		serviced_tcp_callback, sq);
3157 	sq->busy = 0;
3158 	return sq->pending != NULL;
3159 }
3160 
3161 /* see if packet is edns malformed; got zeroes at start.
3162  * This is from servers that return malformed packets to EDNS0 queries,
3163  * but they return good packets for nonEDNS0 queries.
3164  * We try to detect their output; without resorting to a full parse or
3165  * check for too many bytes after the end of the packet. */
3166 static int
3167 packet_edns_malformed(struct sldns_buffer* buf, int qtype)
3168 {
3169 	size_t len;
3170 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE)
3171 		return 1; /* malformed */
3172 	/* they have NOERROR rcode, 1 answer. */
3173 	if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR)
3174 		return 0;
3175 	/* one query (to skip) and answer records */
3176 	if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 ||
3177 		LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0)
3178 		return 0;
3179 	/* skip qname */
3180 	len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE),
3181 		sldns_buffer_limit(buf)-LDNS_HEADER_SIZE);
3182 	if(len == 0)
3183 		return 0;
3184 	if(len == 1 && qtype == 0)
3185 		return 0; /* we asked for '.' and type 0 */
3186 	/* and then 4 bytes (type and class of query) */
3187 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3)
3188 		return 0;
3189 
3190 	/* and start with 11 zeroes as the answer RR */
3191 	/* so check the qtype of the answer record, qname=0, type=0 */
3192 	if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 &&
3193 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 &&
3194 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0)
3195 		return 1;
3196 	return 0;
3197 }
3198 
/** Callback for a UDP serviced query reply, timeout or error.
 * Drives the UDP retry/fallback state machine: EDNS frag-size fallback on
 * timeout, noEDNS fallback on FORMERR/NOTIMPL/malformed replies, infra
 * cache updates (EDNS status, rtt), and TCP fallback on the TC flag.
 * Finally invokes the serviced callbacks with the result.
 * @param c: comm point with the reply buffer.
 * @param arg: the serviced_query.
 * @param error: NETEVENT_NOERROR, NETEVENT_TIMEOUT or an error code.
 * @param rep: reply info.
 * @return 0 always. */
int
serviced_udp_callback(struct comm_point* c, void* arg, int error,
        struct comm_reply* rep)
{
	struct serviced_query* sq = (struct serviced_query*)arg;
	struct outside_network* outnet = sq->outnet;
	struct timeval now = *sq->outnet->now_tv;
#ifdef USE_DNSTAP
	/* keep the pending pointer for dnstap logging of the local addr */
	struct pending* p = (struct pending*)sq->pending;
#endif

	sq->pending = NULL; /* removed after callback */
	if(error == NETEVENT_TIMEOUT) {
		/* first EDNS timeout with a reasonable rtt: try again with
		 * a smaller advertised EDNS size (1480/1280) */
		if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000) {
			/* fallback to 1480/1280 */
			sq->status = serviced_query_UDP_EDNS_FRAG;
			log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10,
				&sq->addr, sq->addrlen);
			if(!serviced_udp_send(sq, c->buffer)) {
				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
			}
			return 0;
		}
		if(sq->status == serviced_query_UDP_EDNS_FRAG) {
			/* fragmentation size did not fix it */
			sq->status = serviced_query_UDP_EDNS;
		}
		sq->retry++;
		/* record the timeout in the infra cache (backoff) */
		if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
			sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt,
			(time_t)now.tv_sec))
			log_err("out of memory in UDP exponential backoff");
		if(sq->retry < OUTBOUND_UDP_RETRY) {
			log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10,
				&sq->addr, sq->addrlen);
			if(!serviced_udp_send(sq, c->buffer)) {
				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
			}
			return 0;
		}
	}
	if(error != NETEVENT_NOERROR) {
		/* udp returns error (due to no ID or interface available) */
		serviced_callbacks(sq, error, c, rep);
		return 0;
	}
#ifdef USE_DNSTAP
	/*
	 * sending src (local service)/dst (upstream) addresses over DNSTAP
	 */
	if(error == NETEVENT_NOERROR && outnet->dtenv && p->pc &&
		(outnet->dtenv->log_resolver_response_messages ||
		outnet->dtenv->log_forwarder_response_messages)) {
		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
		log_addr(VERB_ALGO, "to local addr", &p->pc->pif->addr,
			p->pc->pif->addrlen);
		dt_msg_send_outside_response(outnet->dtenv, &sq->addr,
			&p->pc->pif->addr, c->type, sq->zone, sq->zonelen,
			sq->qbuf, sq->qbuflen, &sq->last_sent_time,
			sq->outnet->now_tv, c->buffer);
	}
#endif
	/* FORMERR/NOTIMPL or malformed reply to an EDNS query: retry the
	 * query without EDNS */
	if( (sq->status == serviced_query_UDP_EDNS
		||sq->status == serviced_query_UDP_EDNS_FRAG)
		&& (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
			== LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(
			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL
		    || packet_edns_malformed(c->buffer, sq->qtype)
			)) {
		/* try to get an answer by falling back without EDNS */
		verbose(VERB_ALGO, "serviced query: attempt without EDNS");
		sq->status = serviced_query_UDP_EDNS_fallback;
		sq->retry = 0;
		if(!serviced_udp_send(sq, c->buffer)) {
			serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
		}
		return 0;
	} else if(sq->status == serviced_query_UDP_EDNS &&
		!sq->edns_lame_known) {
		/* now we know that edns queries received answers store that */
		log_addr(VERB_ALGO, "serviced query: EDNS works for",
			&sq->addr, sq->addrlen);
		if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
			sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) {
			log_err("Out of memory caching edns works");
		}
		sq->edns_lame_known = 1;
	} else if(sq->status == serviced_query_UDP_EDNS_fallback &&
		!sq->edns_lame_known && (LDNS_RCODE_WIRE(
		sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR ||
		LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
		LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin(
		c->buffer)) == LDNS_RCODE_YXDOMAIN)) {
		/* the fallback produced a result that looks promising, note
		 * that this server should be approached without EDNS */
		/* only store noEDNS in cache if domain is noDNSSEC */
		if(!sq->want_dnssec) {
		  log_addr(VERB_ALGO, "serviced query: EDNS fails for",
			&sq->addr, sq->addrlen);
		  if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
			sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
			log_err("Out of memory caching no edns for host");
		  }
		} else {
		  log_addr(VERB_ALGO, "serviced query: EDNS fails, but "
			"not stored because need DNSSEC for", &sq->addr,
			sq->addrlen);
		}
		sq->status = serviced_query_UDP;
	}
	/* measure the roundtrip time and store it in the infra cache */
	if(now.tv_sec > sq->last_sent_time.tv_sec ||
		(now.tv_sec == sq->last_sent_time.tv_sec &&
		now.tv_usec > sq->last_sent_time.tv_usec)) {
		/* convert from microseconds to milliseconds */
		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
		verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime);
		log_assert(roundtime >= 0);
		/* in case the system hibernated, do not enter a huge value,
		 * above this value gives trouble with server selection */
		if(roundtime < 60000) {
		    if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
			sq->zone, sq->zonelen, sq->qtype, roundtime,
			sq->last_rtt, (time_t)now.tv_sec))
			log_err("out of memory noting rtt.");
		}
	}
	/* perform TC flag check and TCP fallback after updating our
	 * cache entries for EDNS status and RTT times */
	if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) {
		/* fallback to TCP */
		/* this discards partial UDP contents */
		if(sq->status == serviced_query_UDP_EDNS ||
			sq->status == serviced_query_UDP_EDNS_FRAG ||
			sq->status == serviced_query_UDP_EDNS_fallback)
			/* if we have unfinished EDNS_fallback, start again */
			sq->status = serviced_query_TCP_EDNS;
		else	sq->status = serviced_query_TCP;
		serviced_tcp_initiate(sq, c->buffer);
		return 0;
	}
	/* yay! an answer */
	serviced_callbacks(sq, error, c, rep);
	return 0;
}
3344 
/** Acquire a serviced query: look up an existing identical one, or create
 * a new one (subject to the ratelimit check), and register the caller's
 * callback on it.
 * Ownership note: a fresh region is obtained; it is given to a newly
 * created serviced_query, and released here on all other paths (existing
 * query found, or any failure before serviced_create succeeds).
 * @return the serviced query, or NULL on failure/ratelimit (with
 *	*was_ratelimited set in the ratelimit case). */
struct serviced_query*
outnet_serviced_query(struct outside_network* outnet,
	struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
	int nocaps, int check_ratelimit, int tcp_upstream, int ssl_upstream,
	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, struct module_qstate* qstate,
	comm_point_callback_type* callback, void* callback_arg,
	sldns_buffer* buff, struct module_env* env, int* was_ratelimited)
{
	struct serviced_query* sq;
	struct service_callback* cb;
	struct edns_string_addr* client_string_addr;
	struct regional* region;
	struct edns_option* backed_up_opt_list = qstate->edns_opts_back_out;
	struct edns_option* per_upstream_opt_list = NULL;
	time_t timenow = 0;

	/* If we have an already populated EDNS option list make a copy since
	 * we may now add upstream specific EDNS options. */
	/* Use a region that could be attached to a serviced_query, if it needs
	 * to be created. If an existing one is found then this region will be
	 * destroyed here. */
	region = alloc_reg_obtain(env->alloc);
	if(!region) return NULL;
	if(qstate->edns_opts_back_out) {
		per_upstream_opt_list = edns_opt_copy_region(
			qstate->edns_opts_back_out, region);
		if(!per_upstream_opt_list) {
			alloc_reg_release(env->alloc, region);
			return NULL;
		}
		/* temporarily swap in the copy so the inplace callbacks
		 * below operate on it, not on the shared original */
		qstate->edns_opts_back_out = per_upstream_opt_list;
	}

	if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone,
		zonelen, qstate, region)) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	/* Restore the option list; we can explicitly use the copied one from
	 * now on. */
	per_upstream_opt_list = qstate->edns_opts_back_out;
	qstate->edns_opts_back_out = backed_up_opt_list;

	/* append the configured client string option for this upstream */
	if((client_string_addr = edns_string_addr_lookup(
		&env->edns_strings->client_strings, addr, addrlen))) {
		edns_opt_list_append(&per_upstream_opt_list,
			env->edns_strings->client_string_opcode,
			client_string_addr->string_len,
			client_string_addr->string, region);
	}

	serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
		qinfo->qclass, flags);
	sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
		per_upstream_opt_list);
	if(!sq) {
		/* Check ratelimit only for new serviced_query */
		if(check_ratelimit) {
			timenow = *env->now;
			if(!infra_ratelimit_inc(env->infra_cache, zone,
				zonelen, timenow, env->cfg->ratelimit_backoff,
				&qstate->qinfo, qstate->reply)) {
				/* Can we pass through with slip factor? */
				if(env->cfg->ratelimit_factor == 0 ||
					ub_random_max(env->rnd,
					env->cfg->ratelimit_factor) != 1) {
					*was_ratelimited = 1;
					alloc_reg_release(env->alloc, region);
					return NULL;
				}
				log_nametypeclass(VERB_ALGO,
					"ratelimit allowed through for "
					"delegation point", zone,
					LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN);
			}
		}
		/* make new serviced query entry; on success the region is
		 * owned by the serviced query */
		sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
			tcp_upstream, ssl_upstream, tls_auth_name, addr,
			addrlen, zone, zonelen, (int)qinfo->qtype,
			per_upstream_opt_list,
			( ssl_upstream && env->cfg->pad_queries
			? env->cfg->pad_queries_block_size : 0 ),
			env->alloc, region);
		if(!sq) {
			/* undo the ratelimit increment done above */
			if(check_ratelimit) {
				infra_ratelimit_dec(env->infra_cache,
					zone, zonelen, timenow);
			}
			alloc_reg_release(env->alloc, region);
			return NULL;
		}
		if(!(cb = (struct service_callback*)regional_alloc(
			sq->region, sizeof(*cb)))) {
			if(check_ratelimit) {
				infra_ratelimit_dec(env->infra_cache,
					zone, zonelen, timenow);
			}
			/* NOTE(review): the region is not released here,
			 * presumably serviced_node_del disposes of it with
			 * the serviced query -- confirm */
			(void)rbtree_delete(outnet->serviced, sq);
			serviced_node_del(&sq->node, NULL);
			return NULL;
		}
		/* No network action at this point; it will be invoked with the
		 * serviced_query timer instead to run outside of the mesh. */
	} else {
		/* We don't need this region anymore. */
		alloc_reg_release(env->alloc, region);
		/* duplicate entries are included in the callback list, because
		 * there is a counterpart registration by our caller that needs
		 * to be doubly-removed (with callbacks perhaps). */
		if(!(cb = (struct service_callback*)regional_alloc(
			sq->region, sizeof(*cb)))) {
			return NULL;
		}
	}
	/* add callback to list of callbacks */
	cb->cb = callback;
	cb->cb_arg = callback_arg;
	cb->next = sq->cblist;
	sq->cblist = cb;
	return sq;
}
3468 
3469 /** remove callback from list */
3470 static void
3471 callback_list_remove(struct serviced_query* sq, void* cb_arg)
3472 {
3473 	struct service_callback** pp = &sq->cblist;
3474 	while(*pp) {
3475 		if((*pp)->cb_arg == cb_arg) {
3476 			struct service_callback* del = *pp;
3477 			*pp = del->next;
3478 			return;
3479 		}
3480 		pp = &(*pp)->next;
3481 	}
3482 }
3483 
3484 void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
3485 {
3486 	if(!sq)
3487 		return;
3488 	callback_list_remove(sq, cb_arg);
3489 	/* if callbacks() routine scheduled deletion, let it do that */
3490 	if(!sq->cblist && !sq->busy && !sq->to_be_deleted) {
3491 		(void)rbtree_delete(sq->outnet->serviced, sq);
3492 		serviced_delete(sq);
3493 	}
3494 }
3495 
3496 /** create fd to send to this destination */
3497 static int
3498 fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
3499 	socklen_t to_addrlen)
3500 {
3501 	struct sockaddr_storage* addr;
3502 	socklen_t addrlen;
3503 	int i, try, pnum, dscp;
3504 	struct port_if* pif;
3505 
3506 	/* create fd */
3507 	dscp = outnet->ip_dscp;
3508 	for(try = 0; try<1000; try++) {
3509 		int port = 0;
3510 		int freebind = 0;
3511 		int noproto = 0;
3512 		int inuse = 0;
3513 		int fd = -1;
3514 
3515 		/* select interface */
3516 		if(addr_is_ip6(to_addr, to_addrlen)) {
3517 			if(outnet->num_ip6 == 0) {
3518 				char to[64];
3519 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3520 				verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
3521 				return -1;
3522 			}
3523 			i = ub_random_max(outnet->rnd, outnet->num_ip6);
3524 			pif = &outnet->ip6_ifs[i];
3525 		} else {
3526 			if(outnet->num_ip4 == 0) {
3527 				char to[64];
3528 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3529 				verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
3530 				return -1;
3531 			}
3532 			i = ub_random_max(outnet->rnd, outnet->num_ip4);
3533 			pif = &outnet->ip4_ifs[i];
3534 		}
3535 		addr = &pif->addr;
3536 		addrlen = pif->addrlen;
3537 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3538 		pnum = ub_random_max(outnet->rnd, pif->avail_total);
3539 		if(pnum < pif->inuse) {
3540 			/* port already open */
3541 			port = pif->out[pnum]->number;
3542 		} else {
3543 			/* unused ports in start part of array */
3544 			port = pif->avail_ports[pnum - pif->inuse];
3545 		}
3546 #else
3547 		pnum = port = 0;
3548 #endif
3549 		if(addr_is_ip6(to_addr, to_addrlen)) {
3550 			struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
3551 			sa.sin6_port = (in_port_t)htons((uint16_t)port);
3552 			fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
3553 				(struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
3554 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3555 		} else {
3556 			struct sockaddr_in* sa = (struct sockaddr_in*)addr;
3557 			sa->sin_port = (in_port_t)htons((uint16_t)port);
3558 			fd = create_udp_sock(AF_INET, SOCK_DGRAM,
3559 				(struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
3560 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3561 		}
3562 		if(fd != -1) {
3563 			return fd;
3564 		}
3565 		if(!inuse) {
3566 			return -1;
3567 		}
3568 	}
3569 	/* too many tries */
3570 	log_err("cannot send probe, ports are in use");
3571 	return -1;
3572 }
3573 
3574 struct comm_point*
3575 outnet_comm_point_for_udp(struct outside_network* outnet,
3576 	comm_point_callback_type* cb, void* cb_arg,
3577 	struct sockaddr_storage* to_addr, socklen_t to_addrlen)
3578 {
3579 	struct comm_point* cp;
3580 	int fd = fd_for_dest(outnet, to_addr, to_addrlen);
3581 	if(fd == -1) {
3582 		return NULL;
3583 	}
3584 	cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff,
3585 		cb, cb_arg, NULL);
3586 	if(!cp) {
3587 		log_err("malloc failure");
3588 		close(fd);
3589 		return NULL;
3590 	}
3591 	return cp;
3592 }
3593 
/** setup SSL for comm point.
 * Creates the SSL object on the fd, sets SNI when configured, and wires
 * hostname verification through whichever OpenSSL API is available.
 * @param cp: comm point; cp->ssl is created and its handshake state set.
 * @param outnet: provides the SSL_CTX and the tls_use_sni setting.
 * @param fd: connected socket to wrap in the SSL object.
 * @param host: hostname for SNI and certificate name checking.
 * @return 1 on success, 0 on failure (cp->ssl may be left set; caller is
 *	expected to delete the comm point on failure).
 */
static int
setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
	int fd, char* host)
{
	cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
	if(!cp->ssl) {
		log_err("cannot create SSL object");
		return 0;
	}
#ifdef USE_WINSOCK
	comm_point_tcp_win_bio_cb(cp, cp->ssl);
#endif
	/* we connect outward, so the handshake starts with a write */
	cp->ssl_shake_state = comm_ssl_shake_write;
	/* https verification */
#ifdef HAVE_SSL
	if(outnet->tls_use_sni) {
		/* best effort; failure to set SNI is not fatal */
		(void)SSL_set_tlsext_host_name(cp->ssl, host);
	}
#endif
#ifdef HAVE_SSL_SET1_HOST
	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
		/* because we set SSL_VERIFY_PEER, in netevent in
		 * ssl_handshake, it'll check if the certificate
		 * verification has succeeded */
		/* SSL_VERIFY_PEER is set on the sslctx */
		/* and the certificates to verify with are loaded into
		 * it with SSL_load_verify_locations or
		 * SSL_CTX_set_default_verify_paths */
		/* setting the hostname makes openssl verify the
		 * host name in the x509 certificate in the
		 * SSL connection*/
		if(!SSL_set1_host(cp->ssl, host)) {
			log_err("SSL_set1_host failed");
			return 0;
		}
	}
#elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
	/* openssl 1.0.2 has this function that can be used for
	 * set1_host like verification */
	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
		X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl);
#  ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
		X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
#  endif
		if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
			log_err("X509_VERIFY_PARAM_set1_host failed");
			return 0;
		}
	}
#else
	/* no hostname verification API available; host is unused */
	(void)host;
#endif /* HAVE_SSL_SET1_HOST */
	return 1;
}
3649 
/* Create a TCP (optionally TLS) comm point connected (nonblocking) to
 * to_addr, with the query copied into cp->buffer and the timeout armed.
 * Returns NULL on failure. */
struct comm_point*
outnet_comm_point_for_tcp(struct outside_network* outnet,
	comm_point_callback_type* cb, void* cb_arg,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen,
	sldns_buffer* query, int timeout, int ssl, char* host)
{
	struct comm_point* cp;
	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
	if(fd == -1) {
		return 0;
	}
	fd_set_nonblock(fd);
	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
		/* outnet_tcp_connect has closed fd on error for us */
		return 0;
	}
	cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
	if(!cp) {
		log_err("malloc failure");
		close(fd);
		return 0;
	}
	/* record the peer address for the reply info */
	cp->repinfo.addrlen = to_addrlen;
	memcpy(&cp->repinfo.addr, to_addr, to_addrlen);

	/* setup for SSL (if needed) */
	if(ssl) {
		if(!setup_comm_ssl(cp, outnet, fd, host)) {
			log_err("cannot setup XoT");
			/* NOTE(review): the fd is not attached to cp until
			 * comm_point_start_listening below; verify that
			 * comm_point_delete closes it here, else it leaks */
			comm_point_delete(cp);
			return NULL;
		}
	}

	/* set timeout on TCP connection; this also hands fd to cp */
	comm_point_start_listening(cp, fd, timeout);
	/* copy scratch buffer to cp->buffer */
	sldns_buffer_copy(cp->buffer, query);
	return cp;
}
3690 
3691 /** setup the User-Agent HTTP header based on http-user-agent configuration */
3692 static void
3693 setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
3694 {
3695 	if(cfg->hide_http_user_agent) return;
3696 	if(cfg->http_user_agent==NULL || cfg->http_user_agent[0] == 0) {
3697 		sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME,
3698 			PACKAGE_VERSION);
3699 	} else {
3700 		sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
3701 	}
3702 }
3703 
3704 /** setup http request headers in buffer for sending query to destination */
3705 static int
3706 setup_http_request(sldns_buffer* buf, char* host, char* path,
3707 	struct config_file* cfg)
3708 {
3709 	sldns_buffer_clear(buf);
3710 	sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
3711 	sldns_buffer_printf(buf, "Host: %s\r\n", host);
3712 	setup_http_user_agent(buf, cfg);
3713 	/* We do not really do multiple queries per connection,
3714 	 * but this header setting is also not needed.
3715 	 * sldns_buffer_printf(buf, "Connection: close\r\n") */
3716 	sldns_buffer_printf(buf, "\r\n");
3717 	if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
3718 		return 0; /* somehow buffer too short, but it is about 60K
3719 		and the request is only a couple bytes long. */
3720 	sldns_buffer_flip(buf);
3721 	return 1;
3722 }
3723 
/* Create an HTTP(S) comm point connected (nonblocking) to to_addr, with
 * the request headers prepared in cp->buffer. Returns NULL on failure. */
struct comm_point*
outnet_comm_point_for_http(struct outside_network* outnet,
	comm_point_callback_type* cb, void* cb_arg,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
	int ssl, char* host, char* path, struct config_file* cfg)
{
	/* cp calls cb with err=NETEVENT_DONE when transfer is done */
	struct comm_point* cp;
	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
	if(fd == -1) {
		return 0;
	}
	fd_set_nonblock(fd);
	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
		/* outnet_tcp_connect has closed fd on error for us */
		return 0;
	}
	cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
		outnet->udp_buff);
	if(!cp) {
		log_err("malloc failure");
		close(fd);
		return 0;
	}
	/* record the peer address for the reply info */
	cp->repinfo.addrlen = to_addrlen;
	memcpy(&cp->repinfo.addr, to_addr, to_addrlen);

	/* setup for SSL (if needed) */
	if(ssl) {
		if(!setup_comm_ssl(cp, outnet, fd, host)) {
			log_err("cannot setup https");
			/* NOTE(review): fd is only handed to cp by
			 * comm_point_start_listening below; verify
			 * comm_point_delete closes it here, else it leaks */
			comm_point_delete(cp);
			return NULL;
		}
	}

	/* set timeout on TCP connection; this also hands fd to cp */
	comm_point_start_listening(cp, fd, timeout);

	/* setup http request in cp->buffer */
	if(!setup_http_request(cp->buffer, host, path, cfg)) {
		log_err("error setting up http request");
		comm_point_delete(cp);
		return NULL;
	}
	return cp;
}
3771 
3772 /** get memory used by waiting tcp entry (in use or not) */
3773 static size_t
3774 waiting_tcp_get_mem(struct waiting_tcp* w)
3775 {
3776 	size_t s;
3777 	if(!w) return 0;
3778 	s = sizeof(*w) + w->pkt_len;
3779 	if(w->timer)
3780 		s += comm_timer_get_mem(w->timer);
3781 	return s;
3782 }
3783 
3784 /** get memory used by port if */
3785 static size_t
3786 if_get_mem(struct port_if* pif)
3787 {
3788 	size_t s;
3789 	int i;
3790 	s = sizeof(*pif) +
3791 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3792 	    sizeof(int)*pif->avail_total +
3793 #endif
3794 		sizeof(struct port_comm*)*pif->maxout;
3795 	for(i=0; i<pif->inuse; i++)
3796 		s += sizeof(*pif->out[i]) +
3797 			comm_point_get_mem(pif->out[i]->cp);
3798 	return s;
3799 }
3800 
3801 /** get memory used by waiting udp */
3802 static size_t
3803 waiting_udp_get_mem(struct pending* w)
3804 {
3805 	size_t s;
3806 	s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
3807 	return s;
3808 }
3809 
3810 size_t outnet_get_mem(struct outside_network* outnet)
3811 {
3812 	size_t i;
3813 	int k;
3814 	struct waiting_tcp* w;
3815 	struct pending* u;
3816 	struct serviced_query* sq;
3817 	struct service_callback* sb;
3818 	struct port_comm* pc;
3819 	size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
3820 		sizeof(*outnet->udp_buff) +
3821 		sldns_buffer_capacity(outnet->udp_buff);
3822 	/* second buffer is not ours */
3823 	for(pc = outnet->unused_fds; pc; pc = pc->next) {
3824 		s += sizeof(*pc) + comm_point_get_mem(pc->cp);
3825 	}
3826 	for(k=0; k<outnet->num_ip4; k++)
3827 		s += if_get_mem(&outnet->ip4_ifs[k]);
3828 	for(k=0; k<outnet->num_ip6; k++)
3829 		s += if_get_mem(&outnet->ip6_ifs[k]);
3830 	for(u=outnet->udp_wait_first; u; u=u->next_waiting)
3831 		s += waiting_udp_get_mem(u);
3832 
3833 	s += sizeof(struct pending_tcp*)*outnet->num_tcp;
3834 	for(i=0; i<outnet->num_tcp; i++) {
3835 		s += sizeof(struct pending_tcp);
3836 		s += comm_point_get_mem(outnet->tcp_conns[i]->c);
3837 		if(outnet->tcp_conns[i]->query)
3838 			s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
3839 	}
3840 	for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
3841 		s += waiting_tcp_get_mem(w);
3842 	s += sizeof(*outnet->pending);
3843 	s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
3844 		outnet->pending->count;
3845 	s += sizeof(*outnet->serviced);
3846 	s += outnet->svcd_overhead;
3847 	RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
3848 		s += sizeof(*sq) + sq->qbuflen;
3849 		for(sb = sq->cblist; sb; sb = sb->next)
3850 			s += sizeof(*sb);
3851 	}
3852 	return s;
3853 }
3854 
3855 size_t
3856 serviced_get_mem(struct serviced_query* sq)
3857 {
3858 	struct service_callback* sb;
3859 	size_t s;
3860 	s = sizeof(*sq) + sq->qbuflen;
3861 	for(sb = sq->cblist; sb; sb = sb->next)
3862 		s += sizeof(*sb);
3863 	if(sq->status == serviced_query_UDP_EDNS ||
3864 		sq->status == serviced_query_UDP ||
3865 		sq->status == serviced_query_UDP_EDNS_FRAG ||
3866 		sq->status == serviced_query_UDP_EDNS_fallback) {
3867 		s += sizeof(struct pending);
3868 		s += comm_timer_get_mem(NULL);
3869 	} else {
3870 		/* does not have size of the pkt pointer */
3871 		/* always has a timer except on malloc failures */
3872 
3873 		/* these sizes are part of the main outside network mem */
3874 		/*
3875 		s += sizeof(struct waiting_tcp);
3876 		s += comm_timer_get_mem(NULL);
3877 		*/
3878 	}
3879 	return s;
3880 }
3881 
3882