xref: /freebsd/contrib/unbound/services/outside_network.c (revision e1c4c8dd8d2d10b6104f06856a77bd5b4813a801)
1 /*
2  * services/outside_network.c - implement sending of queries and wait answer.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file has functions to send queries to authoritative servers and
40  * wait for the pending answer events.
41  */
42 #include "config.h"
43 #include <ctype.h>
44 #ifdef HAVE_SYS_TYPES_H
45 #  include <sys/types.h>
46 #endif
47 #include <sys/time.h>
48 #include "services/outside_network.h"
49 #include "services/listen_dnsport.h"
50 #include "services/cache/infra.h"
51 #include "iterator/iterator.h"
52 #include "util/data/msgparse.h"
53 #include "util/data/msgreply.h"
54 #include "util/data/msgencode.h"
55 #include "util/data/dname.h"
56 #include "util/netevent.h"
57 #include "util/log.h"
58 #include "util/net_help.h"
59 #include "util/random.h"
60 #include "util/fptr_wlist.h"
61 #include "util/edns.h"
62 #include "sldns/sbuffer.h"
63 #include "dnstap/dnstap.h"
64 #ifdef HAVE_OPENSSL_SSL_H
65 #include <openssl/ssl.h>
66 #endif
67 #ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
68 #include <openssl/x509v3.h>
69 #endif
70 
71 #ifdef HAVE_NETDB_H
72 #include <netdb.h>
73 #endif
74 #include <fcntl.h>
75 
76 /** number of times to retry making a random ID that is unique. */
77 #define MAX_ID_RETRY 1000
78 /** number of times to retry finding interface, port that can be opened. */
79 #define MAX_PORT_RETRY 10000
80 /** number of retries on outgoing UDP queries */
81 #define OUTBOUND_UDP_RETRY 1
82 
83 /** initiate TCP transaction for serviced query */
84 static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
85 /** with a fd available, randomize and send UDP */
86 static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
87 	int timeout);
88 
89 /** select a DNS ID for a TCP stream */
90 static uint16_t tcp_select_id(struct outside_network* outnet,
91 	struct reuse_tcp* reuse);
92 
93 /** Perform serviced query UDP sending operation */
94 static int serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff);
95 
96 /** Send serviced query over TCP return false on initial failure */
97 static int serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff);
98 
99 /** call the callbacks for a serviced query */
100 static void serviced_callbacks(struct serviced_query* sq, int error,
101 	struct comm_point* c, struct comm_reply* rep);
102 
103 int
104 pending_cmp(const void* key1, const void* key2)
105 {
106 	struct pending *p1 = (struct pending*)key1;
107 	struct pending *p2 = (struct pending*)key2;
108 	if(p1->id < p2->id)
109 		return -1;
110 	if(p1->id > p2->id)
111 		return 1;
112 	log_assert(p1->id == p2->id);
113 	return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
114 }
115 
116 int
117 serviced_cmp(const void* key1, const void* key2)
118 {
119 	struct serviced_query* q1 = (struct serviced_query*)key1;
120 	struct serviced_query* q2 = (struct serviced_query*)key2;
121 	int r;
122 	if(q1->qbuflen < q2->qbuflen)
123 		return -1;
124 	if(q1->qbuflen > q2->qbuflen)
125 		return 1;
126 	log_assert(q1->qbuflen == q2->qbuflen);
127 	log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
128 	/* alternate casing of qname is still the same query */
129 	if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
130 		return r;
131 	if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
132 		return r;
133 	if(q1->dnssec != q2->dnssec) {
134 		if(q1->dnssec < q2->dnssec)
135 			return -1;
136 		return 1;
137 	}
138 	if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
139 		return r;
140 	if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
141 		return r;
142 	return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
143 }
144 
145 /** compare if the reuse element has the same address, port and same ssl-is
146  * used-for-it characteristic */
147 static int
148 reuse_cmp_addrportssl(const void* key1, const void* key2)
149 {
150 	struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
151 	struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
152 	int r;
153 	/* compare address and port */
154 	r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
155 	if(r != 0)
156 		return r;
157 
158 	/* compare if SSL-enabled */
159 	if(r1->is_ssl && !r2->is_ssl)
160 		return 1;
161 	if(!r1->is_ssl && r2->is_ssl)
162 		return -1;
163 	return 0;
164 }
165 
166 int
167 reuse_cmp(const void* key1, const void* key2)
168 {
169 	int r;
170 	r = reuse_cmp_addrportssl(key1, key2);
171 	if(r != 0)
172 		return r;
173 
174 	/* compare ptr value */
175 	if(key1 < key2) return -1;
176 	if(key1 > key2) return 1;
177 	return 0;
178 }
179 
180 int reuse_id_cmp(const void* key1, const void* key2)
181 {
182 	struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
183 	struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
184 	if(w1->id < w2->id)
185 		return -1;
186 	if(w1->id > w2->id)
187 		return 1;
188 	return 0;
189 }
190 
191 /** delete waiting_tcp entry. Does not unlink from waiting list.
192  * @param w: to delete.
193  */
194 static void
195 waiting_tcp_delete(struct waiting_tcp* w)
196 {
197 	if(!w) return;
198 	if(w->timer)
199 		comm_timer_delete(w->timer);
200 	free(w);
201 }
202 
203 /**
204  * Pick random outgoing-interface of that family, and bind it.
205  * port set to 0 so OS picks a port number for us.
206  * if it is the ANY address, do not bind.
207  * @param pend: pending tcp structure, for storing the local address choice.
208  * @param w: tcp structure with destination address.
209  * @param s: socket fd.
210  * @return false on error, socket closed.
211  */
212 static int
213 pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
214 {
215 	struct port_if* pi = NULL;
216 	int num;
217 	pend->pi = NULL;
218 #ifdef INET6
219 	if(addr_is_ip6(&w->addr, w->addrlen))
220 		num = w->outnet->num_ip6;
221 	else
222 #endif
223 		num = w->outnet->num_ip4;
224 	if(num == 0) {
225 		log_err("no TCP outgoing interfaces of family");
226 		log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
227 		sock_close(s);
228 		return 0;
229 	}
230 #ifdef INET6
231 	if(addr_is_ip6(&w->addr, w->addrlen))
232 		pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
233 	else
234 #endif
235 		pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
236 	log_assert(pi);
237 	pend->pi = pi;
238 	if(addr_is_any(&pi->addr, pi->addrlen)) {
239 		/* binding to the ANY interface is for listening sockets */
240 		return 1;
241 	}
242 	/* set port to 0 */
243 	if(addr_is_ip6(&pi->addr, pi->addrlen))
244 		((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
245 	else	((struct sockaddr_in*)&pi->addr)->sin_port = 0;
246 	if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
247 #ifndef USE_WINSOCK
248 #ifdef EADDRNOTAVAIL
249 		if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
250 #endif
251 #else /* USE_WINSOCK */
252 		if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
253 #endif
254 		    log_err("outgoing tcp: bind: %s", sock_strerror(errno));
255 		sock_close(s);
256 		return 0;
257 	}
258 	log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
259 	return 1;
260 }
261 
262 /** get TCP file descriptor for address, returns -1 on failure,
263  * tcp_mss is 0 or maxseg size to set for TCP packets. */
264 int
265 outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
266 {
267 	int s;
268 	int af;
269 	char* err;
270 #if defined(SO_REUSEADDR) || defined(IP_BIND_ADDRESS_NO_PORT)
271 	int on = 1;
272 #endif
273 #ifdef INET6
274 	if(addr_is_ip6(addr, addrlen)){
275 		s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
276 		af = AF_INET6;
277 	} else {
278 #else
279 	{
280 #endif
281 		af = AF_INET;
282 		s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
283 	}
284 	if(s == -1) {
285 		log_err_addr("outgoing tcp: socket", sock_strerror(errno),
286 			addr, addrlen);
287 		return -1;
288 	}
289 
290 #ifdef SO_REUSEADDR
291 	if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
292 		(socklen_t)sizeof(on)) < 0) {
293 		verbose(VERB_ALGO, "outgoing tcp:"
294 			" setsockopt(.. SO_REUSEADDR ..) failed");
295 	}
296 #endif
297 
298 	err = set_ip_dscp(s, af, dscp);
299 	if(err != NULL) {
300 		verbose(VERB_ALGO, "outgoing tcp:"
301 			"error setting IP DiffServ codepoint on socket");
302 	}
303 
304 	if(tcp_mss > 0) {
305 #if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
306 		if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
307 			(void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
308 			verbose(VERB_ALGO, "outgoing tcp:"
309 				" setsockopt(.. TCP_MAXSEG ..) failed");
310 		}
311 #else
312 		verbose(VERB_ALGO, "outgoing tcp:"
313 			" setsockopt(TCP_MAXSEG) unsupported");
314 #endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
315 	}
316 #ifdef IP_BIND_ADDRESS_NO_PORT
317 	if(setsockopt(s, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, (void*)&on,
318 		(socklen_t)sizeof(on)) < 0) {
319 		verbose(VERB_ALGO, "outgoing tcp:"
320 			" setsockopt(.. IP_BIND_ADDRESS_NO_PORT ..) failed");
321 	}
322 #endif /* IP_BIND_ADDRESS_NO_PORT */
323 	return s;
324 }
325 
326 /** connect tcp connection to addr, 0 on failure */
327 int
328 outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
329 {
330 	if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
331 #ifndef USE_WINSOCK
332 #ifdef EINPROGRESS
333 		if(errno != EINPROGRESS) {
334 #endif
335 			if(tcp_connect_errno_needs_log(
336 				(struct sockaddr*)addr, addrlen))
337 				log_err_addr("outgoing tcp: connect",
338 					strerror(errno), addr, addrlen);
339 			close(s);
340 			return 0;
341 #ifdef EINPROGRESS
342 		}
343 #endif
344 #else /* USE_WINSOCK */
345 		if(WSAGetLastError() != WSAEINPROGRESS &&
346 			WSAGetLastError() != WSAEWOULDBLOCK) {
347 			closesocket(s);
348 			return 0;
349 		}
350 #endif
351 	}
352 	return 1;
353 }
354 
355 /** log reuse item addr and ptr with message */
356 static void
357 log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
358 {
359 	uint16_t port;
360 	char addrbuf[128];
361 	if(verbosity < v) return;
362 	if(!reuse || !reuse->pending || !reuse->pending->c)
363 		return;
364 	addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
365 	port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
366 	verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
367 		reuse->pending->c->fd);
368 }
369 
370 /** pop the first element from the writewait list */
371 struct waiting_tcp*
372 reuse_write_wait_pop(struct reuse_tcp* reuse)
373 {
374 	struct waiting_tcp* w = reuse->write_wait_first;
375 	if(!w)
376 		return NULL;
377 	log_assert(w->write_wait_queued);
378 	log_assert(!w->write_wait_prev);
379 	reuse->write_wait_first = w->write_wait_next;
380 	if(w->write_wait_next)
381 		w->write_wait_next->write_wait_prev = NULL;
382 	else	reuse->write_wait_last = NULL;
383 	w->write_wait_queued = 0;
384 	w->write_wait_next = NULL;
385 	w->write_wait_prev = NULL;
386 	return w;
387 }
388 
389 /** remove the element from the writewait list */
390 void
391 reuse_write_wait_remove(struct reuse_tcp* reuse, struct waiting_tcp* w)
392 {
393 	log_assert(w);
394 	log_assert(w->write_wait_queued);
395 	if(!w)
396 		return;
397 	if(!w->write_wait_queued)
398 		return;
399 	if(w->write_wait_prev)
400 		w->write_wait_prev->write_wait_next = w->write_wait_next;
401 	else	reuse->write_wait_first = w->write_wait_next;
402 	log_assert(!w->write_wait_prev ||
403 		w->write_wait_prev->write_wait_next != w->write_wait_prev);
404 	if(w->write_wait_next)
405 		w->write_wait_next->write_wait_prev = w->write_wait_prev;
406 	else	reuse->write_wait_last = w->write_wait_prev;
407 	log_assert(!w->write_wait_next
408 		|| w->write_wait_next->write_wait_prev != w->write_wait_next);
409 	w->write_wait_queued = 0;
410 	w->write_wait_next = NULL;
411 	w->write_wait_prev = NULL;
412 }
413 
414 /** push the element after the last on the writewait list */
415 void
416 reuse_write_wait_push_back(struct reuse_tcp* reuse, struct waiting_tcp* w)
417 {
418 	if(!w) return;
419 	log_assert(!w->write_wait_queued);
420 	if(reuse->write_wait_last) {
421 		reuse->write_wait_last->write_wait_next = w;
422 		log_assert(reuse->write_wait_last->write_wait_next !=
423 			reuse->write_wait_last);
424 		w->write_wait_prev = reuse->write_wait_last;
425 	} else {
426 		reuse->write_wait_first = w;
427 		w->write_wait_prev = NULL;
428 	}
429 	w->write_wait_next = NULL;
430 	reuse->write_wait_last = w;
431 	w->write_wait_queued = 1;
432 }
433 
434 /** insert element in tree by id */
435 void
436 reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
437 {
438 #ifdef UNBOUND_DEBUG
439 	rbnode_type* added;
440 #endif
441 	log_assert(w->id_node.key == NULL);
442 	w->id_node.key = w;
443 #ifdef UNBOUND_DEBUG
444 	added =
445 #else
446 	(void)
447 #endif
448 	rbtree_insert(&reuse->tree_by_id, &w->id_node);
449 	log_assert(added);  /* should have been added */
450 }
451 
452 /** find element in tree by id */
453 struct waiting_tcp*
454 reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
455 {
456 	struct waiting_tcp key_w;
457 	rbnode_type* n;
458 	memset(&key_w, 0, sizeof(key_w));
459 	key_w.id_node.key = &key_w;
460 	key_w.id = id;
461 	n = rbtree_search(&reuse->tree_by_id, &key_w);
462 	if(!n) return NULL;
463 	return (struct waiting_tcp*)n->key;
464 }
465 
466 /** return ID value of rbnode in tree_by_id */
467 static uint16_t
468 tree_by_id_get_id(rbnode_type* node)
469 {
470 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
471 	return w->id;
472 }
473 
474 /** insert into reuse tcp tree and LRU, false on failure (duplicate) */
475 int
476 reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
477 {
478 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
479 	if(pend_tcp->reuse.item_on_lru_list) {
480 		if(!pend_tcp->reuse.node.key)
481 			log_err("internal error: reuse_tcp_insert: "
482 				"in lru list without key");
483 		return 1;
484 	}
485 	pend_tcp->reuse.node.key = &pend_tcp->reuse;
486 	pend_tcp->reuse.pending = pend_tcp;
487 	if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
488 		/* We are not in the LRU list but we are already in the
489 		 * tcp_reuse tree, strange.
490 		 * Continue to add ourselves to the LRU list. */
491 		log_err("internal error: reuse_tcp_insert: in lru list but "
492 			"not in the tree");
493 	}
494 	/* insert into LRU, first is newest */
495 	pend_tcp->reuse.lru_prev = NULL;
496 	if(outnet->tcp_reuse_first) {
497 		pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
498 		log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
499 		outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
500 		log_assert(outnet->tcp_reuse_first->lru_prev !=
501 			outnet->tcp_reuse_first);
502 	} else {
503 		pend_tcp->reuse.lru_next = NULL;
504 		outnet->tcp_reuse_last = &pend_tcp->reuse;
505 	}
506 	outnet->tcp_reuse_first = &pend_tcp->reuse;
507 	pend_tcp->reuse.item_on_lru_list = 1;
508 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
509 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
510 	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
511 		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
512 	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
513 		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
514 	return 1;
515 }
516 
517 /** find reuse tcp stream to destination for query, or NULL if none */
518 static struct reuse_tcp*
519 reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
520 	socklen_t addrlen, int use_ssl)
521 {
522 	struct waiting_tcp key_w;
523 	struct pending_tcp key_p;
524 	struct comm_point c;
525 	rbnode_type* result = NULL, *prev;
526 	verbose(VERB_CLIENT, "reuse_tcp_find");
527 	memset(&key_w, 0, sizeof(key_w));
528 	memset(&key_p, 0, sizeof(key_p));
529 	memset(&c, 0, sizeof(c));
530 	key_p.query = &key_w;
531 	key_p.c = &c;
532 	key_p.reuse.pending = &key_p;
533 	key_p.reuse.node.key = &key_p.reuse;
534 	if(use_ssl)
535 		key_p.reuse.is_ssl = 1;
536 	if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
537 		return NULL;
538 	memmove(&key_p.reuse.addr, addr, addrlen);
539 	key_p.reuse.addrlen = addrlen;
540 
541 	verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
542 		(unsigned)outnet->tcp_reuse.count);
543 	if(outnet->tcp_reuse.root == NULL ||
544 		outnet->tcp_reuse.root == RBTREE_NULL)
545 		return NULL;
546 	if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
547 		&result)) {
548 		/* exact match */
549 		/* but the key is on stack, and ptr is compared, impossible */
550 		log_assert(&key_p.reuse != (struct reuse_tcp*)result);
551 		log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
552 	}
553 
554 	/* It is possible that we search for something before the first element
555 	 * in the tree. Replace a null pointer with the first element.
556 	 */
557 	if (!result) {
558 		verbose(VERB_CLIENT, "reuse_tcp_find: taking first");
559 		result = rbtree_first(&outnet->tcp_reuse);
560 	}
561 
562 	/* not found, return null */
563 	if(!result || result == RBTREE_NULL)
564 		return NULL;
565 
566 	/* It is possible that we got the previous address, but that the
567 	 * address we are looking for is in the tree. If the address we got
568 	 * is less than the address we are looking, then take the next entry.
569 	 */
570 	if (reuse_cmp_addrportssl(result->key, &key_p.reuse) < 0) {
571 		verbose(VERB_CLIENT, "reuse_tcp_find: key too low");
572 		result = rbtree_next(result);
573 	}
574 
575 	verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
576 	/* inexact match, find one of possibly several connections to the
577 	 * same destination address, with the correct port, ssl, and
578 	 * also less than max number of open queries, or else, fail to open
579 	 * a new one */
580 	/* rewind to start of sequence of same address,port,ssl */
581 	prev = rbtree_previous(result);
582 	while(prev && prev != RBTREE_NULL &&
583 		reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
584 		result = prev;
585 		prev = rbtree_previous(result);
586 	}
587 
588 	/* loop to find first one that has correct characteristics */
589 	while(result && result != RBTREE_NULL &&
590 		reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
591 		if(((struct reuse_tcp*)result)->tree_by_id.count <
592 			outnet->max_reuse_tcp_queries) {
593 			/* same address, port, ssl-yes-or-no, and has
594 			 * space for another query */
595 			return (struct reuse_tcp*)result;
596 		}
597 		result = rbtree_next(result);
598 	}
599 	return NULL;
600 }
601 
602 /** use the buffer to setup writing the query */
603 static void
604 outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
605 	struct waiting_tcp* w)
606 {
607 	struct timeval tv;
608 	verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
609 		"len %d timeout %d msec",
610 		(int)w->pkt_len, w->timeout);
611 	pend->c->tcp_write_pkt = w->pkt;
612 	pend->c->tcp_write_pkt_len = w->pkt_len;
613 	pend->c->tcp_write_and_read = 1;
614 	pend->c->tcp_write_byte_count = 0;
615 	pend->c->tcp_is_reading = 0;
616 	comm_point_start_listening(pend->c, s, -1);
617 	/* set timer on the waiting_tcp entry, this is the write timeout
618 	 * for the written packet.  The timer on pend->c is the timer
619 	 * for when there is no written packet and we have readtimeouts */
620 #ifndef S_SPLINT_S
621 	tv.tv_sec = w->timeout/1000;
622 	tv.tv_usec = (w->timeout%1000)*1000;
623 #endif
624 	/* if the waiting_tcp was previously waiting for a buffer in the
625 	 * outside_network.tcpwaitlist, then the timer is reset now that
626 	 * we start writing it */
627 	comm_timer_set(w->timer, &tv);
628 }
629 
630 /** use next free buffer to service a tcp query */
631 static int
632 outnet_tcp_take_into_use(struct waiting_tcp* w)
633 {
634 	struct pending_tcp* pend = w->outnet->tcp_free;
635 	int s;
636 	log_assert(pend);
637 	log_assert(w->pkt);
638 	log_assert(w->pkt_len > 0);
639 	log_assert(w->addrlen > 0);
640 	pend->c->tcp_do_toggle_rw = 0;
641 	pend->c->tcp_do_close = 0;
642 
643 	/* Consistency check, if we have ssl_upstream but no sslctx, then
644 	 * log an error and return failure.
645 	 */
646 	if (w->ssl_upstream && !w->outnet->sslctx) {
647 		log_err("SSL upstream requested but no SSL context");
648 		return 0;
649 	}
650 
651 	/* open socket */
652 	s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);
653 
654 	if(s == -1)
655 		return 0;
656 
657 	if(!pick_outgoing_tcp(pend, w, s))
658 		return 0;
659 
660 	fd_set_nonblock(s);
661 #ifdef USE_OSX_MSG_FASTOPEN
662 	/* API for fast open is different here. We use a connectx() function and
663 	   then writes can happen as normal even using SSL.*/
664 	/* connectx requires that the len be set in the sockaddr struct*/
665 	struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
666 	addr_in->sin_len = w->addrlen;
667 	sa_endpoints_t endpoints;
668 	endpoints.sae_srcif = 0;
669 	endpoints.sae_srcaddr = NULL;
670 	endpoints.sae_srcaddrlen = 0;
671 	endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
672 	endpoints.sae_dstaddrlen = w->addrlen;
673 	if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
674 	             CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
675 	             NULL, 0, NULL, NULL) == -1) {
676 		/* if fails, failover to connect for OSX 10.10 */
677 #ifdef EINPROGRESS
678 		if(errno != EINPROGRESS) {
679 #else
680 		if(1) {
681 #endif
682 			if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
683 #else /* USE_OSX_MSG_FASTOPEN*/
684 #ifdef USE_MSG_FASTOPEN
685 	pend->c->tcp_do_fastopen = 1;
686 	/* Only do TFO for TCP in which case no connect() is required here.
687 	   Don't combine client TFO with SSL, since OpenSSL can't
688 	   currently support doing a handshake on fd that already isn't connected*/
689 	if (w->outnet->sslctx && w->ssl_upstream) {
690 		if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
691 #else /* USE_MSG_FASTOPEN*/
692 	if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
693 #endif /* USE_MSG_FASTOPEN*/
694 #endif /* USE_OSX_MSG_FASTOPEN*/
695 #ifndef USE_WINSOCK
696 #ifdef EINPROGRESS
697 		if(errno != EINPROGRESS) {
698 #else
699 		if(1) {
700 #endif
701 			if(tcp_connect_errno_needs_log(
702 				(struct sockaddr*)&w->addr, w->addrlen))
703 				log_err_addr("outgoing tcp: connect",
704 					strerror(errno), &w->addr, w->addrlen);
705 			close(s);
706 #else /* USE_WINSOCK */
707 		if(WSAGetLastError() != WSAEINPROGRESS &&
708 			WSAGetLastError() != WSAEWOULDBLOCK) {
709 			closesocket(s);
710 #endif
711 			return 0;
712 		}
713 	}
714 #ifdef USE_MSG_FASTOPEN
715 	}
716 #endif /* USE_MSG_FASTOPEN */
717 #ifdef USE_OSX_MSG_FASTOPEN
718 		}
719 	}
720 #endif /* USE_OSX_MSG_FASTOPEN */
721 	if(w->outnet->sslctx && w->ssl_upstream) {
722 		pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
723 		if(!pend->c->ssl) {
724 			pend->c->fd = s;
725 			comm_point_close(pend->c);
726 			return 0;
727 		}
728 		verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
729 			(w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
730 #ifdef USE_WINSOCK
731 		comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
732 #endif
733 		pend->c->ssl_shake_state = comm_ssl_shake_write;
734 		if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
735 			w->outnet->tls_use_sni)) {
736 			pend->c->fd = s;
737 #ifdef HAVE_SSL
738 			SSL_free(pend->c->ssl);
739 #endif
740 			pend->c->ssl = NULL;
741 			comm_point_close(pend->c);
742 			return 0;
743 		}
744 	}
745 	w->next_waiting = (void*)pend;
746 	w->outnet->num_tcp_outgoing++;
747 	w->outnet->tcp_free = pend->next_free;
748 	pend->next_free = NULL;
749 	pend->query = w;
750 	pend->reuse.outnet = w->outnet;
751 	pend->c->repinfo.remote_addrlen = w->addrlen;
752 	pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
753 	pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
754 	pend->reuse.cp_more_read_again = 0;
755 	pend->reuse.cp_more_write_again = 0;
756 	memcpy(&pend->c->repinfo.remote_addr, &w->addr, w->addrlen);
757 	pend->reuse.pending = pend;
758 
759 	/* Remove from tree in case the is_ssl will be different and causes the
760 	 * identity of the reuse_tcp to change; could result in nodes not being
761 	 * deleted from the tree (because the new identity does not match the
762 	 * previous node) but their ->key would be changed to NULL. */
763 	if(pend->reuse.node.key)
764 		reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);
765 
766 	if(pend->c->ssl)
767 		pend->reuse.is_ssl = 1;
768 	else	pend->reuse.is_ssl = 0;
769 	/* insert in reuse by address tree if not already inserted there */
770 	(void)reuse_tcp_insert(w->outnet, pend);
771 	reuse_tree_by_id_insert(&pend->reuse, w);
772 	outnet_tcp_take_query_setup(s, pend, w);
773 	return 1;
774 }
775 
776 /** Touch the lru of a reuse_tcp element, it is in use.
777  * This moves it to the front of the list, where it is not likely to
778  * be closed.  Items at the back of the list are closed to make space. */
779 void
780 reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
781 {
782 	if(!reuse->item_on_lru_list) {
783 		log_err("internal error: we need to touch the lru_list but item not in list");
784 		return; /* not on the list, no lru to modify */
785 	}
786 	log_assert(reuse->lru_prev ||
787 		(!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
788 	if(!reuse->lru_prev)
789 		return; /* already first in the list */
790 	/* remove at current position */
791 	/* since it is not first, there is a previous element */
792 	reuse->lru_prev->lru_next = reuse->lru_next;
793 	log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
794 	if(reuse->lru_next)
795 		reuse->lru_next->lru_prev = reuse->lru_prev;
796 	else	outnet->tcp_reuse_last = reuse->lru_prev;
797 	log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
798 	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
799 		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
800 	/* insert at the front */
801 	reuse->lru_prev = NULL;
802 	reuse->lru_next = outnet->tcp_reuse_first;
803 	if(outnet->tcp_reuse_first) {
804 		outnet->tcp_reuse_first->lru_prev = reuse;
805 	}
806 	log_assert(reuse->lru_next != reuse);
807 	/* since it is not first, it is not the only element and
808 	 * lru_next is thus not NULL and thus reuse is now not the last in
809 	 * the list, so outnet->tcp_reuse_last does not need to be modified */
810 	outnet->tcp_reuse_first = reuse;
811 	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
812 		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
813 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
814 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
815 }
816 
817 /** Snip the last reuse_tcp element off of the LRU list */
818 struct reuse_tcp*
819 reuse_tcp_lru_snip(struct outside_network* outnet)
820 {
821 	struct reuse_tcp* reuse = outnet->tcp_reuse_last;
822 	if(!reuse) return NULL;
823 	/* snip off of LRU */
824 	log_assert(reuse->lru_next == NULL);
825 	if(reuse->lru_prev) {
826 		outnet->tcp_reuse_last = reuse->lru_prev;
827 		reuse->lru_prev->lru_next = NULL;
828 	} else {
829 		outnet->tcp_reuse_last = NULL;
830 		outnet->tcp_reuse_first = NULL;
831 	}
832 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
833 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
834 	reuse->item_on_lru_list = 0;
835 	reuse->lru_next = NULL;
836 	reuse->lru_prev = NULL;
837 	return reuse;
838 }
839 
840 /** remove waiting tcp from the outnet waiting list */
841 void
842 outnet_waiting_tcp_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
843 {
844 	struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
845 	w->on_tcp_waiting_list = 0;
846 	while(p) {
847 		if(p == w) {
848 			/* remove w */
849 			if(prev)
850 				prev->next_waiting = w->next_waiting;
851 			else	outnet->tcp_wait_first = w->next_waiting;
852 			if(outnet->tcp_wait_last == w)
853 				outnet->tcp_wait_last = prev;
854 			w->next_waiting = NULL;
855 			return;
856 		}
857 		prev = p;
858 		p = p->next_waiting;
859 	}
860 	/* outnet_waiting_tcp_list_remove is currently called only with items
861 	 * that are already in the waiting list. */
862 	log_assert(0);
863 }
864 
865 /** pop the first waiting tcp from the outnet waiting list */
866 struct waiting_tcp*
867 outnet_waiting_tcp_list_pop(struct outside_network* outnet)
868 {
869 	struct waiting_tcp* w = outnet->tcp_wait_first;
870 	if(!outnet->tcp_wait_first) return NULL;
871 	log_assert(w->on_tcp_waiting_list);
872 	outnet->tcp_wait_first = w->next_waiting;
873 	if(outnet->tcp_wait_last == w)
874 		outnet->tcp_wait_last = NULL;
875 	w->on_tcp_waiting_list = 0;
876 	w->next_waiting = NULL;
877 	return w;
878 }
879 
880 /** add waiting_tcp element to the outnet tcp waiting list */
881 void
882 outnet_waiting_tcp_list_add(struct outside_network* outnet,
883 	struct waiting_tcp* w, int set_timer)
884 {
885 	struct timeval tv;
886 	log_assert(!w->on_tcp_waiting_list);
887 	if(w->on_tcp_waiting_list)
888 		return;
889 	w->next_waiting = NULL;
890 	if(outnet->tcp_wait_last)
891 		outnet->tcp_wait_last->next_waiting = w;
892 	else	outnet->tcp_wait_first = w;
893 	outnet->tcp_wait_last = w;
894 	w->on_tcp_waiting_list = 1;
895 	if(set_timer) {
896 #ifndef S_SPLINT_S
897 		tv.tv_sec = w->timeout/1000;
898 		tv.tv_usec = (w->timeout%1000)*1000;
899 #endif
900 		comm_timer_set(w->timer, &tv);
901 	}
902 }
903 
904 /** add waiting_tcp element as first to the outnet tcp waiting list */
905 void
906 outnet_waiting_tcp_list_add_first(struct outside_network* outnet,
907 	struct waiting_tcp* w, int reset_timer)
908 {
909 	struct timeval tv;
910 	log_assert(!w->on_tcp_waiting_list);
911 	if(w->on_tcp_waiting_list)
912 		return;
913 	w->next_waiting = outnet->tcp_wait_first;
914 	log_assert(w->next_waiting != w);
915 	if(!outnet->tcp_wait_last)
916 		outnet->tcp_wait_last = w;
917 	outnet->tcp_wait_first = w;
918 	w->on_tcp_waiting_list = 1;
919 	if(reset_timer) {
920 #ifndef S_SPLINT_S
921 		tv.tv_sec = w->timeout/1000;
922 		tv.tv_usec = (w->timeout%1000)*1000;
923 #endif
924 		comm_timer_set(w->timer, &tv);
925 	}
926 	log_assert(
927 		(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
928 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
929 }
930 
931 /** call callback on waiting_tcp, if not NULL */
932 static void
933 waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
934 	struct comm_reply* reply_info)
935 {
936 	if(w && w->cb) {
937 		fptr_ok(fptr_whitelist_pending_tcp(w->cb));
938 		(void)(*w->cb)(c, w->cb_arg, error, reply_info);
939 	}
940 }
941 
942 /** see if buffers can be used to service TCP queries */
943 static void
944 use_free_buffer(struct outside_network* outnet)
945 {
946 	struct waiting_tcp* w;
947 	while(outnet->tcp_wait_first && !outnet->want_to_quit) {
948 #ifdef USE_DNSTAP
949 		struct pending_tcp* pend_tcp = NULL;
950 #endif
951 		struct reuse_tcp* reuse = NULL;
952 		w = outnet_waiting_tcp_list_pop(outnet);
953 		log_assert(
954 			(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
955 			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
956 		reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
957 			w->ssl_upstream);
958 		/* re-select an ID when moving to a new TCP buffer */
959 		w->id = tcp_select_id(outnet, reuse);
960 		LDNS_ID_SET(w->pkt, w->id);
961 		if(reuse) {
962 			log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
963 				"found reuse", reuse);
964 #ifdef USE_DNSTAP
965 			pend_tcp = reuse->pending;
966 #endif
967 			reuse_tcp_lru_touch(outnet, reuse);
968 			comm_timer_disable(w->timer);
969 			w->next_waiting = (void*)reuse->pending;
970 			reuse_tree_by_id_insert(reuse, w);
971 			if(reuse->pending->query) {
972 				/* on the write wait list */
973 				reuse_write_wait_push_back(reuse, w);
974 			} else {
975 				/* write straight away */
976 				/* stop the timer on read of the fd */
977 				comm_point_stop_listening(reuse->pending->c);
978 				reuse->pending->query = w;
979 				outnet_tcp_take_query_setup(
980 					reuse->pending->c->fd, reuse->pending,
981 					w);
982 			}
983 		} else if(outnet->tcp_free) {
984 			struct pending_tcp* pend = w->outnet->tcp_free;
985 			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
986 			pend->reuse.pending = pend;
987 			memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
988 			pend->reuse.addrlen = w->addrlen;
989 			if(!outnet_tcp_take_into_use(w)) {
990 				waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
991 					NULL);
992 				waiting_tcp_delete(w);
993 #ifdef USE_DNSTAP
994 				w = NULL;
995 #endif
996 			}
997 #ifdef USE_DNSTAP
998 			pend_tcp = pend;
999 #endif
1000 		} else {
1001 			/* no reuse and no free buffer, put back at the start */
1002 			outnet_waiting_tcp_list_add_first(outnet, w, 0);
1003 			break;
1004 		}
1005 #ifdef USE_DNSTAP
1006 		if(outnet->dtenv && pend_tcp && w && w->sq &&
1007 			(outnet->dtenv->log_resolver_query_messages ||
1008 			outnet->dtenv->log_forwarder_query_messages)) {
1009 			sldns_buffer tmp;
1010 			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
1011 			dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
1012 				&pend_tcp->pi->addr, comm_tcp, NULL, w->sq->zone,
1013 				w->sq->zonelen, &tmp);
1014 		}
1015 #endif
1016 	}
1017 }
1018 
1019 /** delete element from tree by id */
1020 static void
1021 reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
1022 {
1023 #ifdef UNBOUND_DEBUG
1024 	rbnode_type* rem;
1025 #endif
1026 	log_assert(w->id_node.key != NULL);
1027 #ifdef UNBOUND_DEBUG
1028 	rem =
1029 #else
1030 	(void)
1031 #endif
1032 	rbtree_delete(&reuse->tree_by_id, w);
1033 	log_assert(rem);  /* should have been there */
1034 	w->id_node.key = NULL;
1035 }
1036 
1037 /** move writewait list to go for another connection. */
1038 static void
1039 reuse_move_writewait_away(struct outside_network* outnet,
1040 	struct pending_tcp* pend)
1041 {
1042 	/* the writewait list has not been written yet, so if the
1043 	 * stream was closed, they have not actually been failed, only
1044 	 * the queries written.  Other queries can get written to another
1045 	 * stream.  For upstreams that do not support multiple queries
1046 	 * and answers, the stream can get closed, and then the queries
1047 	 * can get written on a new socket */
1048 	struct waiting_tcp* w;
1049 	if(pend->query && pend->query->error_count == 0 &&
1050 		pend->c->tcp_write_pkt == pend->query->pkt &&
1051 		pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
1052 		/* since the current query is not written, it can also
1053 		 * move to a free buffer */
1054 		if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
1055 			LDNS_QDCOUNT(pend->query->pkt) > 0 &&
1056 			dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
1057 			char buf[LDNS_MAX_DOMAINLEN+1];
1058 			dname_str(pend->query->pkt+12, buf);
1059 			verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
1060 				buf, (int)pend->c->tcp_write_byte_count);
1061 		}
1062 		pend->c->tcp_write_pkt = NULL;
1063 		pend->c->tcp_write_pkt_len = 0;
1064 		pend->c->tcp_write_and_read = 0;
1065 		pend->reuse.cp_more_read_again = 0;
1066 		pend->reuse.cp_more_write_again = 0;
1067 		pend->c->tcp_is_reading = 1;
1068 		w = pend->query;
1069 		pend->query = NULL;
1070 		/* increase error count, so that if the next socket fails too
1071 		 * the server selection is run again with this query failed
1072 		 * and it can select a different server (if possible), or
1073 		 * fail the query */
1074 		w->error_count ++;
1075 		reuse_tree_by_id_delete(&pend->reuse, w);
1076 		outnet_waiting_tcp_list_add(outnet, w, 1);
1077 	}
1078 	while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
1079 		if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
1080 			LDNS_QDCOUNT(w->pkt) > 0 &&
1081 			dname_valid(w->pkt+12, w->pkt_len-12)) {
1082 			char buf[LDNS_MAX_DOMAINLEN+1];
1083 			dname_str(w->pkt+12, buf);
1084 			verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
1085 		}
1086 		reuse_tree_by_id_delete(&pend->reuse, w);
1087 		outnet_waiting_tcp_list_add(outnet, w, 1);
1088 	}
1089 }
1090 
1091 /** remove reused element from tree and lru list */
1092 void
1093 reuse_tcp_remove_tree_list(struct outside_network* outnet,
1094 	struct reuse_tcp* reuse)
1095 {
1096 	verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
1097 	if(reuse->node.key) {
1098 		/* delete it from reuse tree */
1099 		if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
1100 			/* should not be possible, it should be there */
1101 			char buf[256];
1102 			addr_to_str(&reuse->addr, reuse->addrlen, buf,
1103 				sizeof(buf));
1104 			log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
1105 		}
1106 		reuse->node.key = NULL;
1107 		/* defend against loops on broken tree by zeroing the
1108 		 * rbnode structure */
1109 		memset(&reuse->node, 0, sizeof(reuse->node));
1110 	}
1111 	/* delete from reuse list */
1112 	if(reuse->item_on_lru_list) {
1113 		if(reuse->lru_prev) {
1114 			/* assert that members of the lru list are waiting
1115 			 * and thus have a pending pointer to the struct */
1116 			log_assert(reuse->lru_prev->pending);
1117 			reuse->lru_prev->lru_next = reuse->lru_next;
1118 			log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
1119 		} else {
1120 			log_assert(!reuse->lru_next || reuse->lru_next->pending);
1121 			outnet->tcp_reuse_first = reuse->lru_next;
1122 			log_assert(!outnet->tcp_reuse_first ||
1123 				(outnet->tcp_reuse_first !=
1124 				 outnet->tcp_reuse_first->lru_next &&
1125 				 outnet->tcp_reuse_first !=
1126 				 outnet->tcp_reuse_first->lru_prev));
1127 		}
1128 		if(reuse->lru_next) {
1129 			/* assert that members of the lru list are waiting
1130 			 * and thus have a pending pointer to the struct */
1131 			log_assert(reuse->lru_next->pending);
1132 			reuse->lru_next->lru_prev = reuse->lru_prev;
1133 			log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
1134 		} else {
1135 			log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
1136 			outnet->tcp_reuse_last = reuse->lru_prev;
1137 			log_assert(!outnet->tcp_reuse_last ||
1138 				(outnet->tcp_reuse_last !=
1139 				 outnet->tcp_reuse_last->lru_next &&
1140 				 outnet->tcp_reuse_last !=
1141 				 outnet->tcp_reuse_last->lru_prev));
1142 		}
1143 		log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
1144 			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
1145 		reuse->item_on_lru_list = 0;
1146 		reuse->lru_next = NULL;
1147 		reuse->lru_prev = NULL;
1148 	}
1149 	reuse->pending = NULL;
1150 }
1151 
1152 /** helper function that deletes an element from the tree of readwait
1153  * elements in tcp reuse structure */
1154 static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
1155 {
1156 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1157 	waiting_tcp_delete(w);
1158 }
1159 
1160 /** delete readwait waiting_tcp elements, deletes the elements in the list */
1161 void reuse_del_readwait(rbtree_type* tree_by_id)
1162 {
1163 	if(tree_by_id->root == NULL ||
1164 		tree_by_id->root == RBTREE_NULL)
1165 		return;
1166 	traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
1167 	rbtree_init(tree_by_id, reuse_id_cmp);
1168 }
1169 
1170 /** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
1171 static void
1172 decommission_pending_tcp(struct outside_network* outnet,
1173 	struct pending_tcp* pend)
1174 {
1175 	verbose(VERB_CLIENT, "decommission_pending_tcp");
1176 	/* A certain code path can lead here twice for the same pending_tcp
1177 	 * creating a loop in the free pending_tcp list. */
1178 	if(outnet->tcp_free != pend) {
1179 		pend->next_free = outnet->tcp_free;
1180 		outnet->tcp_free = pend;
1181 	}
1182 	if(pend->reuse.node.key) {
1183 		/* needs unlink from the reuse tree to get deleted */
1184 		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1185 	}
1186 	/* free SSL structure after remove from outnet tcp reuse tree,
1187 	 * because the c->ssl null or not is used for sorting in the tree */
1188 	if(pend->c->ssl) {
1189 #ifdef HAVE_SSL
1190 		SSL_shutdown(pend->c->ssl);
1191 		SSL_free(pend->c->ssl);
1192 		pend->c->ssl = NULL;
1193 #endif
1194 	}
1195 	comm_point_close(pend->c);
1196 	pend->reuse.cp_more_read_again = 0;
1197 	pend->reuse.cp_more_write_again = 0;
1198 	/* unlink the query and writewait list, it is part of the tree
1199 	 * nodes and is deleted */
1200 	pend->query = NULL;
1201 	pend->reuse.write_wait_first = NULL;
1202 	pend->reuse.write_wait_last = NULL;
1203 	reuse_del_readwait(&pend->reuse.tree_by_id);
1204 }
1205 
1206 /** perform failure callbacks for waiting queries in reuse read rbtree */
1207 static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
1208 {
1209 	rbnode_type* node;
1210 	if(tree_by_id->root == NULL ||
1211 		tree_by_id->root == RBTREE_NULL)
1212 		return;
1213 	node = rbtree_first(tree_by_id);
1214 	while(node && node != RBTREE_NULL) {
1215 		struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1216 		waiting_tcp_callback(w, NULL, err, NULL);
1217 		node = rbtree_next(node);
1218 	}
1219 }
1220 
1221 /** mark the entry for being in the cb_and_decommission stage */
1222 static void mark_for_cb_and_decommission(rbnode_type* node,
1223 	void* ATTR_UNUSED(arg))
1224 {
1225 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1226 	/* Mark the waiting_tcp to signal later code (serviced_delete) that
1227 	 * this item is part of the backed up tree_by_id and will be deleted
1228 	 * later. */
1229 	w->in_cb_and_decommission = 1;
1230 	/* Mark the serviced_query for deletion so that later code through
1231 	 * callbacks (iter_clear .. outnet_serviced_query_stop) won't
1232 	 * prematurely delete it. */
1233 	if(w->cb)
1234 		((struct serviced_query*)w->cb_arg)->to_be_deleted = 1;
1235 }
1236 
1237 /** perform callbacks for failure and also decommission pending tcp.
1238  * the callbacks remove references in sq->pending to the waiting_tcp
1239  * members of the tree_by_id in the pending tcp.  The pending_tcp is
1240  * removed before the callbacks, so that the callbacks do not modify
1241  * the pending_tcp due to its reference in the outside_network reuse tree */
1242 static void reuse_cb_and_decommission(struct outside_network* outnet,
1243 	struct pending_tcp* pend, int error)
1244 {
1245 	rbtree_type store;
1246 	store = pend->reuse.tree_by_id;
1247 	pend->query = NULL;
1248 	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
1249 	pend->reuse.write_wait_first = NULL;
1250 	pend->reuse.write_wait_last = NULL;
1251 	decommission_pending_tcp(outnet, pend);
1252 	if(store.root != NULL && store.root != RBTREE_NULL) {
1253 		traverse_postorder(&store, &mark_for_cb_and_decommission, NULL);
1254 	}
1255 	reuse_cb_readwait_for_failure(&store, error);
1256 	reuse_del_readwait(&store);
1257 }
1258 
1259 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1260 static void
1261 reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1262 {
1263 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
1264 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1265 }
1266 
1267 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1268 static void
1269 reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1270 {
1271 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
1272 	sldns_buffer_clear(pend_tcp->c->buffer);
1273 	pend_tcp->c->tcp_is_reading = 1;
1274 	pend_tcp->c->tcp_byte_count = 0;
1275 	comm_point_stop_listening(pend_tcp->c);
1276 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1277 }
1278 
1279 int
1280 outnet_tcp_cb(struct comm_point* c, void* arg, int error,
1281 	struct comm_reply *reply_info)
1282 {
1283 	struct pending_tcp* pend = (struct pending_tcp*)arg;
1284 	struct outside_network* outnet = pend->reuse.outnet;
1285 	struct waiting_tcp* w = NULL;
1286 	log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
1287 	verbose(VERB_ALGO, "outnettcp cb");
1288 	if(error == NETEVENT_TIMEOUT) {
1289 		if(pend->c->tcp_write_and_read) {
1290 			verbose(VERB_QUERY, "outnettcp got tcp timeout "
1291 				"for read, ignored because write underway");
1292 			/* if we are writing, ignore readtimer, wait for write timer
1293 			 * or write is done */
1294 			return 0;
1295 		} else {
1296 			verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
1297 				(pend->reuse.tree_by_id.count?"for reading pkt":
1298 				"for keepalive for reuse"));
1299 		}
1300 		/* must be timeout for reading or keepalive reuse,
1301 		 * close it. */
1302 		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1303 	} else if(error == NETEVENT_PKT_WRITTEN) {
1304 		/* the packet we want to write has been written. */
1305 		verbose(VERB_ALGO, "outnet tcp pkt was written event");
1306 		log_assert(c == pend->c);
1307 		log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
1308 		log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
1309 		pend->c->tcp_write_pkt = NULL;
1310 		pend->c->tcp_write_pkt_len = 0;
1311 		/* the pend.query is already in tree_by_id */
1312 		log_assert(pend->query->id_node.key);
1313 		pend->query = NULL;
1314 		/* setup to write next packet or setup read timeout */
1315 		if(pend->reuse.write_wait_first) {
1316 			verbose(VERB_ALGO, "outnet tcp setup next pkt");
1317 			/* we can write it straight away perhaps, set flag
1318 			 * because this callback called after a tcp write
1319 			 * succeeded and likely more buffer space is available
1320 			 * and we can write some more. */
1321 			pend->reuse.cp_more_write_again = 1;
1322 			pend->query = reuse_write_wait_pop(&pend->reuse);
1323 			comm_point_stop_listening(pend->c);
1324 			outnet_tcp_take_query_setup(pend->c->fd, pend,
1325 				pend->query);
1326 		} else {
1327 			verbose(VERB_ALGO, "outnet tcp writes done, wait");
1328 			pend->c->tcp_write_and_read = 0;
1329 			pend->reuse.cp_more_read_again = 0;
1330 			pend->reuse.cp_more_write_again = 0;
1331 			pend->c->tcp_is_reading = 1;
1332 			comm_point_stop_listening(pend->c);
1333 			reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
1334 		}
1335 		return 0;
1336 	} else if(error != NETEVENT_NOERROR) {
1337 		verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
1338 		reuse_move_writewait_away(outnet, pend);
1339 		/* pass error below and exit */
1340 	} else {
1341 		/* check ID */
1342 		if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
1343 			log_addr(VERB_QUERY,
1344 				"outnettcp: bad ID in reply, too short, from:",
1345 				&pend->reuse.addr, pend->reuse.addrlen);
1346 			error = NETEVENT_CLOSED;
1347 		} else {
1348 			uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
1349 				c->buffer));
1350 			/* find the query the reply is for */
1351 			w = reuse_tcp_by_id_find(&pend->reuse, id);
1352 			/* Make sure that the reply we got is at least for a
1353 			 * sent query with the same ID; the waiting_tcp that
1354 			 * gets a reply is assumed to not be waiting to be
1355 			 * sent. */
1356 			if(w && (w->on_tcp_waiting_list || w->write_wait_queued))
1357 				w = NULL;
1358 		}
1359 	}
1360 	if(error == NETEVENT_NOERROR && !w) {
1361 		/* no struct waiting found in tree, no reply to call */
1362 		log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
1363 			&pend->reuse.addr, pend->reuse.addrlen);
1364 		error = NETEVENT_CLOSED;
1365 	}
1366 	if(error == NETEVENT_NOERROR) {
1367 		/* add to reuse tree so it can be reused, if not a failure.
1368 		 * This is possible if the state machine wants to make a tcp
1369 		 * query again to the same destination. */
1370 		if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
1371 			(void)reuse_tcp_insert(outnet, pend);
1372 		}
1373 	}
1374 	if(w) {
1375 		log_assert(!w->on_tcp_waiting_list);
1376 		log_assert(!w->write_wait_queued);
1377 		reuse_tree_by_id_delete(&pend->reuse, w);
1378 		verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
1379 			error, (int)sldns_buffer_limit(c->buffer));
1380 		waiting_tcp_callback(w, c, error, reply_info);
1381 		waiting_tcp_delete(w);
1382 	}
1383 	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
1384 	if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
1385 		verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
1386 		/* it is in the reuse_tcp tree, with other queries, or
1387 		 * on the empty list. do not decommission it */
1388 		/* if there are more outstanding queries, we could try to
1389 		 * read again, to see if it is on the input,
1390 		 * because this callback called after a successful read
1391 		 * and there could be more bytes to read on the input */
1392 		if(pend->reuse.tree_by_id.count != 0)
1393 			pend->reuse.cp_more_read_again = 1;
1394 		reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
1395 		return 0;
1396 	}
1397 	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
1398 	/* no queries on it, no space to keep it. or timeout or closed due
1399 	 * to error.  Close it */
1400 	reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
1401 		NETEVENT_TIMEOUT:NETEVENT_CLOSED));
1402 	use_free_buffer(outnet);
1403 	return 0;
1404 }
1405 
1406 /** lower use count on pc, see if it can be closed */
1407 static void
1408 portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
1409 {
1410 	struct port_if* pif;
1411 	pc->num_outstanding--;
1412 	if(pc->num_outstanding > 0) {
1413 		return;
1414 	}
1415 	/* close it and replace in unused list */
1416 	verbose(VERB_ALGO, "close of port %d", pc->number);
1417 	comm_point_close(pc->cp);
1418 	pif = pc->pif;
1419 	log_assert(pif->inuse > 0);
1420 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1421 	pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
1422 #endif
1423 	pif->inuse--;
1424 	pif->out[pc->index] = pif->out[pif->inuse];
1425 	pif->out[pc->index]->index = pc->index;
1426 	pc->next = outnet->unused_fds;
1427 	outnet->unused_fds = pc;
1428 }
1429 
1430 /** try to send waiting UDP queries */
1431 static void
1432 outnet_send_wait_udp(struct outside_network* outnet)
1433 {
1434 	struct pending* pend;
1435 	/* process waiting queries */
1436 	while(outnet->udp_wait_first && outnet->unused_fds
1437 		&& !outnet->want_to_quit) {
1438 		pend = outnet->udp_wait_first;
1439 		outnet->udp_wait_first = pend->next_waiting;
1440 		if(!pend->next_waiting) outnet->udp_wait_last = NULL;
1441 		sldns_buffer_clear(outnet->udp_buff);
1442 		sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
1443 		sldns_buffer_flip(outnet->udp_buff);
1444 		free(pend->pkt); /* freeing now makes get_mem correct */
1445 		pend->pkt = NULL;
1446 		pend->pkt_len = 0;
1447 		log_assert(!pend->sq->busy);
1448 		pend->sq->busy = 1;
1449 		if(!randomize_and_send_udp(pend, outnet->udp_buff,
1450 			pend->timeout)) {
1451 			/* callback error on pending */
1452 			if(pend->cb) {
1453 				fptr_ok(fptr_whitelist_pending_udp(pend->cb));
1454 				(void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
1455 					NETEVENT_CLOSED, NULL);
1456 			}
1457 			pending_delete(outnet, pend);
1458 		} else {
1459 			pend->sq->busy = 0;
1460 		}
1461 	}
1462 }
1463 
1464 int
1465 outnet_udp_cb(struct comm_point* c, void* arg, int error,
1466 	struct comm_reply *reply_info)
1467 {
1468 	struct outside_network* outnet = (struct outside_network*)arg;
1469 	struct pending key;
1470 	struct pending* p;
1471 	verbose(VERB_ALGO, "answer cb");
1472 
1473 	if(error != NETEVENT_NOERROR) {
1474 		verbose(VERB_QUERY, "outnetudp got udp error %d", error);
1475 		return 0;
1476 	}
1477 	if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1478 		verbose(VERB_QUERY, "outnetudp udp too short");
1479 		return 0;
1480 	}
1481 	log_assert(reply_info);
1482 
1483 	/* setup lookup key */
1484 	key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
1485 	memcpy(&key.addr, &reply_info->remote_addr, reply_info->remote_addrlen);
1486 	key.addrlen = reply_info->remote_addrlen;
1487 	verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
1488 	log_addr(VERB_ALGO, "Incoming reply addr =",
1489 		&reply_info->remote_addr, reply_info->remote_addrlen);
1490 
1491 	/* find it, see if this thing is a valid query response */
1492 	verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
1493 	p = (struct pending*)rbtree_search(outnet->pending, &key);
1494 	if(!p) {
1495 		verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
1496 		log_buf(VERB_ALGO, "dropped message", c->buffer);
1497 		outnet->unwanted_replies++;
1498 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1499 			>= outnet->unwanted_threshold) {
1500 			log_warn("unwanted reply total reached threshold (%u)"
1501 				" you may be under attack."
1502 				" defensive action: clearing the cache",
1503 				(unsigned)outnet->unwanted_threshold);
1504 			fptr_ok(fptr_whitelist_alloc_cleanup(
1505 				outnet->unwanted_action));
1506 			(*outnet->unwanted_action)(outnet->unwanted_param);
1507 			outnet->unwanted_total = 0;
1508 		}
1509 		return 0;
1510 	}
1511 
1512 	verbose(VERB_ALGO, "received udp reply.");
1513 	log_buf(VERB_ALGO, "udp message", c->buffer);
1514 	if(p->pc->cp != c) {
1515 		verbose(VERB_QUERY, "received reply id,addr on wrong port. "
1516 			"dropped.");
1517 		outnet->unwanted_replies++;
1518 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1519 			>= outnet->unwanted_threshold) {
1520 			log_warn("unwanted reply total reached threshold (%u)"
1521 				" you may be under attack."
1522 				" defensive action: clearing the cache",
1523 				(unsigned)outnet->unwanted_threshold);
1524 			fptr_ok(fptr_whitelist_alloc_cleanup(
1525 				outnet->unwanted_action));
1526 			(*outnet->unwanted_action)(outnet->unwanted_param);
1527 			outnet->unwanted_total = 0;
1528 		}
1529 		return 0;
1530 	}
1531 	comm_timer_disable(p->timer);
1532 	verbose(VERB_ALGO, "outnet handle udp reply");
1533 	/* delete from tree first in case callback creates a retry */
1534 	(void)rbtree_delete(outnet->pending, p->node.key);
1535 	if(p->cb) {
1536 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1537 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info);
1538 	}
1539 	portcomm_loweruse(outnet, p->pc);
1540 	pending_delete(NULL, p);
1541 	outnet_send_wait_udp(outnet);
1542 	return 0;
1543 }
1544 
1545 /** calculate number of ip4 and ip6 interfaces*/
1546 static void
1547 calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
1548 	int* num_ip4, int* num_ip6)
1549 {
1550 	int i;
1551 	*num_ip4 = 0;
1552 	*num_ip6 = 0;
1553 	if(num_ifs <= 0) {
1554 		if(do_ip4)
1555 			*num_ip4 = 1;
1556 		if(do_ip6)
1557 			*num_ip6 = 1;
1558 		return;
1559 	}
1560 	for(i=0; i<num_ifs; i++)
1561 	{
1562 		if(str_is_ip6(ifs[i])) {
1563 			if(do_ip6)
1564 				(*num_ip6)++;
1565 		} else {
1566 			if(do_ip4)
1567 				(*num_ip4)++;
1568 		}
1569 	}
1570 }
1571 
1572 void
1573 pending_udp_timer_delay_cb(void* arg)
1574 {
1575 	struct pending* p = (struct pending*)arg;
1576 	struct outside_network* outnet = p->outnet;
1577 	verbose(VERB_ALGO, "timeout udp with delay");
1578 	portcomm_loweruse(outnet, p->pc);
1579 	pending_delete(outnet, p);
1580 	outnet_send_wait_udp(outnet);
1581 }
1582 
1583 void
1584 pending_udp_timer_cb(void *arg)
1585 {
1586 	struct pending* p = (struct pending*)arg;
1587 	struct outside_network* outnet = p->outnet;
1588 	/* it timed out */
1589 	verbose(VERB_ALGO, "timeout udp");
1590 	if(p->cb) {
1591 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1592 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL);
1593 	}
1594 	/* if delayclose, keep port open for a longer time.
1595 	 * But if the udpwaitlist exists, then we are struggling to
1596 	 * keep up with demand for sockets, so do not wait, but service
1597 	 * the customer (customer service more important than portICMPs) */
1598 	if(outnet->delayclose && !outnet->udp_wait_first) {
1599 		p->cb = NULL;
1600 		p->timer->callback = &pending_udp_timer_delay_cb;
1601 		comm_timer_set(p->timer, &outnet->delay_tv);
1602 		return;
1603 	}
1604 	portcomm_loweruse(outnet, p->pc);
1605 	pending_delete(outnet, p);
1606 	outnet_send_wait_udp(outnet);
1607 }
1608 
1609 /** create pending_tcp buffers */
1610 static int
1611 create_pending_tcp(struct outside_network* outnet, size_t bufsize)
1612 {
1613 	size_t i;
1614 	if(outnet->num_tcp == 0)
1615 		return 1; /* no tcp needed, nothing to do */
1616 	if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
1617 			outnet->num_tcp, sizeof(struct pending_tcp*))))
1618 		return 0;
1619 	for(i=0; i<outnet->num_tcp; i++) {
1620 		if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
1621 			sizeof(struct pending_tcp))))
1622 			return 0;
1623 		outnet->tcp_conns[i]->next_free = outnet->tcp_free;
1624 		outnet->tcp_free = outnet->tcp_conns[i];
1625 		outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
1626 			outnet->base, bufsize, outnet_tcp_cb,
1627 			outnet->tcp_conns[i]);
1628 		if(!outnet->tcp_conns[i]->c)
1629 			return 0;
1630 	}
1631 	return 1;
1632 }
1633 
1634 /** setup an outgoing interface, ready address */
1635 static int setup_if(struct port_if* pif, const char* addrstr,
1636 	int* avail, int numavail, size_t numfd)
1637 {
1638 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1639 	pif->avail_total = numavail;
1640 	pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
1641 	if(!pif->avail_ports)
1642 		return 0;
1643 #endif
1644 	if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
1645 	   !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
1646 			      &pif->addr, &pif->addrlen, &pif->pfxlen))
1647 		return 0;
1648 	pif->maxout = (int)numfd;
1649 	pif->inuse = 0;
1650 	pif->out = (struct port_comm**)calloc(numfd,
1651 		sizeof(struct port_comm*));
1652 	if(!pif->out)
1653 		return 0;
1654 	return 1;
1655 }
1656 
1657 struct outside_network*
1658 outside_network_create(struct comm_base *base, size_t bufsize,
1659 	size_t num_ports, char** ifs, int num_ifs, int do_ip4,
1660 	int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
1661 	struct ub_randstate* rnd, int use_caps_for_id, int* availports,
1662 	int numavailports, size_t unwanted_threshold, int tcp_mss,
1663 	void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
1664 	void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
1665 	int udp_connect, int max_reuse_tcp_queries, int tcp_reuse_timeout,
1666 	int tcp_auth_query_timeout)
1667 {
1668 	struct outside_network* outnet = (struct outside_network*)
1669 		calloc(1, sizeof(struct outside_network));
1670 	size_t k;
1671 	if(!outnet) {
1672 		log_err("malloc failed");
1673 		return NULL;
1674 	}
1675 	comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
1676 	outnet->base = base;
1677 	outnet->num_tcp = num_tcp;
1678 	outnet->max_reuse_tcp_queries = max_reuse_tcp_queries;
1679 	outnet->tcp_reuse_timeout= tcp_reuse_timeout;
1680 	outnet->tcp_auth_query_timeout = tcp_auth_query_timeout;
1681 	outnet->num_tcp_outgoing = 0;
1682 	outnet->num_udp_outgoing = 0;
1683 	outnet->infra = infra;
1684 	outnet->rnd = rnd;
1685 	outnet->sslctx = sslctx;
1686 	outnet->tls_use_sni = tls_use_sni;
1687 #ifdef USE_DNSTAP
1688 	outnet->dtenv = dtenv;
1689 #else
1690 	(void)dtenv;
1691 #endif
1692 	outnet->svcd_overhead = 0;
1693 	outnet->want_to_quit = 0;
1694 	outnet->unwanted_threshold = unwanted_threshold;
1695 	outnet->unwanted_action = unwanted_action;
1696 	outnet->unwanted_param = unwanted_param;
1697 	outnet->use_caps_for_id = use_caps_for_id;
1698 	outnet->do_udp = do_udp;
1699 	outnet->tcp_mss = tcp_mss;
1700 	outnet->ip_dscp = dscp;
1701 #ifndef S_SPLINT_S
1702 	if(delayclose) {
1703 		outnet->delayclose = 1;
1704 		outnet->delay_tv.tv_sec = delayclose/1000;
1705 		outnet->delay_tv.tv_usec = (delayclose%1000)*1000;
1706 	}
1707 #endif
1708 	if(udp_connect) {
1709 		outnet->udp_connect = 1;
1710 	}
1711 	if(numavailports == 0 || num_ports == 0) {
1712 		log_err("no outgoing ports available");
1713 		outside_network_delete(outnet);
1714 		return NULL;
1715 	}
1716 #ifndef INET6
1717 	do_ip6 = 0;
1718 #endif
1719 	calc_num46(ifs, num_ifs, do_ip4, do_ip6,
1720 		&outnet->num_ip4, &outnet->num_ip6);
1721 	if(outnet->num_ip4 != 0) {
1722 		if(!(outnet->ip4_ifs = (struct port_if*)calloc(
1723 			(size_t)outnet->num_ip4, sizeof(struct port_if)))) {
1724 			log_err("malloc failed");
1725 			outside_network_delete(outnet);
1726 			return NULL;
1727 		}
1728 	}
1729 	if(outnet->num_ip6 != 0) {
1730 		if(!(outnet->ip6_ifs = (struct port_if*)calloc(
1731 			(size_t)outnet->num_ip6, sizeof(struct port_if)))) {
1732 			log_err("malloc failed");
1733 			outside_network_delete(outnet);
1734 			return NULL;
1735 		}
1736 	}
1737 	if(	!(outnet->udp_buff = sldns_buffer_new(bufsize)) ||
1738 		!(outnet->pending = rbtree_create(pending_cmp)) ||
1739 		!(outnet->serviced = rbtree_create(serviced_cmp)) ||
1740 		!create_pending_tcp(outnet, bufsize)) {
1741 		log_err("malloc failed");
1742 		outside_network_delete(outnet);
1743 		return NULL;
1744 	}
1745 	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1746 	outnet->tcp_reuse_max = num_tcp;
1747 
1748 	/* allocate commpoints */
1749 	for(k=0; k<num_ports; k++) {
1750 		struct port_comm* pc;
1751 		pc = (struct port_comm*)calloc(1, sizeof(*pc));
1752 		if(!pc) {
1753 			log_err("malloc failed");
1754 			outside_network_delete(outnet);
1755 			return NULL;
1756 		}
1757 		pc->cp = comm_point_create_udp(outnet->base, -1,
1758 			outnet->udp_buff, 0, outnet_udp_cb, outnet, NULL);
1759 		if(!pc->cp) {
1760 			log_err("malloc failed");
1761 			free(pc);
1762 			outside_network_delete(outnet);
1763 			return NULL;
1764 		}
1765 		pc->next = outnet->unused_fds;
1766 		outnet->unused_fds = pc;
1767 	}
1768 
1769 	/* allocate interfaces */
1770 	if(num_ifs == 0) {
1771 		if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0",
1772 			availports, numavailports, num_ports)) {
1773 			log_err("malloc failed");
1774 			outside_network_delete(outnet);
1775 			return NULL;
1776 		}
1777 		if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::",
1778 			availports, numavailports, num_ports)) {
1779 			log_err("malloc failed");
1780 			outside_network_delete(outnet);
1781 			return NULL;
1782 		}
1783 	} else {
1784 		size_t done_4 = 0, done_6 = 0;
1785 		int i;
1786 		for(i=0; i<num_ifs; i++) {
1787 			if(str_is_ip6(ifs[i]) && do_ip6) {
1788 				if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i],
1789 					availports, numavailports, num_ports)){
1790 					log_err("malloc failed");
1791 					outside_network_delete(outnet);
1792 					return NULL;
1793 				}
1794 				done_6++;
1795 			}
1796 			if(!str_is_ip6(ifs[i]) && do_ip4) {
1797 				if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i],
1798 					availports, numavailports, num_ports)){
1799 					log_err("malloc failed");
1800 					outside_network_delete(outnet);
1801 					return NULL;
1802 				}
1803 				done_4++;
1804 			}
1805 		}
1806 	}
1807 	return outnet;
1808 }
1809 
1810 /** helper pending delete */
1811 static void
1812 pending_node_del(rbnode_type* node, void* arg)
1813 {
1814 	struct pending* pend = (struct pending*)node;
1815 	struct outside_network* outnet = (struct outside_network*)arg;
1816 	pending_delete(outnet, pend);
1817 }
1818 
1819 /** helper serviced delete */
1820 static void
1821 serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg))
1822 {
1823 	struct serviced_query* sq = (struct serviced_query*)node;
1824 	alloc_reg_release(sq->alloc, sq->region);
1825 	if(sq->timer)
1826 		comm_timer_delete(sq->timer);
1827 	free(sq);
1828 }
1829 
1830 void
1831 outside_network_quit_prepare(struct outside_network* outnet)
1832 {
1833 	if(!outnet)
1834 		return;
1835 	/* prevent queued items from being sent */
1836 	outnet->want_to_quit = 1;
1837 }
1838 
1839 void
1840 outside_network_delete(struct outside_network* outnet)
1841 {
1842 	if(!outnet)
1843 		return;
1844 	outnet->want_to_quit = 1;
1845 	/* check every element, since we can be called on malloc error */
1846 	if(outnet->pending) {
1847 		/* free pending elements, but do no unlink from tree. */
1848 		traverse_postorder(outnet->pending, pending_node_del, NULL);
1849 		free(outnet->pending);
1850 	}
1851 	if(outnet->serviced) {
1852 		traverse_postorder(outnet->serviced, serviced_node_del, NULL);
1853 		free(outnet->serviced);
1854 	}
1855 	if(outnet->udp_buff)
1856 		sldns_buffer_free(outnet->udp_buff);
1857 	if(outnet->unused_fds) {
1858 		struct port_comm* p = outnet->unused_fds, *np;
1859 		while(p) {
1860 			np = p->next;
1861 			comm_point_delete(p->cp);
1862 			free(p);
1863 			p = np;
1864 		}
1865 		outnet->unused_fds = NULL;
1866 	}
1867 	if(outnet->ip4_ifs) {
1868 		int i, k;
1869 		for(i=0; i<outnet->num_ip4; i++) {
1870 			for(k=0; k<outnet->ip4_ifs[i].inuse; k++) {
1871 				struct port_comm* pc = outnet->ip4_ifs[i].
1872 					out[k];
1873 				comm_point_delete(pc->cp);
1874 				free(pc);
1875 			}
1876 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1877 			free(outnet->ip4_ifs[i].avail_ports);
1878 #endif
1879 			free(outnet->ip4_ifs[i].out);
1880 		}
1881 		free(outnet->ip4_ifs);
1882 	}
1883 	if(outnet->ip6_ifs) {
1884 		int i, k;
1885 		for(i=0; i<outnet->num_ip6; i++) {
1886 			for(k=0; k<outnet->ip6_ifs[i].inuse; k++) {
1887 				struct port_comm* pc = outnet->ip6_ifs[i].
1888 					out[k];
1889 				comm_point_delete(pc->cp);
1890 				free(pc);
1891 			}
1892 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1893 			free(outnet->ip6_ifs[i].avail_ports);
1894 #endif
1895 			free(outnet->ip6_ifs[i].out);
1896 		}
1897 		free(outnet->ip6_ifs);
1898 	}
1899 	if(outnet->tcp_conns) {
1900 		size_t i;
1901 		for(i=0; i<outnet->num_tcp; i++)
1902 			if(outnet->tcp_conns[i]) {
1903 				struct pending_tcp* pend;
1904 				pend = outnet->tcp_conns[i];
1905 				if(pend->reuse.item_on_lru_list) {
1906 					/* delete waiting_tcp elements that
1907 					 * the tcp conn is working on */
1908 					decommission_pending_tcp(outnet, pend);
1909 				}
1910 				comm_point_delete(outnet->tcp_conns[i]->c);
1911 				free(outnet->tcp_conns[i]);
1912 				outnet->tcp_conns[i] = NULL;
1913 			}
1914 		free(outnet->tcp_conns);
1915 		outnet->tcp_conns = NULL;
1916 	}
1917 	if(outnet->tcp_wait_first) {
1918 		struct waiting_tcp* p = outnet->tcp_wait_first, *np;
1919 		while(p) {
1920 			np = p->next_waiting;
1921 			waiting_tcp_delete(p);
1922 			p = np;
1923 		}
1924 	}
1925 	/* was allocated in struct pending that was deleted above */
1926 	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1927 	outnet->tcp_reuse_first = NULL;
1928 	outnet->tcp_reuse_last = NULL;
1929 	if(outnet->udp_wait_first) {
1930 		struct pending* p = outnet->udp_wait_first, *np;
1931 		while(p) {
1932 			np = p->next_waiting;
1933 			pending_delete(NULL, p);
1934 			p = np;
1935 		}
1936 	}
1937 	free(outnet);
1938 }
1939 
1940 void
1941 pending_delete(struct outside_network* outnet, struct pending* p)
1942 {
1943 	if(!p)
1944 		return;
1945 	if(outnet && outnet->udp_wait_first &&
1946 		(p->next_waiting || p == outnet->udp_wait_last) ) {
1947 		/* delete from waiting list, if it is in the waiting list */
1948 		struct pending* prev = NULL, *x = outnet->udp_wait_first;
1949 		while(x && x != p) {
1950 			prev = x;
1951 			x = x->next_waiting;
1952 		}
1953 		if(x) {
1954 			log_assert(x == p);
1955 			if(prev)
1956 				prev->next_waiting = p->next_waiting;
1957 			else	outnet->udp_wait_first = p->next_waiting;
1958 			if(outnet->udp_wait_last == p)
1959 				outnet->udp_wait_last = prev;
1960 		}
1961 	}
1962 	if(outnet) {
1963 		(void)rbtree_delete(outnet->pending, p->node.key);
1964 	}
1965 	if(p->timer)
1966 		comm_timer_delete(p->timer);
1967 	free(p->pkt);
1968 	free(p);
1969 }
1970 
1971 static void
1972 sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd)
1973 {
1974 	int i, last;
1975 	if(!(pfxlen > 0 && pfxlen < 128))
1976 		return;
1977 	for(i = 0; i < (128 - pfxlen) / 8; i++) {
1978 		sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256);
1979 	}
1980 	last = pfxlen & 7;
1981 	if(last != 0) {
1982 		sa->sin6_addr.s6_addr[15-i] |=
1983 			((0xFF >> last) & ub_random_max(rnd, 256));
1984 	}
1985 }
1986 
1987 /**
1988  * Try to open a UDP socket for outgoing communication.
1989  * Sets sockets options as needed.
1990  * @param addr: socket address.
1991  * @param addrlen: length of address.
1992  * @param pfxlen: length of network prefix (for address randomisation).
1993  * @param port: port override for addr.
1994  * @param inuse: if -1 is returned, this bool means the port was in use.
1995  * @param rnd: random state (for address randomisation).
1996  * @param dscp: DSCP to use.
1997  * @return fd or -1
1998  */
1999 static int
2000 udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen,
2001 	int port, int* inuse, struct ub_randstate* rnd, int dscp)
2002 {
2003 	int fd, noproto;
2004 	if(addr_is_ip6(addr, addrlen)) {
2005 		int freebind = 0;
2006 		struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
2007 		sa.sin6_port = (in_port_t)htons((uint16_t)port);
2008 		sa.sin6_flowinfo = 0;
2009 		sa.sin6_scope_id = 0;
2010 		if(pfxlen != 0) {
2011 			freebind = 1;
2012 			sai6_putrandom(&sa, pfxlen, rnd);
2013 		}
2014 		fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
2015 			(struct sockaddr*)&sa, addrlen, 1, inuse, &noproto,
2016 			0, 0, 0, NULL, 0, freebind, 0, dscp);
2017 	} else {
2018 		struct sockaddr_in* sa = (struct sockaddr_in*)addr;
2019 		sa->sin_port = (in_port_t)htons((uint16_t)port);
2020 		fd = create_udp_sock(AF_INET, SOCK_DGRAM,
2021 			(struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
2022 			0, 0, 0, NULL, 0, 0, 0, dscp);
2023 	}
2024 	return fd;
2025 }
2026 
2027 /** Select random ID */
2028 static int
2029 select_id(struct outside_network* outnet, struct pending* pend,
2030 	sldns_buffer* packet)
2031 {
2032 	int id_tries = 0;
2033 	pend->id = GET_RANDOM_ID(outnet->rnd);
2034 	LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
2035 
2036 	/* insert in tree */
2037 	pend->node.key = pend;
2038 	while(!rbtree_insert(outnet->pending, &pend->node)) {
2039 		/* change ID to avoid collision */
2040 		pend->id = GET_RANDOM_ID(outnet->rnd);
2041 		LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
2042 		id_tries++;
2043 		if(id_tries == MAX_ID_RETRY) {
2044 			pend->id=99999; /* non existent ID */
2045 			log_err("failed to generate unique ID, drop msg");
2046 			return 0;
2047 		}
2048 	}
2049 	verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id);
2050 	return 1;
2051 }
2052 
2053 /** return true is UDP connect error needs to be logged */
2054 static int udp_connect_needs_log(int err)
2055 {
2056 	switch(err) {
2057 	case ECONNREFUSED:
2058 #  ifdef ENETUNREACH
2059 	case ENETUNREACH:
2060 #  endif
2061 #  ifdef EHOSTDOWN
2062 	case EHOSTDOWN:
2063 #  endif
2064 #  ifdef EHOSTUNREACH
2065 	case EHOSTUNREACH:
2066 #  endif
2067 #  ifdef ENETDOWN
2068 	case ENETDOWN:
2069 #  endif
2070 #  ifdef EADDRNOTAVAIL
2071 	case EADDRNOTAVAIL:
2072 #  endif
2073 	case EPERM:
2074 	case EACCES:
2075 		if(verbosity >= VERB_ALGO)
2076 			return 1;
2077 		return 0;
2078 	default:
2079 		break;
2080 	}
2081 	return 1;
2082 }
2083 
2084 
2085 /** Select random interface and port */
2086 static int
2087 select_ifport(struct outside_network* outnet, struct pending* pend,
2088 	int num_if, struct port_if* ifs)
2089 {
2090 	int my_if, my_port, fd, portno, inuse, tries=0;
2091 	struct port_if* pif;
2092 	/* randomly select interface and port */
2093 	if(num_if == 0) {
2094 		verbose(VERB_QUERY, "Need to send query but have no "
2095 			"outgoing interfaces of that family");
2096 		return 0;
2097 	}
2098 	log_assert(outnet->unused_fds);
2099 	tries = 0;
2100 	while(1) {
2101 		my_if = ub_random_max(outnet->rnd, num_if);
2102 		pif = &ifs[my_if];
2103 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
2104 		if(outnet->udp_connect) {
2105 			/* if we connect() we cannot reuse fds for a port */
2106 			if(pif->inuse >= pif->avail_total) {
2107 				tries++;
2108 				if(tries < MAX_PORT_RETRY)
2109 					continue;
2110 				log_err("failed to find an open port, drop msg");
2111 				return 0;
2112 			}
2113 			my_port = pif->inuse + ub_random_max(outnet->rnd,
2114 				pif->avail_total - pif->inuse);
2115 		} else  {
2116 			my_port = ub_random_max(outnet->rnd, pif->avail_total);
2117 			if(my_port < pif->inuse) {
2118 				/* port already open */
2119 				pend->pc = pif->out[my_port];
2120 				verbose(VERB_ALGO, "using UDP if=%d port=%d",
2121 					my_if, pend->pc->number);
2122 				break;
2123 			}
2124 		}
2125 		/* try to open new port, if fails, loop to try again */
2126 		log_assert(pif->inuse < pif->maxout);
2127 		portno = pif->avail_ports[my_port - pif->inuse];
2128 #else
2129 		my_port = portno = 0;
2130 #endif
2131 		fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
2132 			portno, &inuse, outnet->rnd, outnet->ip_dscp);
2133 		if(fd == -1 && !inuse) {
2134 			/* nonrecoverable error making socket */
2135 			return 0;
2136 		}
2137 		if(fd != -1) {
2138 			verbose(VERB_ALGO, "opened UDP if=%d port=%d",
2139 				my_if, portno);
2140 			if(outnet->udp_connect) {
2141 				/* connect() to the destination */
2142 				if(connect(fd, (struct sockaddr*)&pend->addr,
2143 					pend->addrlen) < 0) {
2144 					if(udp_connect_needs_log(errno)) {
2145 						log_err_addr("udp connect failed",
2146 							strerror(errno), &pend->addr,
2147 							pend->addrlen);
2148 					}
2149 					sock_close(fd);
2150 					return 0;
2151 				}
2152 			}
2153 			/* grab fd */
2154 			pend->pc = outnet->unused_fds;
2155 			outnet->unused_fds = pend->pc->next;
2156 
2157 			/* setup portcomm */
2158 			pend->pc->next = NULL;
2159 			pend->pc->number = portno;
2160 			pend->pc->pif = pif;
2161 			pend->pc->index = pif->inuse;
2162 			pend->pc->num_outstanding = 0;
2163 			comm_point_start_listening(pend->pc->cp, fd, -1);
2164 
2165 			/* grab port in interface */
2166 			pif->out[pif->inuse] = pend->pc;
2167 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
2168 			pif->avail_ports[my_port - pif->inuse] =
2169 				pif->avail_ports[pif->avail_total-pif->inuse-1];
2170 #endif
2171 			pif->inuse++;
2172 			break;
2173 		}
2174 		/* failed, already in use */
2175 		verbose(VERB_QUERY, "port %d in use, trying another", portno);
2176 		tries++;
2177 		if(tries == MAX_PORT_RETRY) {
2178 			log_err("failed to find an open port, drop msg");
2179 			return 0;
2180 		}
2181 	}
2182 	log_assert(pend->pc);
2183 	pend->pc->num_outstanding++;
2184 
2185 	return 1;
2186 }
2187 
2188 static int
2189 randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout)
2190 {
2191 	struct timeval tv;
2192 	struct outside_network* outnet = pend->sq->outnet;
2193 
2194 	/* select id */
2195 	if(!select_id(outnet, pend, packet)) {
2196 		return 0;
2197 	}
2198 
2199 	/* select src_if, port */
2200 	if(addr_is_ip6(&pend->addr, pend->addrlen)) {
2201 		if(!select_ifport(outnet, pend,
2202 			outnet->num_ip6, outnet->ip6_ifs))
2203 			return 0;
2204 	} else {
2205 		if(!select_ifport(outnet, pend,
2206 			outnet->num_ip4, outnet->ip4_ifs))
2207 			return 0;
2208 	}
2209 	log_assert(pend->pc && pend->pc->cp);
2210 
2211 	/* send it over the commlink */
2212 	if(!comm_point_send_udp_msg(pend->pc->cp, packet,
2213 		(struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) {
2214 		portcomm_loweruse(outnet, pend->pc);
2215 		return 0;
2216 	}
2217 	outnet->num_udp_outgoing++;
2218 
2219 	/* system calls to set timeout after sending UDP to make roundtrip
2220 	   smaller. */
2221 #ifndef S_SPLINT_S
2222 	tv.tv_sec = timeout/1000;
2223 	tv.tv_usec = (timeout%1000)*1000;
2224 #endif
2225 	comm_timer_set(pend->timer, &tv);
2226 
2227 #ifdef USE_DNSTAP
2228 	/*
2229 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
2230 	 * There are no chances to get the src (local service) addr if unbound
2231 	 * is not configured with specific outgoing IP-addresses. So we will
2232 	 * pass 0.0.0.0 (::) to argument for
2233 	 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls.
2234 	 */
2235 	if(outnet->dtenv &&
2236 	   (outnet->dtenv->log_resolver_query_messages ||
2237 		outnet->dtenv->log_forwarder_query_messages)) {
2238 			log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen);
2239 			log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen);
2240 			dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp, NULL,
2241 				pend->sq->zone, pend->sq->zonelen, packet);
2242 	}
2243 #endif
2244 	return 1;
2245 }
2246 
2247 struct pending*
2248 pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet,
2249 	int timeout, comm_point_callback_type* cb, void* cb_arg)
2250 {
2251 	struct pending* pend = (struct pending*)calloc(1, sizeof(*pend));
2252 	if(!pend) return NULL;
2253 	pend->outnet = sq->outnet;
2254 	pend->sq = sq;
2255 	pend->addrlen = sq->addrlen;
2256 	memmove(&pend->addr, &sq->addr, sq->addrlen);
2257 	pend->cb = cb;
2258 	pend->cb_arg = cb_arg;
2259 	pend->node.key = pend;
2260 	pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb,
2261 		pend);
2262 	if(!pend->timer) {
2263 		free(pend);
2264 		return NULL;
2265 	}
2266 
2267 	if(sq->outnet->unused_fds == NULL) {
2268 		/* no unused fd, cannot create a new port (randomly) */
2269 		verbose(VERB_ALGO, "no fds available, udp query waiting");
2270 		pend->timeout = timeout;
2271 		pend->pkt_len = sldns_buffer_limit(packet);
2272 		pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet),
2273 			pend->pkt_len);
2274 		if(!pend->pkt) {
2275 			comm_timer_delete(pend->timer);
2276 			free(pend);
2277 			return NULL;
2278 		}
2279 		/* put at end of waiting list */
2280 		if(sq->outnet->udp_wait_last)
2281 			sq->outnet->udp_wait_last->next_waiting = pend;
2282 		else
2283 			sq->outnet->udp_wait_first = pend;
2284 		sq->outnet->udp_wait_last = pend;
2285 		return pend;
2286 	}
2287 	log_assert(!sq->busy);
2288 	sq->busy = 1;
2289 	if(!randomize_and_send_udp(pend, packet, timeout)) {
2290 		pending_delete(sq->outnet, pend);
2291 		return NULL;
2292 	}
2293 	sq->busy = 0;
2294 	return pend;
2295 }
2296 
2297 void
2298 outnet_tcptimer(void* arg)
2299 {
2300 	struct waiting_tcp* w = (struct waiting_tcp*)arg;
2301 	struct outside_network* outnet = w->outnet;
2302 	verbose(VERB_CLIENT, "outnet_tcptimer");
2303 	if(w->on_tcp_waiting_list) {
2304 		/* it is on the waiting list */
2305 		outnet_waiting_tcp_list_remove(outnet, w);
2306 		waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL);
2307 		waiting_tcp_delete(w);
2308 	} else {
2309 		/* it was in use */
2310 		struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting;
2311 		reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT);
2312 	}
2313 	use_free_buffer(outnet);
2314 }
2315 
2316 /** close the oldest reuse_tcp connection to make a fd and struct pend
2317  * available for a new stream connection */
2318 static void
2319 reuse_tcp_close_oldest(struct outside_network* outnet)
2320 {
2321 	struct reuse_tcp* reuse;
2322 	verbose(VERB_CLIENT, "reuse_tcp_close_oldest");
2323 	reuse = reuse_tcp_lru_snip(outnet);
2324 	if(!reuse) return;
2325 	/* free up */
2326 	reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED);
2327 }
2328 
2329 static uint16_t
2330 tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse)
2331 {
2332 	if(reuse)
2333 		return reuse_tcp_select_id(reuse, outnet);
2334 	return GET_RANDOM_ID(outnet->rnd);
2335 }
2336 
2337 /** find spare ID value for reuse tcp stream.  That is random and also does
2338  * not collide with an existing query ID that is in use or waiting */
2339 uint16_t
2340 reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet)
2341 {
2342 	uint16_t id = 0, curid, nextid;
2343 	const int try_random = 2000;
2344 	int i;
2345 	unsigned select, count, space;
2346 	rbnode_type* node;
2347 
2348 	/* make really sure the tree is not empty */
2349 	if(reuse->tree_by_id.count == 0) {
2350 		id = GET_RANDOM_ID(outnet->rnd);
2351 		return id;
2352 	}
2353 
2354 	/* try to find random empty spots by picking them */
2355 	for(i = 0; i<try_random; i++) {
2356 		id = GET_RANDOM_ID(outnet->rnd);
2357 		if(!reuse_tcp_by_id_find(reuse, id)) {
2358 			return id;
2359 		}
2360 	}
2361 
2362 	/* equally pick a random unused element from the tree that is
2363 	 * not in use.  Pick a the n-th index of an unused number,
2364 	 * then loop over the empty spaces in the tree and find it */
2365 	log_assert(reuse->tree_by_id.count < 0xffff);
2366 	select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count);
2367 	/* select value now in 0 .. num free - 1 */
2368 
2369 	count = 0; /* number of free spaces passed by */
2370 	node = rbtree_first(&reuse->tree_by_id);
2371 	log_assert(node && node != RBTREE_NULL); /* tree not empty */
2372 	/* see if select is before first node */
2373 	if(select < (unsigned)tree_by_id_get_id(node))
2374 		return select;
2375 	count += tree_by_id_get_id(node);
2376 	/* perhaps select is between nodes */
2377 	while(node && node != RBTREE_NULL) {
2378 		rbnode_type* next = rbtree_next(node);
2379 		if(next && next != RBTREE_NULL) {
2380 			curid = tree_by_id_get_id(node);
2381 			nextid = tree_by_id_get_id(next);
2382 			log_assert(curid < nextid);
2383 			if(curid != 0xffff && curid + 1 < nextid) {
2384 				/* space between nodes */
2385 				space = nextid - curid - 1;
2386 				log_assert(select >= count);
2387 				if(select < count + space) {
2388 					/* here it is */
2389 					return curid + 1 + (select - count);
2390 				}
2391 				count += space;
2392 			}
2393 		}
2394 		node = next;
2395 	}
2396 
2397 	/* select is after the last node */
2398 	/* count is the number of free positions before the nodes in the
2399 	 * tree */
2400 	node = rbtree_last(&reuse->tree_by_id);
2401 	log_assert(node && node != RBTREE_NULL); /* tree not empty */
2402 	curid = tree_by_id_get_id(node);
2403 	log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff);
2404 	return curid + 1 + (select - count);
2405 }
2406 
2407 struct waiting_tcp*
2408 pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
2409 	int timeout, comm_point_callback_type* callback, void* callback_arg)
2410 {
2411 	struct pending_tcp* pend = sq->outnet->tcp_free;
2412 	struct reuse_tcp* reuse = NULL;
2413 	struct waiting_tcp* w;
2414 
2415 	verbose(VERB_CLIENT, "pending_tcp_query");
2416 	if(sldns_buffer_limit(packet) < sizeof(uint16_t)) {
2417 		verbose(VERB_ALGO, "pending tcp query with too short buffer < 2");
2418 		return NULL;
2419 	}
2420 
2421 	/* find out if a reused stream to the target exists */
2422 	/* if so, take it into use */
2423 	reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen,
2424 		sq->ssl_upstream);
2425 	if(reuse) {
2426 		log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse);
2427 		log_assert(reuse->pending);
2428 		pend = reuse->pending;
2429 		reuse_tcp_lru_touch(sq->outnet, reuse);
2430 	}
2431 
2432 	log_assert(!reuse || (reuse && pend));
2433 	/* if !pend but we have reuse streams, close a reuse stream
2434 	 * to be able to open a new one to this target, no use waiting
2435 	 * to reuse a file descriptor while another query needs to use
2436 	 * that buffer and file descriptor now. */
2437 	if(!pend) {
2438 		reuse_tcp_close_oldest(sq->outnet);
2439 		pend = sq->outnet->tcp_free;
2440 		log_assert(!reuse || (pend == reuse->pending));
2441 	}
2442 
2443 	/* allocate space to store query */
2444 	w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp)
2445 		+ sldns_buffer_limit(packet));
2446 	if(!w) {
2447 		return NULL;
2448 	}
2449 	if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) {
2450 		free(w);
2451 		return NULL;
2452 	}
2453 	w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp);
2454 	w->pkt_len = sldns_buffer_limit(packet);
2455 	memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len);
2456 	w->id = tcp_select_id(sq->outnet, reuse);
2457 	LDNS_ID_SET(w->pkt, w->id);
2458 	memcpy(&w->addr, &sq->addr, sq->addrlen);
2459 	w->addrlen = sq->addrlen;
2460 	w->outnet = sq->outnet;
2461 	w->on_tcp_waiting_list = 0;
2462 	w->next_waiting = NULL;
2463 	w->cb = callback;
2464 	w->cb_arg = callback_arg;
2465 	w->ssl_upstream = sq->ssl_upstream;
2466 	w->tls_auth_name = sq->tls_auth_name;
2467 	w->timeout = timeout;
2468 	w->id_node.key = NULL;
2469 	w->write_wait_prev = NULL;
2470 	w->write_wait_next = NULL;
2471 	w->write_wait_queued = 0;
2472 	w->error_count = 0;
2473 #ifdef USE_DNSTAP
2474 	w->sq = NULL;
2475 #endif
2476 	w->in_cb_and_decommission = 0;
2477 	if(pend) {
2478 		/* we have a buffer available right now */
2479 		if(reuse) {
2480 			log_assert(reuse == &pend->reuse);
2481 			/* reuse existing fd, write query and continue */
2482 			/* store query in tree by id */
2483 			verbose(VERB_CLIENT, "pending_tcp_query: reuse, store");
2484 			w->next_waiting = (void*)pend;
2485 			reuse_tree_by_id_insert(&pend->reuse, w);
2486 			/* can we write right now? */
2487 			if(pend->query == NULL) {
2488 				/* write straight away */
2489 				/* stop the timer on read of the fd */
2490 				comm_point_stop_listening(pend->c);
2491 				pend->query = w;
2492 				outnet_tcp_take_query_setup(pend->c->fd, pend,
2493 					w);
2494 			} else {
2495 				/* put it in the waiting list for
2496 				 * this stream */
2497 				reuse_write_wait_push_back(&pend->reuse, w);
2498 			}
2499 		} else {
2500 			/* create new fd and connect to addr, setup to
2501 			 * write query */
2502 			verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect");
2503 			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
2504 			pend->reuse.pending = pend;
2505 			memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen);
2506 			pend->reuse.addrlen = sq->addrlen;
2507 			if(!outnet_tcp_take_into_use(w)) {
2508 				waiting_tcp_delete(w);
2509 				return NULL;
2510 			}
2511 		}
2512 #ifdef USE_DNSTAP
2513 		if(sq->outnet->dtenv &&
2514 		   (sq->outnet->dtenv->log_resolver_query_messages ||
2515 		    sq->outnet->dtenv->log_forwarder_query_messages)) {
2516 			/* use w->pkt, because it has the ID value */
2517 			sldns_buffer tmp;
2518 			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
2519 			dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr,
2520 				&pend->pi->addr, comm_tcp, NULL, sq->zone,
2521 				sq->zonelen, &tmp);
2522 		}
2523 #endif
2524 	} else {
2525 		/* queue up */
2526 		/* waiting for a buffer on the outside network buffer wait
2527 		 * list */
2528 		verbose(VERB_CLIENT, "pending_tcp_query: queue to wait");
2529 #ifdef USE_DNSTAP
2530 		w->sq = sq;
2531 #endif
2532 		outnet_waiting_tcp_list_add(sq->outnet, w, 1);
2533 	}
2534 	return w;
2535 }
2536 
2537 /** create query for serviced queries */
2538 static void
2539 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen,
2540 	uint16_t qtype, uint16_t qclass, uint16_t flags)
2541 {
2542 	sldns_buffer_clear(buff);
2543 	/* skip id */
2544 	sldns_buffer_write_u16(buff, flags);
2545 	sldns_buffer_write_u16(buff, 1); /* qdcount */
2546 	sldns_buffer_write_u16(buff, 0); /* ancount */
2547 	sldns_buffer_write_u16(buff, 0); /* nscount */
2548 	sldns_buffer_write_u16(buff, 0); /* arcount */
2549 	sldns_buffer_write(buff, qname, qnamelen);
2550 	sldns_buffer_write_u16(buff, qtype);
2551 	sldns_buffer_write_u16(buff, qclass);
2552 	sldns_buffer_flip(buff);
2553 }
2554 
2555 /** lookup serviced query in serviced query rbtree */
2556 static struct serviced_query*
2557 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2558 	struct sockaddr_storage* addr, socklen_t addrlen,
2559 	struct edns_option* opt_list)
2560 {
2561 	struct serviced_query key;
2562 	key.node.key = &key;
2563 	key.qbuf = sldns_buffer_begin(buff);
2564 	key.qbuflen = sldns_buffer_limit(buff);
2565 	key.dnssec = dnssec;
2566 	memcpy(&key.addr, addr, addrlen);
2567 	key.addrlen = addrlen;
2568 	key.outnet = outnet;
2569 	key.opt_list = opt_list;
2570 	return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
2571 }
2572 
2573 void
2574 serviced_timer_cb(void* arg)
2575 {
2576 	struct serviced_query* sq = (struct serviced_query*)arg;
2577 	struct outside_network* outnet = sq->outnet;
2578 	verbose(VERB_ALGO, "serviced send timer");
2579 	/* By the time this cb is called, if we don't have any registered
2580 	 * callbacks for this serviced_query anymore; do not send. */
2581 	if(!sq->cblist)
2582 		goto delete;
2583 	/* perform first network action */
2584 	if(outnet->do_udp && !(sq->tcp_upstream || sq->ssl_upstream)) {
2585 		if(!serviced_udp_send(sq, outnet->udp_buff))
2586 			goto delete;
2587 	} else {
2588 		if(!serviced_tcp_send(sq, outnet->udp_buff))
2589 			goto delete;
2590 	}
2591 	/* Maybe by this time we don't have callbacks attached anymore. Don't
2592 	 * proactively try to delete; let it run and maybe another callback
2593 	 * will get attached by the time we get an answer. */
2594 	return;
2595 delete:
2596 	serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
2597 }
2598 
2599 /** Create new serviced entry */
2600 static struct serviced_query*
2601 serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2602 	int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream,
2603 	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
2604 	uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list,
2605 	size_t pad_queries_block_size, struct alloc_cache* alloc,
2606 	struct regional* region)
2607 {
2608 	struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
2609 	struct timeval t;
2610 #ifdef UNBOUND_DEBUG
2611 	rbnode_type* ins;
2612 #endif
2613 	if(!sq) {
2614 		alloc_reg_release(alloc, region);
2615 		return NULL;
2616 	}
2617 	sq->node.key = sq;
2618 	sq->alloc = alloc;
2619 	sq->region = region;
2620 	sq->qbuf = regional_alloc_init(region, sldns_buffer_begin(buff),
2621 		sldns_buffer_limit(buff));
2622 	if(!sq->qbuf) {
2623 		alloc_reg_release(alloc, region);
2624 		free(sq);
2625 		return NULL;
2626 	}
2627 	sq->qbuflen = sldns_buffer_limit(buff);
2628 	sq->zone = regional_alloc_init(region, zone, zonelen);
2629 	if(!sq->zone) {
2630 		alloc_reg_release(alloc, region);
2631 		free(sq);
2632 		return NULL;
2633 	}
2634 	sq->zonelen = zonelen;
2635 	sq->qtype = qtype;
2636 	sq->dnssec = dnssec;
2637 	sq->want_dnssec = want_dnssec;
2638 	sq->nocaps = nocaps;
2639 	sq->tcp_upstream = tcp_upstream;
2640 	sq->ssl_upstream = ssl_upstream;
2641 	if(tls_auth_name) {
2642 		sq->tls_auth_name = regional_strdup(region, tls_auth_name);
2643 		if(!sq->tls_auth_name) {
2644 			alloc_reg_release(alloc, region);
2645 			free(sq);
2646 			return NULL;
2647 		}
2648 	} else {
2649 		sq->tls_auth_name = NULL;
2650 	}
2651 	memcpy(&sq->addr, addr, addrlen);
2652 	sq->addrlen = addrlen;
2653 	sq->opt_list = opt_list;
2654 	sq->busy = 0;
2655 	sq->timer = comm_timer_create(outnet->base, serviced_timer_cb, sq);
2656 	if(!sq->timer) {
2657 		alloc_reg_release(alloc, region);
2658 		free(sq);
2659 		return NULL;
2660 	}
2661 	memset(&t, 0, sizeof(t));
2662 	comm_timer_set(sq->timer, &t);
2663 	sq->outnet = outnet;
2664 	sq->cblist = NULL;
2665 	sq->pending = NULL;
2666 	sq->status = serviced_initial;
2667 	sq->retry = 0;
2668 	sq->to_be_deleted = 0;
2669 	sq->padding_block_size = pad_queries_block_size;
2670 #ifdef UNBOUND_DEBUG
2671 	ins =
2672 #else
2673 	(void)
2674 #endif
2675 	rbtree_insert(outnet->serviced, &sq->node);
2676 	log_assert(ins != NULL); /* must not be already present */
2677 	return sq;
2678 }
2679 
2680 /** reuse tcp stream, remove serviced query from stream,
2681  * return true if the stream is kept, false if it is to be closed */
2682 static int
2683 reuse_tcp_remove_serviced_keep(struct waiting_tcp* w,
2684 	struct serviced_query* sq)
2685 {
2686 	struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting;
2687 	verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep");
2688 	/* remove the callback. let query continue to write to not cancel
2689 	 * the stream itself.  also keep it as an entry in the tree_by_id,
2690 	 * in case the answer returns (that we no longer want), but we cannot
2691 	 * pick the same ID number meanwhile */
2692 	w->cb = NULL;
2693 	/* see if can be entered in reuse tree
2694 	 * for that the FD has to be non-1 */
2695 	if(pend_tcp->c->fd == -1) {
2696 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd");
2697 		return 0;
2698 	}
2699 	/* if in tree and used by other queries */
2700 	if(pend_tcp->reuse.node.key) {
2701 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries");
2702 		/* do not reset the keepalive timer, for that
2703 		 * we'd need traffic, and this is where the serviced is
2704 		 * removed due to state machine internal reasons,
2705 		 * eg. iterator no longer interested in this query */
2706 		return 1;
2707 	}
2708 	/* if still open and want to keep it open */
2709 	if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count <
2710 		sq->outnet->tcp_reuse_max) {
2711 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open");
2712 		/* set a keepalive timer on it */
2713 		if(!reuse_tcp_insert(sq->outnet, pend_tcp)) {
2714 			return 0;
2715 		}
2716 		reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout);
2717 		return 1;
2718 	}
2719 	return 0;
2720 }
2721 
2722 /** cleanup serviced query entry */
2723 static void
2724 serviced_delete(struct serviced_query* sq)
2725 {
2726 	verbose(VERB_CLIENT, "serviced_delete");
2727 	if(sq->pending) {
2728 		/* clear up the pending query */
2729 		if(sq->status == serviced_query_UDP_EDNS ||
2730 			sq->status == serviced_query_UDP ||
2731 			sq->status == serviced_query_UDP_EDNS_FRAG ||
2732 			sq->status == serviced_query_UDP_EDNS_fallback) {
2733 			struct pending* p = (struct pending*)sq->pending;
2734 			verbose(VERB_CLIENT, "serviced_delete: UDP");
2735 			if(p->pc)
2736 				portcomm_loweruse(sq->outnet, p->pc);
2737 			pending_delete(sq->outnet, p);
2738 			/* this call can cause reentrant calls back into the
2739 			 * mesh */
2740 			outnet_send_wait_udp(sq->outnet);
2741 		} else {
2742 			struct waiting_tcp* w = (struct waiting_tcp*)
2743 				sq->pending;
2744 			verbose(VERB_CLIENT, "serviced_delete: TCP");
2745 			log_assert(!(w->write_wait_queued && w->on_tcp_waiting_list));
2746 			/* if on stream-write-waiting list then
2747 			 * remove from waiting list and waiting_tcp_delete */
2748 			if(w->write_wait_queued) {
2749 				struct pending_tcp* pend =
2750 					(struct pending_tcp*)w->next_waiting;
2751 				verbose(VERB_CLIENT, "serviced_delete: writewait");
2752 				if(!w->in_cb_and_decommission)
2753 					reuse_tree_by_id_delete(&pend->reuse, w);
2754 				reuse_write_wait_remove(&pend->reuse, w);
2755 				if(!w->in_cb_and_decommission)
2756 					waiting_tcp_delete(w);
2757 			} else if(!w->on_tcp_waiting_list) {
2758 				struct pending_tcp* pend =
2759 					(struct pending_tcp*)w->next_waiting;
2760 				verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep");
2761 				/* w needs to stay on tree_by_id to not assign
2762 				 * the same ID; remove the callback since its
2763 				 * serviced_query will be gone. */
2764 				w->cb = NULL;
2765 				if(!reuse_tcp_remove_serviced_keep(w, sq)) {
2766 					if(!w->in_cb_and_decommission)
2767 						reuse_cb_and_decommission(sq->outnet,
2768 							pend, NETEVENT_CLOSED);
2769 					use_free_buffer(sq->outnet);
2770 				}
2771 				sq->pending = NULL;
2772 			} else {
2773 				verbose(VERB_CLIENT, "serviced_delete: tcpwait");
2774 				outnet_waiting_tcp_list_remove(sq->outnet, w);
2775 				if(!w->in_cb_and_decommission)
2776 					waiting_tcp_delete(w);
2777 			}
2778 		}
2779 	}
2780 	/* does not delete from tree, caller has to do that */
2781 	serviced_node_del(&sq->node, NULL);
2782 }
2783 
2784 /** perturb a dname capitalization randomly */
2785 static void
2786 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len)
2787 {
2788 	uint8_t lablen;
2789 	uint8_t* d = qbuf + 10;
2790 	long int random = 0;
2791 	int bits = 0;
2792 	log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */);
2793 	(void)len;
2794 	lablen = *d++;
2795 	while(lablen) {
2796 		while(lablen--) {
2797 			/* only perturb A-Z, a-z */
2798 			if(isalpha((unsigned char)*d)) {
2799 				/* get a random bit */
2800 				if(bits == 0) {
2801 					random = ub_random(rnd);
2802 					bits = 30;
2803 				}
2804 				if(random & 0x1) {
2805 					*d = (uint8_t)toupper((unsigned char)*d);
2806 				} else {
2807 					*d = (uint8_t)tolower((unsigned char)*d);
2808 				}
2809 				random >>= 1;
2810 				bits--;
2811 			}
2812 			d++;
2813 		}
2814 		lablen = *d++;
2815 	}
2816 	if(verbosity >= VERB_ALGO) {
2817 		char buf[LDNS_MAX_DOMAINLEN+1];
2818 		dname_str(qbuf+10, buf);
2819 		verbose(VERB_ALGO, "qname perturbed to %s", buf);
2820 	}
2821 }
2822 
2823 static uint16_t
2824 serviced_query_udp_size(struct serviced_query* sq, enum serviced_query_status status) {
2825 	uint16_t udp_size;
2826 	if(status == serviced_query_UDP_EDNS_FRAG) {
2827 		if(addr_is_ip6(&sq->addr, sq->addrlen)) {
2828 			if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE)
2829 				udp_size = EDNS_FRAG_SIZE_IP6;
2830 			else	udp_size = EDNS_ADVERTISED_SIZE;
2831 		} else {
2832 			if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE)
2833 				udp_size = EDNS_FRAG_SIZE_IP4;
2834 			else	udp_size = EDNS_ADVERTISED_SIZE;
2835 		}
2836 	} else {
2837 		udp_size = EDNS_ADVERTISED_SIZE;
2838 	}
2839 	return udp_size;
2840 }
2841 
2842 /** put serviced query into a buffer */
2843 static void
2844 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns)
2845 {
2846 	/* if we are using 0x20 bits for ID randomness, perturb them */
2847 	if(sq->outnet->use_caps_for_id && !sq->nocaps) {
2848 		serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen);
2849 	}
2850 	/* generate query */
2851 	sldns_buffer_clear(buff);
2852 	sldns_buffer_write_u16(buff, 0); /* id placeholder */
2853 	sldns_buffer_write(buff, sq->qbuf, sq->qbuflen);
2854 	sldns_buffer_flip(buff);
2855 	if(with_edns) {
2856 		/* add edns section */
2857 		struct edns_data edns;
2858 		struct edns_option padding_option;
2859 		edns.edns_present = 1;
2860 		edns.ext_rcode = 0;
2861 		edns.edns_version = EDNS_ADVERTISED_VERSION;
2862 		edns.opt_list_in = NULL;
2863 		edns.opt_list_out = sq->opt_list;
2864 		edns.opt_list_inplace_cb_out = NULL;
2865 		edns.udp_size = serviced_query_udp_size(sq, sq->status);
2866 		edns.bits = 0;
2867 		if(sq->dnssec & EDNS_DO)
2868 			edns.bits = EDNS_DO;
2869 		if(sq->dnssec & BIT_CD)
2870 			LDNS_CD_SET(sldns_buffer_begin(buff));
2871 		if (sq->ssl_upstream && sq->padding_block_size) {
2872 			padding_option.opt_code = LDNS_EDNS_PADDING;
2873 			padding_option.opt_len = 0;
2874 			padding_option.opt_data = NULL;
2875 			padding_option.next = edns.opt_list_out;
2876 			edns.opt_list_out = &padding_option;
2877 			edns.padding_block_size = sq->padding_block_size;
2878 		}
2879 		attach_edns_record(buff, &edns);
2880 	}
2881 }
2882 
2883 /**
2884  * Perform serviced query UDP sending operation.
2885  * Sends UDP with EDNS, unless infra host marked non EDNS.
2886  * @param sq: query to send.
2887  * @param buff: buffer scratch space.
2888  * @return 0 on error.
2889  */
2890 static int
2891 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff)
2892 {
2893 	int rtt, vs;
2894 	uint8_t edns_lame_known;
2895 	time_t now = *sq->outnet->now_secs;
2896 
2897 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
2898 		sq->zonelen, now, &vs, &edns_lame_known, &rtt))
2899 		return 0;
2900 	sq->last_rtt = rtt;
2901 	verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs);
2902 	if(sq->status == serviced_initial) {
2903 		if(vs != -1) {
2904 			sq->status = serviced_query_UDP_EDNS;
2905 		} else {
2906 			sq->status = serviced_query_UDP;
2907 		}
2908 	}
2909 	serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) ||
2910 		(sq->status == serviced_query_UDP_EDNS_FRAG));
2911 	sq->last_sent_time = *sq->outnet->now_tv;
2912 	sq->edns_lame_known = (int)edns_lame_known;
2913 	verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt);
2914 	sq->pending = pending_udp_query(sq, buff, rtt,
2915 		serviced_udp_callback, sq);
2916 	if(!sq->pending)
2917 		return 0;
2918 	return 1;
2919 }
2920 
2921 /** check that perturbed qname is identical */
2922 static int
2923 serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen)
2924 {
2925 	uint8_t* d1 = sldns_buffer_begin(pkt)+12;
2926 	uint8_t* d2 = qbuf+10;
2927 	uint8_t len1, len2;
2928 	int count = 0;
2929 	if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */
2930 		return 0;
2931 	log_assert(qbuflen >= 15 /* 10 header, root, type, class */);
2932 	len1 = *d1++;
2933 	len2 = *d2++;
2934 	while(len1 != 0 || len2 != 0) {
2935 		if(LABEL_IS_PTR(len1)) {
2936 			/* check if we can read *d1 with compression ptr rest */
2937 			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2938 				return 0;
2939 			d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1);
2940 			/* check if we can read the destination *d1 */
2941 			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2942 				return 0;
2943 			len1 = *d1++;
2944 			if(count++ > MAX_COMPRESS_PTRS)
2945 				return 0;
2946 			continue;
2947 		}
2948 		if(d2 > qbuf+qbuflen)
2949 			return 0;
2950 		if(len1 != len2)
2951 			return 0;
2952 		if(len1 > LDNS_MAX_LABELLEN)
2953 			return 0;
2954 		/* check len1 + 1(next length) are okay to read */
2955 		if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2956 			return 0;
2957 		log_assert(len1 <= LDNS_MAX_LABELLEN);
2958 		log_assert(len2 <= LDNS_MAX_LABELLEN);
2959 		log_assert(len1 == len2 && len1 != 0);
2960 		/* compare the labels - bitwise identical */
2961 		if(memcmp(d1, d2, len1) != 0)
2962 			return 0;
2963 		d1 += len1;
2964 		d2 += len2;
2965 		len1 = *d1++;
2966 		len2 = *d2++;
2967 	}
2968 	return 1;
2969 }
2970 
2971 /** call the callbacks for a serviced query */
2972 static void
2973 serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c,
2974 	struct comm_reply* rep)
2975 {
2976 	struct service_callback* p;
2977 	int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/
2978 	uint8_t *backup_p = NULL;
2979 	size_t backlen = 0;
2980 #ifdef UNBOUND_DEBUG
2981 	rbnode_type* rem =
2982 #else
2983 	(void)
2984 #endif
2985 	/* remove from tree, and schedule for deletion, so that callbacks
2986 	 * can safely deregister themselves and even create new serviced
2987 	 * queries that are identical to this one. */
2988 	rbtree_delete(sq->outnet->serviced, sq);
2989 	log_assert(rem); /* should have been present */
2990 	sq->to_be_deleted = 1;
2991 	verbose(VERB_ALGO, "svcd callbacks start");
2992 	if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c &&
2993 		!sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) {
2994 		/* for type PTR do not check perturbed name in answer,
2995 		 * compatibility with cisco dns guard boxes that mess up
2996 		 * reverse queries 0x20 contents */
2997 		/* noerror and nxdomain must have a qname in reply */
2998 		if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 &&
2999 			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3000 				== LDNS_RCODE_NOERROR ||
3001 			 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3002 				== LDNS_RCODE_NXDOMAIN)) {
3003 			verbose(VERB_DETAIL, "no qname in reply to check 0x20ID");
3004 			log_addr(VERB_DETAIL, "from server",
3005 				&sq->addr, sq->addrlen);
3006 			log_buf(VERB_DETAIL, "for packet", c->buffer);
3007 			error = NETEVENT_CLOSED;
3008 			c = NULL;
3009 		} else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 &&
3010 			!serviced_check_qname(c->buffer, sq->qbuf,
3011 			sq->qbuflen)) {
3012 			verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname");
3013 			log_addr(VERB_DETAIL, "from server",
3014 				&sq->addr, sq->addrlen);
3015 			log_buf(VERB_DETAIL, "for packet", c->buffer);
3016 			error = NETEVENT_CAPSFAIL;
3017 			/* and cleanup too */
3018 			pkt_dname_tolower(c->buffer,
3019 				sldns_buffer_at(c->buffer, 12));
3020 		} else {
3021 			verbose(VERB_ALGO, "good 0x20-ID in reply qname");
3022 			/* cleanup caps, prettier cache contents. */
3023 			pkt_dname_tolower(c->buffer,
3024 				sldns_buffer_at(c->buffer, 12));
3025 		}
3026 	}
3027 	if(dobackup && c) {
3028 		/* make a backup of the query, since the querystate processing
3029 		 * may send outgoing queries that overwrite the buffer.
3030 		 * use secondary buffer to store the query.
3031 		 * This is a data copy, but faster than packet to server */
3032 		backlen = sldns_buffer_limit(c->buffer);
3033 		backup_p = regional_alloc_init(sq->region,
3034 			sldns_buffer_begin(c->buffer), backlen);
3035 		if(!backup_p) {
3036 			log_err("malloc failure in serviced query callbacks");
3037 			error = NETEVENT_CLOSED;
3038 			c = NULL;
3039 		}
3040 		sq->outnet->svcd_overhead = backlen;
3041 	}
3042 	/* test the actual sq->cblist, because the next elem could be deleted*/
3043 	while((p=sq->cblist) != NULL) {
3044 		sq->cblist = p->next; /* remove this element */
3045 		if(dobackup && c) {
3046 			sldns_buffer_clear(c->buffer);
3047 			sldns_buffer_write(c->buffer, backup_p, backlen);
3048 			sldns_buffer_flip(c->buffer);
3049 		}
3050 		fptr_ok(fptr_whitelist_serviced_query(p->cb));
3051 		(void)(*p->cb)(c, p->cb_arg, error, rep);
3052 	}
3053 	if(backup_p) {
3054 		sq->outnet->svcd_overhead = 0;
3055 	}
3056 	verbose(VERB_ALGO, "svcd callbacks end");
3057 	log_assert(sq->cblist == NULL);
3058 	serviced_delete(sq);
3059 }
3060 
3061 int
3062 serviced_tcp_callback(struct comm_point* c, void* arg, int error,
3063         struct comm_reply* rep)
3064 {
3065 	struct serviced_query* sq = (struct serviced_query*)arg;
3066 	struct comm_reply r2;
3067 #ifdef USE_DNSTAP
3068 	struct waiting_tcp* w = (struct waiting_tcp*)sq->pending;
3069 	struct pending_tcp* pend_tcp = NULL;
3070 	struct port_if* pi = NULL;
3071 	if(w && !w->on_tcp_waiting_list && w->next_waiting) {
3072 		pend_tcp = (struct pending_tcp*)w->next_waiting;
3073 		pi = pend_tcp->pi;
3074 	}
3075 #endif
3076 	sq->pending = NULL; /* removed after this callback */
3077 	if(error != NETEVENT_NOERROR)
3078 		log_addr(VERB_QUERY, "tcp error for address",
3079 			&sq->addr, sq->addrlen);
3080 	if(error==NETEVENT_NOERROR)
3081 		infra_update_tcp_works(sq->outnet->infra, &sq->addr,
3082 			sq->addrlen, sq->zone, sq->zonelen);
3083 #ifdef USE_DNSTAP
3084 	/*
3085 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
3086 	 */
3087 	if(error==NETEVENT_NOERROR && pi && sq->outnet->dtenv &&
3088 	   (sq->outnet->dtenv->log_resolver_response_messages ||
3089 	    sq->outnet->dtenv->log_forwarder_response_messages)) {
3090 		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3091 		log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
3092 		dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr,
3093 			&pi->addr, c->type, c->ssl, sq->zone, sq->zonelen, sq->qbuf,
3094 			sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv,
3095 			c->buffer);
3096 	}
3097 #endif
3098 	if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS &&
3099 		(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3100 		LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin(
3101 		c->buffer)) == LDNS_RCODE_NOTIMPL) ) {
3102 		/* attempt to fallback to nonEDNS */
3103 		sq->status = serviced_query_TCP_EDNS_fallback;
3104 		serviced_tcp_initiate(sq, c->buffer);
3105 		return 0;
3106 	} else if(error==NETEVENT_NOERROR &&
3107 		sq->status == serviced_query_TCP_EDNS_fallback &&
3108 			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3109 			LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE(
3110 			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN
3111 			|| LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3112 			== LDNS_RCODE_YXDOMAIN)) {
3113 		/* the fallback produced a result that looks promising, note
3114 		 * that this server should be approached without EDNS */
3115 		/* only store noEDNS in cache if domain is noDNSSEC */
3116 		if(!sq->want_dnssec)
3117 		  if(!infra_edns_update(sq->outnet->infra, &sq->addr,
3118 			sq->addrlen, sq->zone, sq->zonelen, -1,
3119 			*sq->outnet->now_secs))
3120 			log_err("Out of memory caching no edns for host");
3121 		sq->status = serviced_query_TCP;
3122 	}
3123 	if(sq->tcp_upstream || sq->ssl_upstream) {
3124 	    struct timeval now = *sq->outnet->now_tv;
3125 	    if(error!=NETEVENT_NOERROR) {
3126 	        if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3127 		    sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3128 		    -1, sq->last_rtt, (time_t)now.tv_sec))
3129 		    log_err("out of memory in TCP exponential backoff.");
3130 	    } else if(now.tv_sec > sq->last_sent_time.tv_sec ||
3131 		(now.tv_sec == sq->last_sent_time.tv_sec &&
3132 		now.tv_usec > sq->last_sent_time.tv_usec)) {
3133 		/* convert from microseconds to milliseconds */
3134 		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3135 		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3136 		verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime);
3137 		log_assert(roundtime >= 0);
3138 		/* only store if less then AUTH_TIMEOUT seconds, it could be
3139 		 * huge due to system-hibernated and we woke up */
3140 		if(roundtime < 60000) {
3141 		    if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3142 			sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3143 			roundtime, sq->last_rtt, (time_t)now.tv_sec))
3144 			log_err("out of memory noting rtt.");
3145 		}
3146 	    }
3147 	}
3148 	/* insert address into reply info */
3149 	if(!rep) {
3150 		/* create one if there isn't (on errors) */
3151 		rep = &r2;
3152 		r2.c = c;
3153 	}
3154 	memcpy(&rep->remote_addr, &sq->addr, sq->addrlen);
3155 	rep->remote_addrlen = sq->addrlen;
3156 	serviced_callbacks(sq, error, c, rep);
3157 	return 0;
3158 }
3159 
3160 static void
3161 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff)
3162 {
3163 	verbose(VERB_ALGO, "initiate TCP query %s",
3164 		sq->status==serviced_query_TCP_EDNS?"EDNS":"");
3165 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3166 	sq->last_sent_time = *sq->outnet->now_tv;
3167 	log_assert(!sq->busy);
3168 	sq->busy = 1;
3169 	sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout,
3170 		serviced_tcp_callback, sq);
3171 	sq->busy = 0;
3172 	if(!sq->pending) {
3173 		/* delete from tree so that a retry by above layer does not
3174 		 * clash with this entry */
3175 		verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query");
3176 		serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
3177 	}
3178 }
3179 
3180 /** Send serviced query over TCP return false on initial failure */
3181 static int
3182 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff)
3183 {
3184 	int vs, rtt, timeout;
3185 	uint8_t edns_lame_known;
3186 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
3187 		sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known,
3188 		&rtt))
3189 		return 0;
3190 	sq->last_rtt = rtt;
3191 	if(vs != -1)
3192 		sq->status = serviced_query_TCP_EDNS;
3193 	else 	sq->status = serviced_query_TCP;
3194 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3195 	sq->last_sent_time = *sq->outnet->now_tv;
3196 	if(sq->tcp_upstream || sq->ssl_upstream) {
3197 		timeout = rtt;
3198 		if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout)
3199 			timeout = sq->outnet->tcp_auth_query_timeout;
3200 	} else {
3201 		timeout = sq->outnet->tcp_auth_query_timeout;
3202 	}
3203 	log_assert(!sq->busy);
3204 	sq->busy = 1;
3205 	sq->pending = pending_tcp_query(sq, buff, timeout,
3206 		serviced_tcp_callback, sq);
3207 	sq->busy = 0;
3208 	return sq->pending != NULL;
3209 }
3210 
3211 /* see if packet is edns malformed; got zeroes at start.
3212  * This is from servers that return malformed packets to EDNS0 queries,
3213  * but they return good packets for nonEDNS0 queries.
3214  * We try to detect their output; without resorting to a full parse or
3215  * check for too many bytes after the end of the packet. */
3216 static int
3217 packet_edns_malformed(struct sldns_buffer* buf, int qtype)
3218 {
3219 	size_t len;
3220 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE)
3221 		return 1; /* malformed */
3222 	/* they have NOERROR rcode, 1 answer. */
3223 	if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR)
3224 		return 0;
3225 	/* one query (to skip) and answer records */
3226 	if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 ||
3227 		LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0)
3228 		return 0;
3229 	/* skip qname */
3230 	len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE),
3231 		sldns_buffer_limit(buf)-LDNS_HEADER_SIZE);
3232 	if(len == 0)
3233 		return 0;
3234 	if(len == 1 && qtype == 0)
3235 		return 0; /* we asked for '.' and type 0 */
3236 	/* and then 4 bytes (type and class of query) */
3237 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3)
3238 		return 0;
3239 
3240 	/* and start with 11 zeroes as the answer RR */
3241 	/* so check the qtype of the answer record, qname=0, type=0 */
3242 	if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 &&
3243 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 &&
3244 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0)
3245 		return 1;
3246 	return 0;
3247 }
3248 
3249 int
3250 serviced_udp_callback(struct comm_point* c, void* arg, int error,
3251         struct comm_reply* rep)
3252 {
3253 	struct serviced_query* sq = (struct serviced_query*)arg;
3254 	struct outside_network* outnet = sq->outnet;
3255 	struct timeval now = *sq->outnet->now_tv;
3256 #ifdef USE_DNSTAP
3257 	struct pending* p = (struct pending*)sq->pending;
3258 #endif
3259 
3260 	sq->pending = NULL; /* removed after callback */
3261 	if(error == NETEVENT_TIMEOUT) {
3262 		if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000 &&
3263 		   (serviced_query_udp_size(sq, serviced_query_UDP_EDNS_FRAG) < serviced_query_udp_size(sq, serviced_query_UDP_EDNS))) {
3264 			/* fallback to 1480/1280 */
3265 			sq->status = serviced_query_UDP_EDNS_FRAG;
3266 			log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10,
3267 				&sq->addr, sq->addrlen);
3268 			if(!serviced_udp_send(sq, c->buffer)) {
3269 				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3270 			}
3271 			return 0;
3272 		}
3273 		if(sq->status == serviced_query_UDP_EDNS_FRAG) {
3274 			/* fragmentation size did not fix it */
3275 			sq->status = serviced_query_UDP_EDNS;
3276 		}
3277 		sq->retry++;
3278 		if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3279 			sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt,
3280 			(time_t)now.tv_sec))
3281 			log_err("out of memory in UDP exponential backoff");
3282 		if(sq->retry < OUTBOUND_UDP_RETRY) {
3283 			log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10,
3284 				&sq->addr, sq->addrlen);
3285 			if(!serviced_udp_send(sq, c->buffer)) {
3286 				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3287 			}
3288 			return 0;
3289 		}
3290 	}
3291 	if(error != NETEVENT_NOERROR) {
3292 		/* udp returns error (due to no ID or interface available) */
3293 		serviced_callbacks(sq, error, c, rep);
3294 		return 0;
3295 	}
3296 #ifdef USE_DNSTAP
3297 	/*
3298 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
3299 	 */
3300 	if(error == NETEVENT_NOERROR && outnet->dtenv && p->pc &&
3301 		(outnet->dtenv->log_resolver_response_messages ||
3302 		outnet->dtenv->log_forwarder_response_messages)) {
3303 		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3304 		log_addr(VERB_ALGO, "to local addr", &p->pc->pif->addr,
3305 			p->pc->pif->addrlen);
3306 		dt_msg_send_outside_response(outnet->dtenv, &sq->addr,
3307 			&p->pc->pif->addr, c->type, c->ssl, sq->zone, sq->zonelen,
3308 			sq->qbuf, sq->qbuflen, &sq->last_sent_time,
3309 			sq->outnet->now_tv, c->buffer);
3310 	}
3311 #endif
3312 	if( (sq->status == serviced_query_UDP_EDNS
3313 		||sq->status == serviced_query_UDP_EDNS_FRAG)
3314 		&& (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3315 			== LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(
3316 			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL
3317 		    || packet_edns_malformed(c->buffer, sq->qtype)
3318 			)) {
3319 		/* try to get an answer by falling back without EDNS */
3320 		verbose(VERB_ALGO, "serviced query: attempt without EDNS");
3321 		sq->status = serviced_query_UDP_EDNS_fallback;
3322 		sq->retry = 0;
3323 		if(!serviced_udp_send(sq, c->buffer)) {
3324 			serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3325 		}
3326 		return 0;
3327 	} else if(sq->status == serviced_query_UDP_EDNS &&
3328 		!sq->edns_lame_known) {
3329 		/* now we know that edns queries received answers store that */
3330 		log_addr(VERB_ALGO, "serviced query: EDNS works for",
3331 			&sq->addr, sq->addrlen);
3332 		if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3333 			sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) {
3334 			log_err("Out of memory caching edns works");
3335 		}
3336 		sq->edns_lame_known = 1;
3337 	} else if(sq->status == serviced_query_UDP_EDNS_fallback &&
3338 		!sq->edns_lame_known && (LDNS_RCODE_WIRE(
3339 		sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR ||
3340 		LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3341 		LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin(
3342 		c->buffer)) == LDNS_RCODE_YXDOMAIN)) {
3343 		/* the fallback produced a result that looks promising, note
3344 		 * that this server should be approached without EDNS */
3345 		/* only store noEDNS in cache if domain is noDNSSEC */
3346 		if(!sq->want_dnssec) {
3347 		  log_addr(VERB_ALGO, "serviced query: EDNS fails for",
3348 			&sq->addr, sq->addrlen);
3349 		  if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3350 			sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
3351 			log_err("Out of memory caching no edns for host");
3352 		  }
3353 		} else {
3354 		  log_addr(VERB_ALGO, "serviced query: EDNS fails, but "
3355 			"not stored because need DNSSEC for", &sq->addr,
3356 			sq->addrlen);
3357 		}
3358 		sq->status = serviced_query_UDP;
3359 	}
3360 	if(now.tv_sec > sq->last_sent_time.tv_sec ||
3361 		(now.tv_sec == sq->last_sent_time.tv_sec &&
3362 		now.tv_usec > sq->last_sent_time.tv_usec)) {
3363 		/* convert from microseconds to milliseconds */
3364 		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3365 		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3366 		verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime);
3367 		log_assert(roundtime >= 0);
3368 		/* in case the system hibernated, do not enter a huge value,
3369 		 * above this value gives trouble with server selection */
3370 		if(roundtime < 60000) {
3371 		    if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3372 			sq->zone, sq->zonelen, sq->qtype, roundtime,
3373 			sq->last_rtt, (time_t)now.tv_sec))
3374 			log_err("out of memory noting rtt.");
3375 		}
3376 	}
3377 	/* perform TC flag check and TCP fallback after updating our
3378 	 * cache entries for EDNS status and RTT times */
3379 	if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) {
3380 		/* fallback to TCP */
3381 		/* this discards partial UDP contents */
3382 		if(sq->status == serviced_query_UDP_EDNS ||
3383 			sq->status == serviced_query_UDP_EDNS_FRAG ||
3384 			sq->status == serviced_query_UDP_EDNS_fallback)
3385 			/* if we have unfinished EDNS_fallback, start again */
3386 			sq->status = serviced_query_TCP_EDNS;
3387 		else	sq->status = serviced_query_TCP;
3388 		serviced_tcp_initiate(sq, c->buffer);
3389 		return 0;
3390 	}
3391 	/* yay! an answer */
3392 	serviced_callbacks(sq, error, c, rep);
3393 	return 0;
3394 }
3395 
3396 struct serviced_query*
3397 outnet_serviced_query(struct outside_network* outnet,
3398 	struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
3399 	int nocaps, int check_ratelimit, int tcp_upstream, int ssl_upstream,
3400 	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
3401 	uint8_t* zone, size_t zonelen, struct module_qstate* qstate,
3402 	comm_point_callback_type* callback, void* callback_arg,
3403 	sldns_buffer* buff, struct module_env* env, int* was_ratelimited)
3404 {
3405 	struct serviced_query* sq;
3406 	struct service_callback* cb;
3407 	struct edns_string_addr* client_string_addr;
3408 	struct regional* region;
3409 	struct edns_option* backed_up_opt_list = qstate->edns_opts_back_out;
3410 	struct edns_option* per_upstream_opt_list = NULL;
3411 	time_t timenow = 0;
3412 
3413 	/* If we have an already populated EDNS option list make a copy since
3414 	 * we may now add upstream specific EDNS options. */
3415 	/* Use a region that could be attached to a serviced_query, if it needs
3416 	 * to be created. If an existing one is found then this region will be
3417 	 * destroyed here. */
3418 	region = alloc_reg_obtain(env->alloc);
3419 	if(!region) return NULL;
3420 	if(qstate->edns_opts_back_out) {
3421 		per_upstream_opt_list = edns_opt_copy_region(
3422 			qstate->edns_opts_back_out, region);
3423 		if(!per_upstream_opt_list) {
3424 			alloc_reg_release(env->alloc, region);
3425 			return NULL;
3426 		}
3427 		qstate->edns_opts_back_out = per_upstream_opt_list;
3428 	}
3429 
3430 	if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone,
3431 		zonelen, qstate, region)) {
3432 		alloc_reg_release(env->alloc, region);
3433 		return NULL;
3434 	}
3435 	/* Restore the option list; we can explicitly use the copied one from
3436 	 * now on. */
3437 	per_upstream_opt_list = qstate->edns_opts_back_out;
3438 	qstate->edns_opts_back_out = backed_up_opt_list;
3439 
3440 	if((client_string_addr = edns_string_addr_lookup(
3441 		&env->edns_strings->client_strings, addr, addrlen))) {
3442 		edns_opt_list_append(&per_upstream_opt_list,
3443 			env->edns_strings->client_string_opcode,
3444 			client_string_addr->string_len,
3445 			client_string_addr->string, region);
3446 	}
3447 
3448 	serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
3449 		qinfo->qclass, flags);
3450 	sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
3451 		per_upstream_opt_list);
3452 	if(!sq) {
3453 		/* Check ratelimit only for new serviced_query */
3454 		if(check_ratelimit) {
3455 			timenow = *env->now;
3456 			if(!infra_ratelimit_inc(env->infra_cache, zone,
3457 				zonelen, timenow, env->cfg->ratelimit_backoff,
3458 				&qstate->qinfo, qstate->reply)) {
3459 				/* Can we pass through with slip factor? */
3460 				if(env->cfg->ratelimit_factor == 0 ||
3461 					ub_random_max(env->rnd,
3462 					env->cfg->ratelimit_factor) != 1) {
3463 					*was_ratelimited = 1;
3464 					alloc_reg_release(env->alloc, region);
3465 					return NULL;
3466 				}
3467 				log_nametypeclass(VERB_ALGO,
3468 					"ratelimit allowed through for "
3469 					"delegation point", zone,
3470 					LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN);
3471 			}
3472 		}
3473 		/* make new serviced query entry */
3474 		sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
3475 			tcp_upstream, ssl_upstream, tls_auth_name, addr,
3476 			addrlen, zone, zonelen, (int)qinfo->qtype,
3477 			per_upstream_opt_list,
3478 			( ssl_upstream && env->cfg->pad_queries
3479 			? env->cfg->pad_queries_block_size : 0 ),
3480 			env->alloc, region);
3481 		if(!sq) {
3482 			if(check_ratelimit) {
3483 				infra_ratelimit_dec(env->infra_cache,
3484 					zone, zonelen, timenow);
3485 			}
3486 			return NULL;
3487 		}
3488 		if(!(cb = (struct service_callback*)regional_alloc(
3489 			sq->region, sizeof(*cb)))) {
3490 			if(check_ratelimit) {
3491 				infra_ratelimit_dec(env->infra_cache,
3492 					zone, zonelen, timenow);
3493 			}
3494 			(void)rbtree_delete(outnet->serviced, sq);
3495 			serviced_node_del(&sq->node, NULL);
3496 			return NULL;
3497 		}
3498 		/* No network action at this point; it will be invoked with the
3499 		 * serviced_query timer instead to run outside of the mesh. */
3500 	} else {
3501 		/* We don't need this region anymore. */
3502 		alloc_reg_release(env->alloc, region);
3503 		/* duplicate entries are included in the callback list, because
3504 		 * there is a counterpart registration by our caller that needs
3505 		 * to be doubly-removed (with callbacks perhaps). */
3506 		if(!(cb = (struct service_callback*)regional_alloc(
3507 			sq->region, sizeof(*cb)))) {
3508 			return NULL;
3509 		}
3510 	}
3511 	/* add callback to list of callbacks */
3512 	cb->cb = callback;
3513 	cb->cb_arg = callback_arg;
3514 	cb->next = sq->cblist;
3515 	sq->cblist = cb;
3516 	return sq;
3517 }
3518 
3519 /** remove callback from list */
3520 static void
3521 callback_list_remove(struct serviced_query* sq, void* cb_arg)
3522 {
3523 	struct service_callback** pp = &sq->cblist;
3524 	while(*pp) {
3525 		if((*pp)->cb_arg == cb_arg) {
3526 			struct service_callback* del = *pp;
3527 			*pp = del->next;
3528 			return;
3529 		}
3530 		pp = &(*pp)->next;
3531 	}
3532 }
3533 
3534 void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
3535 {
3536 	if(!sq)
3537 		return;
3538 	callback_list_remove(sq, cb_arg);
3539 	/* if callbacks() routine scheduled deletion, let it do that */
3540 	if(!sq->cblist && !sq->busy && !sq->to_be_deleted) {
3541 		(void)rbtree_delete(sq->outnet->serviced, sq);
3542 		serviced_delete(sq);
3543 	}
3544 }
3545 
3546 /** create fd to send to this destination */
3547 static int
3548 fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
3549 	socklen_t to_addrlen)
3550 {
3551 	struct sockaddr_storage* addr;
3552 	socklen_t addrlen;
3553 	int i, try, pnum, dscp;
3554 	struct port_if* pif;
3555 
3556 	/* create fd */
3557 	dscp = outnet->ip_dscp;
3558 	for(try = 0; try<1000; try++) {
3559 		int port = 0;
3560 		int freebind = 0;
3561 		int noproto = 0;
3562 		int inuse = 0;
3563 		int fd = -1;
3564 
3565 		/* select interface */
3566 		if(addr_is_ip6(to_addr, to_addrlen)) {
3567 			if(outnet->num_ip6 == 0) {
3568 				char to[64];
3569 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3570 				verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
3571 				return -1;
3572 			}
3573 			i = ub_random_max(outnet->rnd, outnet->num_ip6);
3574 			pif = &outnet->ip6_ifs[i];
3575 		} else {
3576 			if(outnet->num_ip4 == 0) {
3577 				char to[64];
3578 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3579 				verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
3580 				return -1;
3581 			}
3582 			i = ub_random_max(outnet->rnd, outnet->num_ip4);
3583 			pif = &outnet->ip4_ifs[i];
3584 		}
3585 		addr = &pif->addr;
3586 		addrlen = pif->addrlen;
3587 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3588 		pnum = ub_random_max(outnet->rnd, pif->avail_total);
3589 		if(pnum < pif->inuse) {
3590 			/* port already open */
3591 			port = pif->out[pnum]->number;
3592 		} else {
3593 			/* unused ports in start part of array */
3594 			port = pif->avail_ports[pnum - pif->inuse];
3595 		}
3596 #else
3597 		pnum = port = 0;
3598 #endif
3599 		if(addr_is_ip6(to_addr, to_addrlen)) {
3600 			struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
3601 			sa.sin6_port = (in_port_t)htons((uint16_t)port);
3602 			fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
3603 				(struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
3604 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3605 		} else {
3606 			struct sockaddr_in* sa = (struct sockaddr_in*)addr;
3607 			sa->sin_port = (in_port_t)htons((uint16_t)port);
3608 			fd = create_udp_sock(AF_INET, SOCK_DGRAM,
3609 				(struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
3610 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3611 		}
3612 		if(fd != -1) {
3613 			return fd;
3614 		}
3615 		if(!inuse) {
3616 			return -1;
3617 		}
3618 	}
3619 	/* too many tries */
3620 	log_err("cannot send probe, ports are in use");
3621 	return -1;
3622 }
3623 
3624 struct comm_point*
3625 outnet_comm_point_for_udp(struct outside_network* outnet,
3626 	comm_point_callback_type* cb, void* cb_arg,
3627 	struct sockaddr_storage* to_addr, socklen_t to_addrlen)
3628 {
3629 	struct comm_point* cp;
3630 	int fd = fd_for_dest(outnet, to_addr, to_addrlen);
3631 	if(fd == -1) {
3632 		return NULL;
3633 	}
3634 	cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff, 0,
3635 		cb, cb_arg, NULL);
3636 	if(!cp) {
3637 		log_err("malloc failure");
3638 		close(fd);
3639 		return NULL;
3640 	}
3641 	return cp;
3642 }
3643 
3644 /** setup SSL for comm point */
3645 static int
3646 setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
3647 	int fd, char* host)
3648 {
3649 	cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
3650 	if(!cp->ssl) {
3651 		log_err("cannot create SSL object");
3652 		return 0;
3653 	}
3654 #ifdef USE_WINSOCK
3655 	comm_point_tcp_win_bio_cb(cp, cp->ssl);
3656 #endif
3657 	cp->ssl_shake_state = comm_ssl_shake_write;
3658 	/* https verification */
3659 #ifdef HAVE_SSL
3660 	if(outnet->tls_use_sni) {
3661 		(void)SSL_set_tlsext_host_name(cp->ssl, host);
3662 	}
3663 #endif
3664 #ifdef HAVE_SSL_SET1_HOST
3665 	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3666 		/* because we set SSL_VERIFY_PEER, in netevent in
3667 		 * ssl_handshake, it'll check if the certificate
3668 		 * verification has succeeded */
3669 		/* SSL_VERIFY_PEER is set on the sslctx */
3670 		/* and the certificates to verify with are loaded into
3671 		 * it with SSL_load_verify_locations or
3672 		 * SSL_CTX_set_default_verify_paths */
3673 		/* setting the hostname makes openssl verify the
3674 		 * host name in the x509 certificate in the
3675 		 * SSL connection*/
3676 		if(!SSL_set1_host(cp->ssl, host)) {
3677 			log_err("SSL_set1_host failed");
3678 			return 0;
3679 		}
3680 	}
3681 #elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
3682 	/* openssl 1.0.2 has this function that can be used for
3683 	 * set1_host like verification */
3684 	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3685 		X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl);
3686 #  ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
3687 		X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
3688 #  endif
3689 		if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
3690 			log_err("X509_VERIFY_PARAM_set1_host failed");
3691 			return 0;
3692 		}
3693 	}
3694 #else
3695 	(void)host;
3696 #endif /* HAVE_SSL_SET1_HOST */
3697 	return 1;
3698 }
3699 
3700 struct comm_point*
3701 outnet_comm_point_for_tcp(struct outside_network* outnet,
3702 	comm_point_callback_type* cb, void* cb_arg,
3703 	struct sockaddr_storage* to_addr, socklen_t to_addrlen,
3704 	sldns_buffer* query, int timeout, int ssl, char* host)
3705 {
3706 	struct comm_point* cp;
3707 	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3708 	if(fd == -1) {
3709 		return 0;
3710 	}
3711 	fd_set_nonblock(fd);
3712 	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3713 		/* outnet_tcp_connect has closed fd on error for us */
3714 		return 0;
3715 	}
3716 	cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
3717 	if(!cp) {
3718 		log_err("malloc failure");
3719 		close(fd);
3720 		return 0;
3721 	}
3722 	cp->repinfo.remote_addrlen = to_addrlen;
3723 	memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);
3724 
3725 	/* setup for SSL (if needed) */
3726 	if(ssl) {
3727 		if(!setup_comm_ssl(cp, outnet, fd, host)) {
3728 			log_err("cannot setup XoT");
3729 			comm_point_delete(cp);
3730 			return NULL;
3731 		}
3732 	}
3733 
3734 	/* set timeout on TCP connection */
3735 	comm_point_start_listening(cp, fd, timeout);
3736 	/* copy scratch buffer to cp->buffer */
3737 	sldns_buffer_copy(cp->buffer, query);
3738 	return cp;
3739 }
3740 
3741 /** setup the User-Agent HTTP header based on http-user-agent configuration */
3742 static void
3743 setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
3744 {
3745 	if(cfg->hide_http_user_agent) return;
3746 	if(cfg->http_user_agent==NULL || cfg->http_user_agent[0] == 0) {
3747 		sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME,
3748 			PACKAGE_VERSION);
3749 	} else {
3750 		sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
3751 	}
3752 }
3753 
3754 /** setup http request headers in buffer for sending query to destination */
3755 static int
3756 setup_http_request(sldns_buffer* buf, char* host, char* path,
3757 	struct config_file* cfg)
3758 {
3759 	sldns_buffer_clear(buf);
3760 	sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
3761 	sldns_buffer_printf(buf, "Host: %s\r\n", host);
3762 	setup_http_user_agent(buf, cfg);
3763 	/* We do not really do multiple queries per connection,
3764 	 * but this header setting is also not needed.
3765 	 * sldns_buffer_printf(buf, "Connection: close\r\n") */
3766 	sldns_buffer_printf(buf, "\r\n");
3767 	if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
3768 		return 0; /* somehow buffer too short, but it is about 60K
3769 		and the request is only a couple bytes long. */
3770 	sldns_buffer_flip(buf);
3771 	return 1;
3772 }
3773 
3774 struct comm_point*
3775 outnet_comm_point_for_http(struct outside_network* outnet,
3776 	comm_point_callback_type* cb, void* cb_arg,
3777 	struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
3778 	int ssl, char* host, char* path, struct config_file* cfg)
3779 {
3780 	/* cp calls cb with err=NETEVENT_DONE when transfer is done */
3781 	struct comm_point* cp;
3782 	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3783 	if(fd == -1) {
3784 		return 0;
3785 	}
3786 	fd_set_nonblock(fd);
3787 	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3788 		/* outnet_tcp_connect has closed fd on error for us */
3789 		return 0;
3790 	}
3791 	cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
3792 		outnet->udp_buff);
3793 	if(!cp) {
3794 		log_err("malloc failure");
3795 		close(fd);
3796 		return 0;
3797 	}
3798 	cp->repinfo.remote_addrlen = to_addrlen;
3799 	memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);
3800 
3801 	/* setup for SSL (if needed) */
3802 	if(ssl) {
3803 		if(!setup_comm_ssl(cp, outnet, fd, host)) {
3804 			log_err("cannot setup https");
3805 			comm_point_delete(cp);
3806 			return NULL;
3807 		}
3808 	}
3809 
3810 	/* set timeout on TCP connection */
3811 	comm_point_start_listening(cp, fd, timeout);
3812 
3813 	/* setup http request in cp->buffer */
3814 	if(!setup_http_request(cp->buffer, host, path, cfg)) {
3815 		log_err("error setting up http request");
3816 		comm_point_delete(cp);
3817 		return NULL;
3818 	}
3819 	return cp;
3820 }
3821 
3822 /** get memory used by waiting tcp entry (in use or not) */
3823 static size_t
3824 waiting_tcp_get_mem(struct waiting_tcp* w)
3825 {
3826 	size_t s;
3827 	if(!w) return 0;
3828 	s = sizeof(*w) + w->pkt_len;
3829 	if(w->timer)
3830 		s += comm_timer_get_mem(w->timer);
3831 	return s;
3832 }
3833 
3834 /** get memory used by port if */
3835 static size_t
3836 if_get_mem(struct port_if* pif)
3837 {
3838 	size_t s;
3839 	int i;
3840 	s = sizeof(*pif) +
3841 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3842 	    sizeof(int)*pif->avail_total +
3843 #endif
3844 		sizeof(struct port_comm*)*pif->maxout;
3845 	for(i=0; i<pif->inuse; i++)
3846 		s += sizeof(*pif->out[i]) +
3847 			comm_point_get_mem(pif->out[i]->cp);
3848 	return s;
3849 }
3850 
3851 /** get memory used by waiting udp */
3852 static size_t
3853 waiting_udp_get_mem(struct pending* w)
3854 {
3855 	size_t s;
3856 	s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
3857 	return s;
3858 }
3859 
3860 size_t outnet_get_mem(struct outside_network* outnet)
3861 {
3862 	size_t i;
3863 	int k;
3864 	struct waiting_tcp* w;
3865 	struct pending* u;
3866 	struct serviced_query* sq;
3867 	struct service_callback* sb;
3868 	struct port_comm* pc;
3869 	size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
3870 		sizeof(*outnet->udp_buff) +
3871 		sldns_buffer_capacity(outnet->udp_buff);
3872 	/* second buffer is not ours */
3873 	for(pc = outnet->unused_fds; pc; pc = pc->next) {
3874 		s += sizeof(*pc) + comm_point_get_mem(pc->cp);
3875 	}
3876 	for(k=0; k<outnet->num_ip4; k++)
3877 		s += if_get_mem(&outnet->ip4_ifs[k]);
3878 	for(k=0; k<outnet->num_ip6; k++)
3879 		s += if_get_mem(&outnet->ip6_ifs[k]);
3880 	for(u=outnet->udp_wait_first; u; u=u->next_waiting)
3881 		s += waiting_udp_get_mem(u);
3882 
3883 	s += sizeof(struct pending_tcp*)*outnet->num_tcp;
3884 	for(i=0; i<outnet->num_tcp; i++) {
3885 		s += sizeof(struct pending_tcp);
3886 		s += comm_point_get_mem(outnet->tcp_conns[i]->c);
3887 		if(outnet->tcp_conns[i]->query)
3888 			s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
3889 	}
3890 	for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
3891 		s += waiting_tcp_get_mem(w);
3892 	s += sizeof(*outnet->pending);
3893 	s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
3894 		outnet->pending->count;
3895 	s += sizeof(*outnet->serviced);
3896 	s += outnet->svcd_overhead;
3897 	RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
3898 		s += sizeof(*sq) + sq->qbuflen;
3899 		for(sb = sq->cblist; sb; sb = sb->next)
3900 			s += sizeof(*sb);
3901 	}
3902 	return s;
3903 }
3904 
3905 size_t
3906 serviced_get_mem(struct serviced_query* sq)
3907 {
3908 	struct service_callback* sb;
3909 	size_t s;
3910 	s = sizeof(*sq) + sq->qbuflen;
3911 	for(sb = sq->cblist; sb; sb = sb->next)
3912 		s += sizeof(*sb);
3913 	if(sq->status == serviced_query_UDP_EDNS ||
3914 		sq->status == serviced_query_UDP ||
3915 		sq->status == serviced_query_UDP_EDNS_FRAG ||
3916 		sq->status == serviced_query_UDP_EDNS_fallback) {
3917 		s += sizeof(struct pending);
3918 		s += comm_timer_get_mem(NULL);
3919 	} else {
3920 		/* does not have size of the pkt pointer */
3921 		/* always has a timer except on malloc failures */
3922 
3923 		/* these sizes are part of the main outside network mem */
3924 		/*
3925 		s += sizeof(struct waiting_tcp);
3926 		s += comm_timer_get_mem(NULL);
3927 		*/
3928 	}
3929 	return s;
3930 }
3931 
3932