xref: /freebsd/contrib/unbound/services/outside_network.c (revision be771a7b7f4580a30d99e41a5bb1b93a385a119d)
1 /*
2  * services/outside_network.c - implement sending of queries and wait answer.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file has functions to send queries to authoritative servers and
40  * wait for the pending answer events.
41  */
42 #include "config.h"
43 #include <ctype.h>
44 #ifdef HAVE_SYS_TYPES_H
45 #  include <sys/types.h>
46 #endif
47 #include <sys/time.h>
48 #include "services/outside_network.h"
49 #include "services/listen_dnsport.h"
50 #include "services/cache/infra.h"
51 #include "iterator/iterator.h"
52 #include "util/data/msgparse.h"
53 #include "util/data/msgreply.h"
54 #include "util/data/msgencode.h"
55 #include "util/data/dname.h"
56 #include "util/netevent.h"
57 #include "util/log.h"
58 #include "util/net_help.h"
59 #include "util/random.h"
60 #include "util/fptr_wlist.h"
61 #include "util/edns.h"
62 #include "sldns/sbuffer.h"
63 #include "dnstap/dnstap.h"
64 #ifdef HAVE_OPENSSL_SSL_H
65 #include <openssl/ssl.h>
66 #endif
67 #ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
68 #include <openssl/x509v3.h>
69 #endif
70 
71 #ifdef HAVE_NETDB_H
72 #include <netdb.h>
73 #endif
74 #include <fcntl.h>
75 
76 /** number of times to retry making a random ID that is unique. */
77 #define MAX_ID_RETRY 1000
78 /** number of times to retry finding interface, port that can be opened. */
79 #define MAX_PORT_RETRY 10000
80 /** number of retries on outgoing UDP queries */
81 #define OUTBOUND_UDP_RETRY 1
82 
83 /** initiate TCP transaction for serviced query */
84 static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
85 /** with a fd available, randomize and send UDP */
86 static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
87 	int timeout);
88 
89 /** select a DNS ID for a TCP stream */
90 static uint16_t tcp_select_id(struct outside_network* outnet,
91 	struct reuse_tcp* reuse);
92 
93 /** Perform serviced query UDP sending operation */
94 static int serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff);
95 
96 /** Send serviced query over TCP; returns false on initial failure */
97 static int serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff);
98 
99 /** call the callbacks for a serviced query */
100 static void serviced_callbacks(struct serviced_query* sq, int error,
101 	struct comm_point* c, struct comm_reply* rep);
102 
103 int
104 pending_cmp(const void* key1, const void* key2)
105 {
106 	struct pending *p1 = (struct pending*)key1;
107 	struct pending *p2 = (struct pending*)key2;
108 	if(p1->id < p2->id)
109 		return -1;
110 	if(p1->id > p2->id)
111 		return 1;
112 	log_assert(p1->id == p2->id);
113 	return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
114 }
115 
116 int
117 serviced_cmp(const void* key1, const void* key2)
118 {
119 	struct serviced_query* q1 = (struct serviced_query*)key1;
120 	struct serviced_query* q2 = (struct serviced_query*)key2;
121 	int r;
122 	if(q1->qbuflen < q2->qbuflen)
123 		return -1;
124 	if(q1->qbuflen > q2->qbuflen)
125 		return 1;
126 	log_assert(q1->qbuflen == q2->qbuflen);
127 	log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
128 	/* alternate casing of qname is still the same query */
129 	if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
130 		return r;
131 	if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
132 		return r;
133 	if(q1->dnssec != q2->dnssec) {
134 		if(q1->dnssec < q2->dnssec)
135 			return -1;
136 		return 1;
137 	}
138 	if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
139 		return r;
140 	if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
141 		return r;
142 	return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
143 }
144 
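/*
 * A minimal sketch of how the comparison above behaves: two lookups that
 * differ only in the letter case of the query name compare equal, because
 * serviced_cmp() checks the fixed header bytes, type/class, the dnssec
 * flag, the EDNS option list and the destination address, and uses the
 * case-insensitive query_dname_compare() for the name itself.  Roughly,
 * assuming a and b were filled in with such packets:
 *
 *	struct serviced_query a, b;
 *	... fill a.qbuf/a.qbuflen with a query for "www.ExAmPle.com" IN A ...
 *	... fill b.qbuf/b.qbuflen with a query for "www.example.com" IN A ...
 *	... same dnssec, opt_list, addr and addrlen for both ...
 *	log_assert(serviced_cmp(&a, &b) == 0);
 */
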
145 /** compare if the reuse element has the same address, port and the same
146  * ssl-is-used-for-it characteristic */
147 static int
148 reuse_cmp_addrportssl(const void* key1, const void* key2)
149 {
150 	struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
151 	struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
152 	int r;
153 	/* compare address and port */
154 	r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
155 	if(r != 0)
156 		return r;
157 
158 	/* compare if SSL-enabled */
159 	if(r1->is_ssl && !r2->is_ssl)
160 		return 1;
161 	if(!r1->is_ssl && r2->is_ssl)
162 		return -1;
163 	return 0;
164 }
165 
166 int
167 reuse_cmp(const void* key1, const void* key2)
168 {
169 	int r;
170 	r = reuse_cmp_addrportssl(key1, key2);
171 	if(r != 0)
172 		return r;
173 
174 	/* compare ptr value */
175 	if(key1 < key2) return -1;
176 	if(key1 > key2) return 1;
177 	return 0;
178 }
179 
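/*
 * A minimal sketch of how the comparators are meant to be used: reuse_cmp()
 * orders streams by destination address, port and whether TLS is used, then
 * falls back to the pointer value, so several open streams to the same
 * destination can coexist as distinct nodes while sorting next to each
 * other.  The outnet->tcp_reuse tree is presumably initialized with it,
 * roughly:
 *
 *	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
 *
 * while reuse_id_cmp() below orders each stream's tree_by_id of outstanding
 * queries by DNS ID, as done later in this file:
 *
 *	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
 */
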
180 int reuse_id_cmp(const void* key1, const void* key2)
181 {
182 	struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
183 	struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
184 	if(w1->id < w2->id)
185 		return -1;
186 	if(w1->id > w2->id)
187 		return 1;
188 	return 0;
189 }
190 
191 /** delete waiting_tcp entry. Does not unlink from waiting list.
192  * @param w: to delete.
193  */
194 static void
195 waiting_tcp_delete(struct waiting_tcp* w)
196 {
197 	if(!w) return;
198 	if(w->timer)
199 		comm_timer_delete(w->timer);
200 	free(w);
201 }
202 
203 /**
204  * Pick a random outgoing interface of that family, and bind it.
205  * The port is set to 0 so the OS picks a port number for us.
206  * If it is the ANY address, do not bind.
207  * @param pend: pending tcp structure, for storing the local address choice.
208  * @param w: tcp structure with destination address.
209  * @param s: socket fd.
210  * @return false on error, socket closed.
211  */
212 static int
213 pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
214 {
215 	struct port_if* pi = NULL;
216 	int num;
217 	pend->pi = NULL;
218 #ifdef INET6
219 	if(addr_is_ip6(&w->addr, w->addrlen))
220 		num = w->outnet->num_ip6;
221 	else
222 #endif
223 		num = w->outnet->num_ip4;
224 	if(num == 0) {
225 		log_err("no TCP outgoing interfaces of family");
226 		log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
227 		sock_close(s);
228 		return 0;
229 	}
230 #ifdef INET6
231 	if(addr_is_ip6(&w->addr, w->addrlen))
232 		pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
233 	else
234 #endif
235 		pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
236 	log_assert(pi);
237 	pend->pi = pi;
238 	if(addr_is_any(&pi->addr, pi->addrlen)) {
239 		/* binding to the ANY interface is for listening sockets */
240 		return 1;
241 	}
242 	/* set port to 0 */
243 	if(addr_is_ip6(&pi->addr, pi->addrlen))
244 		((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
245 	else	((struct sockaddr_in*)&pi->addr)->sin_port = 0;
246 	if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
247 #ifndef USE_WINSOCK
248 #ifdef EADDRNOTAVAIL
249 		if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
250 #endif
251 #else /* USE_WINSOCK */
252 		if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
253 #endif
254 		    log_err("outgoing tcp: bind: %s", sock_strerror(errno));
255 		sock_close(s);
256 		return 0;
257 	}
258 	log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
259 	return 1;
260 }
261 
262 /** get TCP file descriptor for address, returns -1 on failure,
263  * tcp_mss is 0 or maxseg size to set for TCP packets. */
264 int
265 outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen,
266 	int tcp_mss, int dscp, int nodelay)
267 {
268 	int s;
269 	int af;
270 	char* err;
271 #if defined(SO_REUSEADDR) || defined(IP_BIND_ADDRESS_NO_PORT)	\
272 	|| defined(TCP_NODELAY)
273 	int on = 1;
274 #endif
275 #ifdef INET6
276 	if(addr_is_ip6(addr, addrlen)){
277 		s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
278 		af = AF_INET6;
279 	} else {
280 #else
281 	{
282 #endif
283 		af = AF_INET;
284 		s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
285 	}
286 	if(s == -1) {
287 		log_err_addr("outgoing tcp: socket", sock_strerror(errno),
288 			addr, addrlen);
289 		return -1;
290 	}
291 
292 #ifdef SO_REUSEADDR
293 	if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
294 		(socklen_t)sizeof(on)) < 0) {
295 		verbose(VERB_ALGO, "outgoing tcp:"
296 			" setsockopt(.. SO_REUSEADDR ..) failed");
297 	}
298 #endif
299 
300 	err = set_ip_dscp(s, af, dscp);
301 	if(err != NULL) {
302 		verbose(VERB_ALGO, "outgoing tcp:"
303 			" error setting IP DiffServ codepoint on socket");
304 	}
305 
306 	if(tcp_mss > 0) {
307 #if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
308 		if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
309 			(void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
310 			verbose(VERB_ALGO, "outgoing tcp:"
311 				" setsockopt(.. TCP_MAXSEG ..) failed");
312 		}
313 #else
314 		verbose(VERB_ALGO, "outgoing tcp:"
315 			" setsockopt(TCP_MAXSEG) unsupported");
316 #endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
317 	}
318 #ifdef IP_BIND_ADDRESS_NO_PORT
319 	if(setsockopt(s, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, (void*)&on,
320 		(socklen_t)sizeof(on)) < 0) {
321 		verbose(VERB_ALGO, "outgoing tcp:"
322 			" setsockopt(.. IP_BIND_ADDRESS_NO_PORT ..) failed");
323 	}
324 #endif /* IP_BIND_ADDRESS_NO_PORT */
325 	if(nodelay) {
326 #if defined(IPPROTO_TCP) && defined(TCP_NODELAY)
327 		if(setsockopt(s, IPPROTO_TCP, TCP_NODELAY, (void*)&on,
328 			(socklen_t)sizeof(on)) < 0) {
329 			verbose(VERB_ALGO, "outgoing tcp:"
330 				" setsockopt(.. TCP_NODELAY ..) failed");
331 		}
332 #else
333 		verbose(VERB_ALGO, "outgoing tcp:"
334 			" setsockopt(.. TCP_NODELAY ..) unsupported");
335 #endif /* defined(IPPROTO_TCP) && defined(TCP_NODELAY) */
336 	}
337 	return s;
338 }
339 
340 /** connect the tcp socket to addr, returns 0 on failure */
341 int
342 outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
343 {
344 	if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
345 #ifndef USE_WINSOCK
346 #ifdef EINPROGRESS
347 		if(errno != EINPROGRESS) {
348 #endif
349 			if(tcp_connect_errno_needs_log(
350 				(struct sockaddr*)addr, addrlen))
351 				log_err_addr("outgoing tcp: connect",
352 					strerror(errno), addr, addrlen);
353 			close(s);
354 			return 0;
355 #ifdef EINPROGRESS
356 		}
357 #endif
358 #else /* USE_WINSOCK */
359 		if(WSAGetLastError() != WSAEINPROGRESS &&
360 			WSAGetLastError() != WSAEWOULDBLOCK) {
361 			closesocket(s);
362 			return 0;
363 		}
364 #endif
365 	}
366 	return 1;
367 }
368 
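/*
 * A minimal usage sketch for the two helpers above; the zero tcp_mss/dscp
 * values and the nodelay flag are placeholders, and note that
 * outnet_tcp_connect() closes the socket itself on failure.
 *
 *	int fd = outnet_get_tcp_fd(&addr, addrlen, 0, 0, 0);
 *	if(fd == -1)
 *		return 0;
 *	fd_set_nonblock(fd);
 *	if(!outnet_tcp_connect(fd, &addr, addrlen))
 *		return 0;
 */
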
369 /** log reuse item addr and ptr with message */
370 static void
371 log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
372 {
373 	uint16_t port;
374 	char addrbuf[128];
375 	if(verbosity < v) return;
376 	if(!reuse || !reuse->pending || !reuse->pending->c)
377 		return;
378 	addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
379 	port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
380 	verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
381 		reuse->pending->c->fd);
382 }
383 
384 /** pop the first element from the writewait list */
385 struct waiting_tcp*
386 reuse_write_wait_pop(struct reuse_tcp* reuse)
387 {
388 	struct waiting_tcp* w = reuse->write_wait_first;
389 	if(!w)
390 		return NULL;
391 	log_assert(w->write_wait_queued);
392 	log_assert(!w->write_wait_prev);
393 	reuse->write_wait_first = w->write_wait_next;
394 	if(w->write_wait_next)
395 		w->write_wait_next->write_wait_prev = NULL;
396 	else	reuse->write_wait_last = NULL;
397 	w->write_wait_queued = 0;
398 	w->write_wait_next = NULL;
399 	w->write_wait_prev = NULL;
400 	return w;
401 }
402 
403 /** remove the element from the writewait list */
404 void
405 reuse_write_wait_remove(struct reuse_tcp* reuse, struct waiting_tcp* w)
406 {
407 	log_assert(w);
408 	log_assert(w->write_wait_queued);
409 	if(!w)
410 		return;
411 	if(!w->write_wait_queued)
412 		return;
413 	if(w->write_wait_prev)
414 		w->write_wait_prev->write_wait_next = w->write_wait_next;
415 	else	reuse->write_wait_first = w->write_wait_next;
416 	log_assert(!w->write_wait_prev ||
417 		w->write_wait_prev->write_wait_next != w->write_wait_prev);
418 	if(w->write_wait_next)
419 		w->write_wait_next->write_wait_prev = w->write_wait_prev;
420 	else	reuse->write_wait_last = w->write_wait_prev;
421 	log_assert(!w->write_wait_next
422 		|| w->write_wait_next->write_wait_prev != w->write_wait_next);
423 	w->write_wait_queued = 0;
424 	w->write_wait_next = NULL;
425 	w->write_wait_prev = NULL;
426 }
427 
428 /** push the element after the last on the writewait list */
429 void
430 reuse_write_wait_push_back(struct reuse_tcp* reuse, struct waiting_tcp* w)
431 {
432 	if(!w) return;
433 	log_assert(!w->write_wait_queued);
434 	if(reuse->write_wait_last) {
435 		reuse->write_wait_last->write_wait_next = w;
436 		log_assert(reuse->write_wait_last->write_wait_next !=
437 			reuse->write_wait_last);
438 		w->write_wait_prev = reuse->write_wait_last;
439 	} else {
440 		reuse->write_wait_first = w;
441 		w->write_wait_prev = NULL;
442 	}
443 	w->write_wait_next = NULL;
444 	reuse->write_wait_last = w;
445 	w->write_wait_queued = 1;
446 }
447 
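/*
 * A minimal sketch of the write_wait list semantics: it is a plain FIFO,
 * elements pushed on the back come off the front in the same order, and
 * the write_wait_queued flag tracks membership.  Roughly:
 *
 *	reuse_write_wait_push_back(reuse, w1);
 *	reuse_write_wait_push_back(reuse, w2);
 *	log_assert(reuse_write_wait_pop(reuse) == w1);
 *	log_assert(reuse_write_wait_pop(reuse) == w2);
 *	log_assert(reuse_write_wait_pop(reuse) == NULL);
 */
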
448 /** insert element in tree by id */
449 void
450 reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
451 {
452 #ifdef UNBOUND_DEBUG
453 	rbnode_type* added;
454 #endif
455 	log_assert(w->id_node.key == NULL);
456 	w->id_node.key = w;
457 #ifdef UNBOUND_DEBUG
458 	added =
459 #else
460 	(void)
461 #endif
462 	rbtree_insert(&reuse->tree_by_id, &w->id_node);
463 	log_assert(added);  /* should have been added */
464 }
465 
466 /** find element in tree by id */
467 struct waiting_tcp*
468 reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
469 {
470 	struct waiting_tcp key_w;
471 	rbnode_type* n;
472 	memset(&key_w, 0, sizeof(key_w));
473 	key_w.id_node.key = &key_w;
474 	key_w.id = id;
475 	n = rbtree_search(&reuse->tree_by_id, &key_w);
476 	if(!n) return NULL;
477 	return (struct waiting_tcp*)n->key;
478 }
479 
480 /** return ID value of rbnode in tree_by_id */
481 static uint16_t
482 tree_by_id_get_id(rbnode_type* node)
483 {
484 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
485 	return w->id;
486 }
487 
488 /** insert into reuse tcp tree and LRU, false on failure (duplicate) */
489 int
490 reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
491 {
492 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
493 	if(pend_tcp->reuse.item_on_lru_list) {
494 		if(!pend_tcp->reuse.node.key)
495 			log_err("internal error: reuse_tcp_insert: "
496 				"in lru list without key");
497 		return 1;
498 	}
499 	pend_tcp->reuse.node.key = &pend_tcp->reuse;
500 	pend_tcp->reuse.pending = pend_tcp;
501 	if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
502 		/* We are not in the LRU list but we are already in the
503 		 * tcp_reuse tree, strange.
504 		 * Continue to add ourselves to the LRU list. */
505 		log_err("internal error: reuse_tcp_insert: in lru list but "
506 			"not in the tree");
507 	}
508 	/* insert into LRU, first is newest */
509 	pend_tcp->reuse.lru_prev = NULL;
510 	if(outnet->tcp_reuse_first) {
511 		pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
512 		log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
513 		outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
514 		log_assert(outnet->tcp_reuse_first->lru_prev !=
515 			outnet->tcp_reuse_first);
516 	} else {
517 		pend_tcp->reuse.lru_next = NULL;
518 		outnet->tcp_reuse_last = &pend_tcp->reuse;
519 	}
520 	outnet->tcp_reuse_first = &pend_tcp->reuse;
521 	pend_tcp->reuse.item_on_lru_list = 1;
522 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
523 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
524 	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
525 		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
526 	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
527 		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
528 	return 1;
529 }
530 
531 /** find reuse tcp stream to destination for query, or NULL if none */
532 static struct reuse_tcp*
533 reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
534 	socklen_t addrlen, int use_ssl)
535 {
536 	struct waiting_tcp key_w;
537 	struct pending_tcp key_p;
538 	struct comm_point c;
539 	rbnode_type* result = NULL, *prev;
540 	verbose(VERB_CLIENT, "reuse_tcp_find");
541 	memset(&key_w, 0, sizeof(key_w));
542 	memset(&key_p, 0, sizeof(key_p));
543 	memset(&c, 0, sizeof(c));
544 	key_p.query = &key_w;
545 	key_p.c = &c;
546 	key_p.reuse.pending = &key_p;
547 	key_p.reuse.node.key = &key_p.reuse;
548 	if(use_ssl)
549 		key_p.reuse.is_ssl = 1;
550 	if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
551 		return NULL;
552 	memmove(&key_p.reuse.addr, addr, addrlen);
553 	key_p.reuse.addrlen = addrlen;
554 
555 	verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
556 		(unsigned)outnet->tcp_reuse.count);
557 	if(outnet->tcp_reuse.root == NULL ||
558 		outnet->tcp_reuse.root == RBTREE_NULL)
559 		return NULL;
560 	if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
561 		&result)) {
562 		/* exact match */
563 		/* but the key is on stack, and ptr is compared, impossible */
564 		log_assert(&key_p.reuse != (struct reuse_tcp*)result);
565 		log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
566 	}
567 
568 	/* It is possible that we search for something before the first element
569 	 * in the tree. Replace a null pointer with the first element.
570 	 */
571 	if (!result) {
572 		verbose(VERB_CLIENT, "reuse_tcp_find: taking first");
573 		result = rbtree_first(&outnet->tcp_reuse);
574 	}
575 
576 	/* not found, return null */
577 	if(!result || result == RBTREE_NULL)
578 		return NULL;
579 
580 	/* It is possible that we got the previous address, but that the
581 	 * address we are looking for is in the tree. If the address we got
582 	 * is less than the address we are looking for, take the next entry.
583 	 */
584 	if (reuse_cmp_addrportssl(result->key, &key_p.reuse) < 0) {
585 		verbose(VERB_CLIENT, "reuse_tcp_find: key too low");
586 		result = rbtree_next(result);
587 	}
588 
589 	verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
590 	/* inexact match, find one of possibly several connections to the
591 	 * same destination address, with the correct port, ssl, and
592 	 * also less than max number of open queries, or else, fail to open
593 	 * a new one */
594 	/* rewind to start of sequence of same address,port,ssl */
595 	prev = rbtree_previous(result);
596 	while(prev && prev != RBTREE_NULL &&
597 		reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
598 		result = prev;
599 		prev = rbtree_previous(result);
600 	}
601 
602 	/* loop to find first one that has correct characteristics */
603 	while(result && result != RBTREE_NULL &&
604 		reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
605 		if(((struct reuse_tcp*)result)->tree_by_id.count <
606 			outnet->max_reuse_tcp_queries) {
607 			/* same address, port, ssl-yes-or-no, and has
608 			 * space for another query */
609 			return (struct reuse_tcp*)result;
610 		}
611 		result = rbtree_next(result);
612 	}
613 	return NULL;
614 }
615 
616 /** use the buffer to set up writing the query */
617 static void
618 outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
619 	struct waiting_tcp* w)
620 {
621 	struct timeval tv;
622 	verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
623 		"len %d timeout %d msec",
624 		(int)w->pkt_len, w->timeout);
625 	pend->c->tcp_write_pkt = w->pkt;
626 	pend->c->tcp_write_pkt_len = w->pkt_len;
627 	pend->c->tcp_write_and_read = 1;
628 	pend->c->tcp_write_byte_count = 0;
629 	pend->c->tcp_is_reading = 0;
630 	comm_point_start_listening(pend->c, s, -1);
631 	/* set timer on the waiting_tcp entry, this is the write timeout
632 	 * for the written packet.  The timer on pend->c is the timer
633 	 * for when there is no written packet and we have readtimeouts */
634 #ifndef S_SPLINT_S
635 	tv.tv_sec = w->timeout/1000;
636 	tv.tv_usec = (w->timeout%1000)*1000;
637 #endif
638 	/* if the waiting_tcp was previously waiting for a buffer in the
639 	 * outside_network.tcpwaitlist, then the timer is reset now that
640 	 * we start writing it */
641 	comm_timer_set(w->timer, &tv);
642 }
643 
644 /** use next free buffer to service a tcp query */
645 static int
646 outnet_tcp_take_into_use(struct waiting_tcp* w)
647 {
648 	struct pending_tcp* pend = w->outnet->tcp_free;
649 	int s;
650 	log_assert(pend);
651 	log_assert(w->pkt);
652 	log_assert(w->pkt_len > 0);
653 	log_assert(w->addrlen > 0);
654 	pend->c->tcp_do_toggle_rw = 0;
655 	pend->c->tcp_do_close = 0;
656 
657 	/* Consistency check, if we have ssl_upstream but no sslctx, then
658 	 * log an error and return failure.
659 	 */
660 	if (w->ssl_upstream && !w->outnet->sslctx) {
661 		log_err("SSL upstream requested but no SSL context");
662 		return 0;
663 	}
664 
665 	/* open socket */
666 	s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss,
667 		w->outnet->ip_dscp, w->ssl_upstream);
668 
669 	if(s == -1)
670 		return 0;
671 
672 	if(!pick_outgoing_tcp(pend, w, s))
673 		return 0;
674 
675 	fd_set_nonblock(s);
676 #ifdef USE_OSX_MSG_FASTOPEN
677 	/* API for fast open is different here. We use a connectx() function and
678 	   then writes can happen as normal, even when using SSL. */
679 	/* connectx requires that the len be set in the sockaddr struct*/
680 	struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
681 	addr_in->sin_len = w->addrlen;
682 	sa_endpoints_t endpoints;
683 	endpoints.sae_srcif = 0;
684 	endpoints.sae_srcaddr = NULL;
685 	endpoints.sae_srcaddrlen = 0;
686 	endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
687 	endpoints.sae_dstaddrlen = w->addrlen;
688 	if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
689 	             CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
690 	             NULL, 0, NULL, NULL) == -1) {
691 		/* if it fails, fall back to connect() for OSX 10.10 */
692 #ifdef EINPROGRESS
693 		if(errno != EINPROGRESS) {
694 #else
695 		if(1) {
696 #endif
697 			if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
698 #else /* USE_OSX_MSG_FASTOPEN*/
699 #ifdef USE_MSG_FASTOPEN
700 	pend->c->tcp_do_fastopen = 1;
701 	/* Only do TFO for plain TCP, in which case no connect() is required here.
702 	   Don't combine client TFO with SSL, since OpenSSL can't
703 	   currently support doing a handshake on an fd that isn't yet connected. */
704 	if (w->outnet->sslctx && w->ssl_upstream) {
705 		if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
706 #else /* USE_MSG_FASTOPEN*/
707 	if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
708 #endif /* USE_MSG_FASTOPEN*/
709 #endif /* USE_OSX_MSG_FASTOPEN*/
710 #ifndef USE_WINSOCK
711 #ifdef EINPROGRESS
712 		if(errno != EINPROGRESS) {
713 #else
714 		if(1) {
715 #endif
716 			if(tcp_connect_errno_needs_log(
717 				(struct sockaddr*)&w->addr, w->addrlen))
718 				log_err_addr("outgoing tcp: connect",
719 					strerror(errno), &w->addr, w->addrlen);
720 			close(s);
721 #else /* USE_WINSOCK */
722 		if(WSAGetLastError() != WSAEINPROGRESS &&
723 			WSAGetLastError() != WSAEWOULDBLOCK) {
724 			closesocket(s);
725 #endif
726 			return 0;
727 		}
728 	}
729 #ifdef USE_MSG_FASTOPEN
730 	}
731 #endif /* USE_MSG_FASTOPEN */
732 #ifdef USE_OSX_MSG_FASTOPEN
733 		}
734 	}
735 #endif /* USE_OSX_MSG_FASTOPEN */
736 	if(w->outnet->sslctx && w->ssl_upstream) {
737 		pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
738 		if(!pend->c->ssl) {
739 			pend->c->fd = s;
740 			comm_point_close(pend->c);
741 			return 0;
742 		}
743 		verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
744 			(w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
745 #ifdef USE_WINSOCK
746 		comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
747 #endif
748 		pend->c->ssl_shake_state = comm_ssl_shake_write;
749 		if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
750 			w->outnet->tls_use_sni)) {
751 			pend->c->fd = s;
752 #ifdef HAVE_SSL
753 			SSL_free(pend->c->ssl);
754 #endif
755 			pend->c->ssl = NULL;
756 			comm_point_close(pend->c);
757 			return 0;
758 		}
759 	}
760 	w->next_waiting = (void*)pend;
761 	w->outnet->num_tcp_outgoing++;
762 	w->outnet->tcp_free = pend->next_free;
763 	pend->next_free = NULL;
764 	pend->query = w;
765 	pend->reuse.outnet = w->outnet;
766 	pend->c->repinfo.remote_addrlen = w->addrlen;
767 	pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
768 	pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
769 	pend->reuse.cp_more_read_again = 0;
770 	pend->reuse.cp_more_write_again = 0;
771 	memcpy(&pend->c->repinfo.remote_addr, &w->addr, w->addrlen);
772 	pend->reuse.pending = pend;
773 
774 	/* Remove from tree in case the is_ssl will be different and causes the
775 	 * identity of the reuse_tcp to change; could result in nodes not being
776 	 * deleted from the tree (because the new identity does not match the
777 	 * previous node) but their ->key would be changed to NULL. */
778 	if(pend->reuse.node.key)
779 		reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);
780 
781 	if(pend->c->ssl)
782 		pend->reuse.is_ssl = 1;
783 	else	pend->reuse.is_ssl = 0;
784 	/* insert in reuse by address tree if not already inserted there */
785 	(void)reuse_tcp_insert(w->outnet, pend);
786 	reuse_tree_by_id_insert(&pend->reuse, w);
787 	outnet_tcp_take_query_setup(s, pend, w);
788 	return 1;
789 }
790 
791 /** Touch the lru of a reuse_tcp element; it is in use.
792  * This moves it to the front of the list, where it is not likely to
793  * be closed.  Items at the back of the list are closed to make space. */
794 void
795 reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
796 {
797 	if(!reuse->item_on_lru_list) {
798 		log_err("internal error: we need to touch the lru_list but item not in list");
799 		return; /* not on the list, no lru to modify */
800 	}
801 	log_assert(reuse->lru_prev ||
802 		(!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
803 	if(!reuse->lru_prev)
804 		return; /* already first in the list */
805 	/* remove at current position */
806 	/* since it is not first, there is a previous element */
807 	reuse->lru_prev->lru_next = reuse->lru_next;
808 	log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
809 	if(reuse->lru_next)
810 		reuse->lru_next->lru_prev = reuse->lru_prev;
811 	else	outnet->tcp_reuse_last = reuse->lru_prev;
812 	log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
813 	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
814 		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
815 	/* insert at the front */
816 	reuse->lru_prev = NULL;
817 	reuse->lru_next = outnet->tcp_reuse_first;
818 	if(outnet->tcp_reuse_first) {
819 		outnet->tcp_reuse_first->lru_prev = reuse;
820 	}
821 	log_assert(reuse->lru_next != reuse);
822 	/* since it is not first, it is not the only element and
823 	 * lru_next is thus not NULL and thus reuse is now not the last in
824 	 * the list, so outnet->tcp_reuse_last does not need to be modified */
825 	outnet->tcp_reuse_first = reuse;
826 	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
827 		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
828 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
829 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
830 }
831 
832 /** Snip the last reuse_tcp element off of the LRU list */
833 struct reuse_tcp*
834 reuse_tcp_lru_snip(struct outside_network* outnet)
835 {
836 	struct reuse_tcp* reuse = outnet->tcp_reuse_last;
837 	if(!reuse) return NULL;
838 	/* snip off of LRU */
839 	log_assert(reuse->lru_next == NULL);
840 	if(reuse->lru_prev) {
841 		outnet->tcp_reuse_last = reuse->lru_prev;
842 		reuse->lru_prev->lru_next = NULL;
843 	} else {
844 		outnet->tcp_reuse_last = NULL;
845 		outnet->tcp_reuse_first = NULL;
846 	}
847 	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
848 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
849 	reuse->item_on_lru_list = 0;
850 	reuse->lru_next = NULL;
851 	reuse->lru_prev = NULL;
852 	return reuse;
853 }
854 
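/*
 * A minimal sketch of the reuse LRU policy implemented by the two routines
 * above: touch a stream whenever it is picked up for another query, and
 * snip from the tail when an open stream has to be sacrificed, roughly
 * (using the static helper reuse_cb_and_decommission() defined later in
 * this file):
 *
 *	struct reuse_tcp* reuse = reuse_tcp_lru_snip(outnet);
 *	if(reuse)
 *		reuse_cb_and_decommission(outnet, reuse->pending,
 *			NETEVENT_CLOSED);
 */
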
855 /** remove waiting tcp from the outnet waiting list */
856 void
857 outnet_waiting_tcp_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
858 {
859 	struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
860 	w->on_tcp_waiting_list = 0;
861 	while(p) {
862 		if(p == w) {
863 			/* remove w */
864 			if(prev)
865 				prev->next_waiting = w->next_waiting;
866 			else	outnet->tcp_wait_first = w->next_waiting;
867 			if(outnet->tcp_wait_last == w)
868 				outnet->tcp_wait_last = prev;
869 			w->next_waiting = NULL;
870 			return;
871 		}
872 		prev = p;
873 		p = p->next_waiting;
874 	}
875 	/* outnet_waiting_tcp_list_remove is currently called only with items
876 	 * that are already in the waiting list. */
877 	log_assert(0);
878 }
879 
880 /** pop the first waiting tcp from the outnet waiting list */
881 struct waiting_tcp*
882 outnet_waiting_tcp_list_pop(struct outside_network* outnet)
883 {
884 	struct waiting_tcp* w = outnet->tcp_wait_first;
885 	if(!outnet->tcp_wait_first) return NULL;
886 	log_assert(w->on_tcp_waiting_list);
887 	outnet->tcp_wait_first = w->next_waiting;
888 	if(outnet->tcp_wait_last == w)
889 		outnet->tcp_wait_last = NULL;
890 	w->on_tcp_waiting_list = 0;
891 	w->next_waiting = NULL;
892 	return w;
893 }
894 
895 /** add waiting_tcp element to the outnet tcp waiting list */
896 void
897 outnet_waiting_tcp_list_add(struct outside_network* outnet,
898 	struct waiting_tcp* w, int set_timer)
899 {
900 	struct timeval tv;
901 	log_assert(!w->on_tcp_waiting_list);
902 	if(w->on_tcp_waiting_list)
903 		return;
904 	w->next_waiting = NULL;
905 	if(outnet->tcp_wait_last)
906 		outnet->tcp_wait_last->next_waiting = w;
907 	else	outnet->tcp_wait_first = w;
908 	outnet->tcp_wait_last = w;
909 	w->on_tcp_waiting_list = 1;
910 	if(set_timer) {
911 #ifndef S_SPLINT_S
912 		tv.tv_sec = w->timeout/1000;
913 		tv.tv_usec = (w->timeout%1000)*1000;
914 #endif
915 		comm_timer_set(w->timer, &tv);
916 	}
917 }
918 
919 /** add waiting_tcp element as first to the outnet tcp waiting list */
920 void
921 outnet_waiting_tcp_list_add_first(struct outside_network* outnet,
922 	struct waiting_tcp* w, int reset_timer)
923 {
924 	struct timeval tv;
925 	log_assert(!w->on_tcp_waiting_list);
926 	if(w->on_tcp_waiting_list)
927 		return;
928 	w->next_waiting = outnet->tcp_wait_first;
929 	log_assert(w->next_waiting != w);
930 	if(!outnet->tcp_wait_last)
931 		outnet->tcp_wait_last = w;
932 	outnet->tcp_wait_first = w;
933 	w->on_tcp_waiting_list = 1;
934 	if(reset_timer) {
935 #ifndef S_SPLINT_S
936 		tv.tv_sec = w->timeout/1000;
937 		tv.tv_usec = (w->timeout%1000)*1000;
938 #endif
939 		comm_timer_set(w->timer, &tv);
940 	}
941 	log_assert(
942 		(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
943 		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
944 }
945 
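/*
 * A minimal sketch of the tcp waiting list semantics: it is also a FIFO;
 * use_free_buffer() below pops entries from the front and, when neither a
 * reusable stream nor a free buffer is available, puts the entry back at
 * the front without resetting its timer.  cannot_service_now is a
 * placeholder condition:
 *
 *	struct waiting_tcp* w = outnet_waiting_tcp_list_pop(outnet);
 *	if(w && cannot_service_now)
 *		outnet_waiting_tcp_list_add_first(outnet, w, 0);
 */
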
946 /** call callback on waiting_tcp, if not NULL */
947 static void
948 waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
949 	struct comm_reply* reply_info)
950 {
951 	if(w && w->cb) {
952 		fptr_ok(fptr_whitelist_pending_tcp(w->cb));
953 		(void)(*w->cb)(c, w->cb_arg, error, reply_info);
954 	}
955 }
956 
957 /** see if buffers can be used to service TCP queries */
958 static void
959 use_free_buffer(struct outside_network* outnet)
960 {
961 	struct waiting_tcp* w;
962 	while(outnet->tcp_wait_first && !outnet->want_to_quit) {
963 #ifdef USE_DNSTAP
964 		struct pending_tcp* pend_tcp = NULL;
965 #endif
966 		struct reuse_tcp* reuse = NULL;
967 		w = outnet_waiting_tcp_list_pop(outnet);
968 		log_assert(
969 			(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
970 			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
971 		reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
972 			w->ssl_upstream);
973 		/* re-select an ID when moving to a new TCP buffer */
974 		w->id = tcp_select_id(outnet, reuse);
975 		LDNS_ID_SET(w->pkt, w->id);
976 		if(reuse) {
977 			log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
978 				"found reuse", reuse);
979 #ifdef USE_DNSTAP
980 			pend_tcp = reuse->pending;
981 #endif
982 			reuse_tcp_lru_touch(outnet, reuse);
983 			comm_timer_disable(w->timer);
984 			w->next_waiting = (void*)reuse->pending;
985 			reuse_tree_by_id_insert(reuse, w);
986 			if(reuse->pending->query) {
987 				/* on the write wait list */
988 				reuse_write_wait_push_back(reuse, w);
989 			} else {
990 				/* write straight away */
991 				/* stop the timer on read of the fd */
992 				comm_point_stop_listening(reuse->pending->c);
993 				reuse->pending->query = w;
994 				outnet_tcp_take_query_setup(
995 					reuse->pending->c->fd, reuse->pending,
996 					w);
997 			}
998 		} else if(outnet->tcp_free) {
999 			struct pending_tcp* pend = w->outnet->tcp_free;
1000 			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
1001 			pend->reuse.pending = pend;
1002 			memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
1003 			pend->reuse.addrlen = w->addrlen;
1004 			if(!outnet_tcp_take_into_use(w)) {
1005 				waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
1006 					NULL);
1007 				waiting_tcp_delete(w);
1008 #ifdef USE_DNSTAP
1009 				w = NULL;
1010 #endif
1011 			}
1012 #ifdef USE_DNSTAP
1013 			pend_tcp = pend;
1014 #endif
1015 		} else {
1016 			/* no reuse and no free buffer, put back at the start */
1017 			outnet_waiting_tcp_list_add_first(outnet, w, 0);
1018 			break;
1019 		}
1020 #ifdef USE_DNSTAP
1021 		if(outnet->dtenv && pend_tcp && w && w->sq &&
1022 			(outnet->dtenv->log_resolver_query_messages ||
1023 			outnet->dtenv->log_forwarder_query_messages)) {
1024 			sldns_buffer tmp;
1025 			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
1026 			dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
1027 				&pend_tcp->pi->addr, comm_tcp, NULL, w->sq->zone,
1028 				w->sq->zonelen, &tmp);
1029 		}
1030 #endif
1031 	}
1032 }
1033 
1034 /** delete element from tree by id */
1035 static void
1036 reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
1037 {
1038 #ifdef UNBOUND_DEBUG
1039 	rbnode_type* rem;
1040 #endif
1041 	log_assert(w->id_node.key != NULL);
1042 #ifdef UNBOUND_DEBUG
1043 	rem =
1044 #else
1045 	(void)
1046 #endif
1047 	rbtree_delete(&reuse->tree_by_id, w);
1048 	log_assert(rem);  /* should have been there */
1049 	w->id_node.key = NULL;
1050 }
1051 
1052 /** move the writewait list queries away, so they can go on another connection. */
1053 static void
1054 reuse_move_writewait_away(struct outside_network* outnet,
1055 	struct pending_tcp* pend)
1056 {
1057 	/* the writewait list has not been written yet, so if the
1058 	 * stream was closed, those queries have not actually failed;
1059 	 * only the queries already written have.  Other queries can get
1060 	 * written to another stream.  For upstreams that do not support
1061 	 * multiple queries and answers, the stream can get closed, and
1062 	 * then the queries can get written on a new socket. */
1063 	struct waiting_tcp* w;
1064 	if(pend->query && pend->query->error_count == 0 &&
1065 		pend->c->tcp_write_pkt == pend->query->pkt &&
1066 		pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
1067 		/* since the current query is not written, it can also
1068 		 * move to a free buffer */
1069 		if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
1070 			LDNS_QDCOUNT(pend->query->pkt) > 0 &&
1071 			dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
1072 			char buf[LDNS_MAX_DOMAINLEN];
1073 			dname_str(pend->query->pkt+12, buf);
1074 			verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
1075 				buf, (int)pend->c->tcp_write_byte_count);
1076 		}
1077 		pend->c->tcp_write_pkt = NULL;
1078 		pend->c->tcp_write_pkt_len = 0;
1079 		pend->c->tcp_write_and_read = 0;
1080 		pend->reuse.cp_more_read_again = 0;
1081 		pend->reuse.cp_more_write_again = 0;
1082 		pend->c->tcp_is_reading = 1;
1083 		w = pend->query;
1084 		pend->query = NULL;
1085 		/* increase error count, so that if the next socket fails too
1086 		 * the server selection is run again with this query failed
1087 		 * and it can select a different server (if possible), or
1088 		 * fail the query */
1089 		w->error_count ++;
1090 		reuse_tree_by_id_delete(&pend->reuse, w);
1091 		outnet_waiting_tcp_list_add(outnet, w, 1);
1092 	}
1093 	while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
1094 		if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
1095 			LDNS_QDCOUNT(w->pkt) > 0 &&
1096 			dname_valid(w->pkt+12, w->pkt_len-12)) {
1097 			char buf[LDNS_MAX_DOMAINLEN];
1098 			dname_str(w->pkt+12, buf);
1099 			verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
1100 		}
1101 		reuse_tree_by_id_delete(&pend->reuse, w);
1102 		outnet_waiting_tcp_list_add(outnet, w, 1);
1103 	}
1104 }
1105 
1106 /** remove reused element from tree and lru list */
1107 void
1108 reuse_tcp_remove_tree_list(struct outside_network* outnet,
1109 	struct reuse_tcp* reuse)
1110 {
1111 	verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
1112 	if(reuse->node.key) {
1113 		/* delete it from reuse tree */
1114 		if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
1115 			/* should not be possible, it should be there */
1116 			char buf[256];
1117 			addr_to_str(&reuse->addr, reuse->addrlen, buf,
1118 				sizeof(buf));
1119 			log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
1120 		}
1121 		reuse->node.key = NULL;
1122 		/* defend against loops on broken tree by zeroing the
1123 		 * rbnode structure */
1124 		memset(&reuse->node, 0, sizeof(reuse->node));
1125 	}
1126 	/* delete from reuse list */
1127 	if(reuse->item_on_lru_list) {
1128 		if(reuse->lru_prev) {
1129 			/* assert that members of the lru list are waiting
1130 			 * and thus have a pending pointer to the struct */
1131 			log_assert(reuse->lru_prev->pending);
1132 			reuse->lru_prev->lru_next = reuse->lru_next;
1133 			log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
1134 		} else {
1135 			log_assert(!reuse->lru_next || reuse->lru_next->pending);
1136 			outnet->tcp_reuse_first = reuse->lru_next;
1137 			log_assert(!outnet->tcp_reuse_first ||
1138 				(outnet->tcp_reuse_first !=
1139 				 outnet->tcp_reuse_first->lru_next &&
1140 				 outnet->tcp_reuse_first !=
1141 				 outnet->tcp_reuse_first->lru_prev));
1142 		}
1143 		if(reuse->lru_next) {
1144 			/* assert that members of the lru list are waiting
1145 			 * and thus have a pending pointer to the struct */
1146 			log_assert(reuse->lru_next->pending);
1147 			reuse->lru_next->lru_prev = reuse->lru_prev;
1148 			log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
1149 		} else {
1150 			log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
1151 			outnet->tcp_reuse_last = reuse->lru_prev;
1152 			log_assert(!outnet->tcp_reuse_last ||
1153 				(outnet->tcp_reuse_last !=
1154 				 outnet->tcp_reuse_last->lru_next &&
1155 				 outnet->tcp_reuse_last !=
1156 				 outnet->tcp_reuse_last->lru_prev));
1157 		}
1158 		log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
1159 			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
1160 		reuse->item_on_lru_list = 0;
1161 		reuse->lru_next = NULL;
1162 		reuse->lru_prev = NULL;
1163 	}
1164 	reuse->pending = NULL;
1165 }
1166 
1167 /** helper function that deletes an element from the tree of readwait
1168  * elements in tcp reuse structure */
1169 static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
1170 {
1171 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1172 	waiting_tcp_delete(w);
1173 }
1174 
1175 /** delete readwait waiting_tcp elements, deletes the elements in the list */
1176 void reuse_del_readwait(rbtree_type* tree_by_id)
1177 {
1178 	if(tree_by_id->root == NULL ||
1179 		tree_by_id->root == RBTREE_NULL)
1180 		return;
1181 	traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
1182 	rbtree_init(tree_by_id, reuse_id_cmp);
1183 }
1184 
1185 /** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
1186 static void
1187 decommission_pending_tcp(struct outside_network* outnet,
1188 	struct pending_tcp* pend)
1189 {
1190 	verbose(VERB_CLIENT, "decommission_pending_tcp");
1191 	/* A certain code path can lead here twice for the same pending_tcp
1192 	 * creating a loop in the free pending_tcp list. */
1193 	if(outnet->tcp_free != pend) {
1194 		pend->next_free = outnet->tcp_free;
1195 		outnet->tcp_free = pend;
1196 	}
1197 	if(pend->reuse.node.key) {
1198 		/* needs unlink from the reuse tree to get deleted */
1199 		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1200 	}
1201 	/* free SSL structure after remove from outnet tcp reuse tree,
1202 	 * because the c->ssl null or not is used for sorting in the tree */
1203 	if(pend->c->ssl) {
1204 #ifdef HAVE_SSL
1205 		SSL_shutdown(pend->c->ssl);
1206 		SSL_free(pend->c->ssl);
1207 		pend->c->ssl = NULL;
1208 #endif
1209 	}
1210 	comm_point_close(pend->c);
1211 	pend->reuse.cp_more_read_again = 0;
1212 	pend->reuse.cp_more_write_again = 0;
1213 	/* unlink the query and writewait list, it is part of the tree
1214 	 * nodes and is deleted */
1215 	pend->query = NULL;
1216 	pend->reuse.write_wait_first = NULL;
1217 	pend->reuse.write_wait_last = NULL;
1218 	reuse_del_readwait(&pend->reuse.tree_by_id);
1219 }
1220 
1221 /** perform failure callbacks for waiting queries in reuse read rbtree */
1222 static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
1223 {
1224 	rbnode_type* node;
1225 	if(tree_by_id->root == NULL ||
1226 		tree_by_id->root == RBTREE_NULL)
1227 		return;
1228 	node = rbtree_first(tree_by_id);
1229 	while(node && node != RBTREE_NULL) {
1230 		struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1231 		waiting_tcp_callback(w, NULL, err, NULL);
1232 		node = rbtree_next(node);
1233 	}
1234 }
1235 
1236 /** mark the entry for being in the cb_and_decommission stage */
1237 static void mark_for_cb_and_decommission(rbnode_type* node,
1238 	void* ATTR_UNUSED(arg))
1239 {
1240 	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1241 	/* Mark the waiting_tcp to signal later code (serviced_delete) that
1242 	 * this item is part of the backed up tree_by_id and will be deleted
1243 	 * later. */
1244 	w->in_cb_and_decommission = 1;
1245 	/* Mark the serviced_query for deletion so that later code through
1246 	 * callbacks (iter_clear .. outnet_serviced_query_stop) won't
1247 	 * prematurely delete it. */
1248 	if(w->cb)
1249 		((struct serviced_query*)w->cb_arg)->to_be_deleted = 1;
1250 }
1251 
1252 /** perform callbacks for failure and also decommission pending tcp.
1253  * the callbacks remove references in sq->pending to the waiting_tcp
1254  * members of the tree_by_id in the pending tcp.  The pending_tcp is
1255  * removed before the callbacks, so that the callbacks do not modify
1256  * the pending_tcp due to its reference in the outside_network reuse tree */
1257 static void reuse_cb_and_decommission(struct outside_network* outnet,
1258 	struct pending_tcp* pend, int error)
1259 {
1260 	rbtree_type store;
1261 	store = pend->reuse.tree_by_id;
1262 	pend->query = NULL;
1263 	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
1264 	pend->reuse.write_wait_first = NULL;
1265 	pend->reuse.write_wait_last = NULL;
1266 	decommission_pending_tcp(outnet, pend);
1267 	if(store.root != NULL && store.root != RBTREE_NULL) {
1268 		traverse_postorder(&store, &mark_for_cb_and_decommission, NULL);
1269 	}
1270 	reuse_cb_readwait_for_failure(&store, error);
1271 	reuse_del_readwait(&store);
1272 }
1273 
1274 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1275 static void
1276 reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1277 {
1278 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
1279 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1280 }
1281 
1282 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1283 static void
1284 reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1285 {
1286 	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
1287 	sldns_buffer_clear(pend_tcp->c->buffer);
1288 	pend_tcp->c->tcp_is_reading = 1;
1289 	pend_tcp->c->tcp_byte_count = 0;
1290 	comm_point_stop_listening(pend_tcp->c);
1291 	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1292 }
1293 
1294 int
1295 outnet_tcp_cb(struct comm_point* c, void* arg, int error,
1296 	struct comm_reply *reply_info)
1297 {
1298 	struct pending_tcp* pend = (struct pending_tcp*)arg;
1299 	struct outside_network* outnet = pend->reuse.outnet;
1300 	struct waiting_tcp* w = NULL;
1301 	log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
1302 	verbose(VERB_ALGO, "outnettcp cb");
1303 	if(error == NETEVENT_TIMEOUT) {
1304 		if(pend->c->tcp_write_and_read) {
1305 			verbose(VERB_QUERY, "outnettcp got tcp timeout "
1306 				"for read, ignored because write underway");
1307 			/* if we are writing, ignore readtimer, wait for write timer
1308 			 * or write is done */
1309 			return 0;
1310 		} else {
1311 			verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
1312 				(pend->reuse.tree_by_id.count?"for reading pkt":
1313 				"for keepalive for reuse"));
1314 		}
1315 		/* must be timeout for reading or keepalive reuse,
1316 		 * close it. */
1317 		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1318 	} else if(error == NETEVENT_PKT_WRITTEN) {
1319 		/* the packet we want to write has been written. */
1320 		verbose(VERB_ALGO, "outnet tcp pkt was written event");
1321 		log_assert(c == pend->c);
1322 		log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
1323 		log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
1324 		pend->c->tcp_write_pkt = NULL;
1325 		pend->c->tcp_write_pkt_len = 0;
1326 		/* the pend.query is already in tree_by_id */
1327 		log_assert(pend->query->id_node.key);
1328 		pend->query = NULL;
1329 		/* setup to write next packet or setup read timeout */
1330 		if(pend->reuse.write_wait_first) {
1331 			verbose(VERB_ALGO, "outnet tcp setup next pkt");
1332 			/* perhaps we can write it straight away; set the flag
1333 			 * because this callback is called after a tcp write
1334 			 * succeeded, so likely more buffer space is available
1335 			 * and we can write some more. */
1336 			pend->reuse.cp_more_write_again = 1;
1337 			pend->query = reuse_write_wait_pop(&pend->reuse);
1338 			comm_point_stop_listening(pend->c);
1339 			outnet_tcp_take_query_setup(pend->c->fd, pend,
1340 				pend->query);
1341 		} else {
1342 			verbose(VERB_ALGO, "outnet tcp writes done, wait");
1343 			pend->c->tcp_write_and_read = 0;
1344 			pend->reuse.cp_more_read_again = 0;
1345 			pend->reuse.cp_more_write_again = 0;
1346 			pend->c->tcp_is_reading = 1;
1347 			comm_point_stop_listening(pend->c);
1348 			reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
1349 		}
1350 		return 0;
1351 	} else if(error != NETEVENT_NOERROR) {
1352 		verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
1353 		reuse_move_writewait_away(outnet, pend);
1354 		/* pass error below and exit */
1355 	} else {
1356 		/* check ID */
1357 		if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
1358 			log_addr(VERB_QUERY,
1359 				"outnettcp: bad ID in reply, too short, from:",
1360 				&pend->reuse.addr, pend->reuse.addrlen);
1361 			error = NETEVENT_CLOSED;
1362 		} else {
1363 			uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
1364 				c->buffer));
1365 			/* find the query the reply is for */
1366 			w = reuse_tcp_by_id_find(&pend->reuse, id);
1367 			/* Make sure that the reply we got is at least for a
1368 			 * sent query with the same ID; the waiting_tcp that
1369 			 * gets a reply is assumed to not be waiting to be
1370 			 * sent. */
1371 			if(w && (w->on_tcp_waiting_list || w->write_wait_queued))
1372 				w = NULL;
1373 		}
1374 	}
1375 	if(error == NETEVENT_NOERROR && !w) {
1376 		/* no struct waiting found in tree, no reply to call */
1377 		log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
1378 			&pend->reuse.addr, pend->reuse.addrlen);
1379 		error = NETEVENT_CLOSED;
1380 	}
1381 	if(error == NETEVENT_NOERROR) {
1382 		/* add to reuse tree so it can be reused, if not a failure.
1383 		 * This is possible if the state machine wants to make a tcp
1384 		 * query again to the same destination. */
1385 		if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
1386 			(void)reuse_tcp_insert(outnet, pend);
1387 		}
1388 	}
1389 	if(w) {
1390 		log_assert(!w->on_tcp_waiting_list);
1391 		log_assert(!w->write_wait_queued);
1392 		reuse_tree_by_id_delete(&pend->reuse, w);
1393 		verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
1394 			error, (int)sldns_buffer_limit(c->buffer));
1395 		waiting_tcp_callback(w, c, error, reply_info);
1396 		waiting_tcp_delete(w);
1397 	}
1398 	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
1399 	if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
1400 		verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
1401 		/* it is in the reuse_tcp tree, with other queries, or
1402 		 * on the empty list. do not decommission it */
1403 		/* if there are more outstanding queries, we could try to
1404 		 * read again, to see if it is on the input,
1405 		 * because this callback is called after a successful read
1406 		 * and there could be more bytes to read on the input */
1407 		if(pend->reuse.tree_by_id.count != 0)
1408 			pend->reuse.cp_more_read_again = 1;
1409 		reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
1410 		return 0;
1411 	}
1412 	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
1413 	/* No queries on it and no space to keep it, or timeout, or closed
1414 	 * due to error.  Close it. */
1415 	reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
1416 		NETEVENT_TIMEOUT:NETEVENT_CLOSED));
1417 	use_free_buffer(outnet);
1418 	return 0;
1419 }
1420 
1421 /** lower use count on pc, see if it can be closed */
1422 static void
1423 portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
1424 {
1425 	struct port_if* pif;
1426 	pc->num_outstanding--;
1427 	if(pc->num_outstanding > 0) {
1428 		return;
1429 	}
1430 	/* close it and replace in unused list */
1431 	verbose(VERB_ALGO, "close of port %d", pc->number);
1432 	comm_point_close(pc->cp);
1433 	pif = pc->pif;
1434 	log_assert(pif->inuse > 0);
1435 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1436 	pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
1437 #endif
1438 	pif->inuse--;
1439 	pif->out[pc->index] = pif->out[pif->inuse];
1440 	pif->out[pc->index]->index = pc->index;
1441 	pc->next = outnet->unused_fds;
1442 	outnet->unused_fds = pc;
1443 }
1444 
1445 /** try to send waiting UDP queries */
1446 static void
1447 outnet_send_wait_udp(struct outside_network* outnet)
1448 {
1449 	struct pending* pend;
1450 	/* process waiting queries */
1451 	while(outnet->udp_wait_first && outnet->unused_fds
1452 		&& !outnet->want_to_quit) {
1453 		pend = outnet->udp_wait_first;
1454 		outnet->udp_wait_first = pend->next_waiting;
1455 		if(!pend->next_waiting) outnet->udp_wait_last = NULL;
1456 		sldns_buffer_clear(outnet->udp_buff);
1457 		sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
1458 		sldns_buffer_flip(outnet->udp_buff);
1459 		free(pend->pkt); /* freeing now makes get_mem correct */
1460 		pend->pkt = NULL;
1461 		pend->pkt_len = 0;
1462 		log_assert(!pend->sq->busy);
1463 		pend->sq->busy = 1;
1464 		if(!randomize_and_send_udp(pend, outnet->udp_buff,
1465 			pend->timeout)) {
1466 			/* callback error on pending */
1467 			if(pend->cb) {
1468 				fptr_ok(fptr_whitelist_pending_udp(pend->cb));
1469 				(void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
1470 					NETEVENT_CLOSED, NULL);
1471 			}
1472 			pending_delete(outnet, pend);
1473 		} else {
1474 			pend->sq->busy = 0;
1475 		}
1476 	}
1477 }
1478 
1479 int
1480 outnet_udp_cb(struct comm_point* c, void* arg, int error,
1481 	struct comm_reply *reply_info)
1482 {
1483 	struct outside_network* outnet = (struct outside_network*)arg;
1484 	struct pending key;
1485 	struct pending* p;
1486 	verbose(VERB_ALGO, "answer cb");
1487 
1488 	if(error != NETEVENT_NOERROR) {
1489 		verbose(VERB_QUERY, "outnetudp got udp error %d", error);
1490 		return 0;
1491 	}
1492 	if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1493 		verbose(VERB_QUERY, "outnetudp udp too short");
1494 		return 0;
1495 	}
1496 	log_assert(reply_info);
1497 
1498 	/* setup lookup key */
1499 	key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
1500 	memcpy(&key.addr, &reply_info->remote_addr, reply_info->remote_addrlen);
1501 	key.addrlen = reply_info->remote_addrlen;
1502 	verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
1503 	log_addr(VERB_ALGO, "Incoming reply addr =",
1504 		&reply_info->remote_addr, reply_info->remote_addrlen);
1505 
1506 	/* find it, see if this thing is a valid query response */
1507 	verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
1508 	p = (struct pending*)rbtree_search(outnet->pending, &key);
1509 	if(!p) {
1510 		verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
1511 		log_buf(VERB_ALGO, "dropped message", c->buffer);
1512 		outnet->unwanted_replies++;
1513 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1514 			>= outnet->unwanted_threshold) {
1515 			log_warn("unwanted reply total reached threshold (%u)"
1516 				" you may be under attack."
1517 				" defensive action: clearing the cache",
1518 				(unsigned)outnet->unwanted_threshold);
1519 			fptr_ok(fptr_whitelist_alloc_cleanup(
1520 				outnet->unwanted_action));
1521 			(*outnet->unwanted_action)(outnet->unwanted_param);
1522 			outnet->unwanted_total = 0;
1523 		}
1524 		return 0;
1525 	}
1526 
1527 	verbose(VERB_ALGO, "received udp reply.");
1528 	log_buf(VERB_ALGO, "udp message", c->buffer);
1529 	if(p->pc->cp != c) {
1530 		verbose(VERB_QUERY, "received reply id,addr on wrong port. "
1531 			"dropped.");
1532 		outnet->unwanted_replies++;
1533 		if(outnet->unwanted_threshold && ++outnet->unwanted_total
1534 			>= outnet->unwanted_threshold) {
1535 			log_warn("unwanted reply total reached threshold (%u)"
1536 				" you may be under attack."
1537 				" defensive action: clearing the cache",
1538 				(unsigned)outnet->unwanted_threshold);
1539 			fptr_ok(fptr_whitelist_alloc_cleanup(
1540 				outnet->unwanted_action));
1541 			(*outnet->unwanted_action)(outnet->unwanted_param);
1542 			outnet->unwanted_total = 0;
1543 		}
1544 		return 0;
1545 	}
1546 	comm_timer_disable(p->timer);
1547 	verbose(VERB_ALGO, "outnet handle udp reply");
1548 	/* delete from tree first in case callback creates a retry */
1549 	(void)rbtree_delete(outnet->pending, p->node.key);
1550 	if(p->cb) {
1551 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1552 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info);
1553 	}
1554 	portcomm_loweruse(outnet, p->pc);
1555 	pending_delete(NULL, p);
1556 	outnet_send_wait_udp(outnet);
1557 	return 0;
1558 }
1559 
1560 /** calculate number of ip4 and ip6 interfaces */
1561 static void
1562 calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
1563 	int* num_ip4, int* num_ip6)
1564 {
1565 	int i;
1566 	*num_ip4 = 0;
1567 	*num_ip6 = 0;
1568 	if(num_ifs <= 0) {
1569 		if(do_ip4)
1570 			*num_ip4 = 1;
1571 		if(do_ip6)
1572 			*num_ip6 = 1;
1573 		return;
1574 	}
1575 	for(i=0; i<num_ifs; i++)
1576 	{
1577 		if(str_is_ip6(ifs[i])) {
1578 			if(do_ip6)
1579 				(*num_ip6)++;
1580 		} else {
1581 			if(do_ip4)
1582 				(*num_ip4)++;
1583 		}
1584 	}
1585 }
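/* Illustrative example of the counting above (hypothetical addresses):
 * with do_ip4=1, do_ip6=1 and ifs = { "192.0.2.1", "2001:db8::1",
 * "192.0.2.2" }, calc_num46() yields num_ip4=2 and num_ip6=1.  With
 * num_ifs=0 it falls back to one wildcard interface per enabled family,
 * so num_ip4=1 and num_ip6=1. */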
1586 
1587 void
1588 pending_udp_timer_delay_cb(void* arg)
1589 {
1590 	struct pending* p = (struct pending*)arg;
1591 	struct outside_network* outnet = p->outnet;
1592 	verbose(VERB_ALGO, "timeout udp with delay");
1593 	portcomm_loweruse(outnet, p->pc);
1594 	pending_delete(outnet, p);
1595 	outnet_send_wait_udp(outnet);
1596 }
1597 
1598 void
1599 pending_udp_timer_cb(void *arg)
1600 {
1601 	struct pending* p = (struct pending*)arg;
1602 	struct outside_network* outnet = p->outnet;
1603 	/* it timed out */
1604 	verbose(VERB_ALGO, "timeout udp");
1605 	if(p->cb) {
1606 		fptr_ok(fptr_whitelist_pending_udp(p->cb));
1607 		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL);
1608 	}
1609 	/* if delayclose, keep port open for a longer time.
1610 	 * But if the udpwaitlist exists, then we are struggling to
1611 	 * keep up with demand for sockets, so do not wait, but service
1612 	 * the customer (customer service is more important than port ICMPs) */
1613 	if(outnet->delayclose && !outnet->udp_wait_first) {
1614 		p->cb = NULL;
1615 		p->timer->callback = &pending_udp_timer_delay_cb;
1616 		comm_timer_set(p->timer, &outnet->delay_tv);
1617 		return;
1618 	}
1619 	portcomm_loweruse(outnet, p->pc);
1620 	pending_delete(outnet, p);
1621 	outnet_send_wait_udp(outnet);
1622 }
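/* Illustrative example of the delayclose behaviour above: with a delayclose
 * value of 1500 (milliseconds) passed to outside_network_create(), delay_tv
 * is set to 1 second and 500000 microseconds, so after a UDP timeout the
 * port is kept open for another 1.5 seconds to absorb late replies and port
 * ICMP errors, unless queries are already waiting for a free socket
 * (udp_wait_first is set). */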
1623 
1624 /** create pending_tcp buffers */
1625 static int
1626 create_pending_tcp(struct outside_network* outnet, size_t bufsize)
1627 {
1628 	size_t i;
1629 	if(outnet->num_tcp == 0)
1630 		return 1; /* no tcp needed, nothing to do */
1631 	if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
1632 			outnet->num_tcp, sizeof(struct pending_tcp*))))
1633 		return 0;
1634 	for(i=0; i<outnet->num_tcp; i++) {
1635 		if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
1636 			sizeof(struct pending_tcp))))
1637 			return 0;
1638 		outnet->tcp_conns[i]->next_free = outnet->tcp_free;
1639 		outnet->tcp_free = outnet->tcp_conns[i];
1640 		outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
1641 			outnet->base, bufsize, outnet_tcp_cb,
1642 			outnet->tcp_conns[i]);
1643 		if(!outnet->tcp_conns[i]->c)
1644 			return 0;
1645 	}
1646 	return 1;
1647 }
1648 
1649 /** setup an outgoing interface, prepare the address */
1650 static int setup_if(struct port_if* pif, const char* addrstr,
1651 	int* avail, int numavail, size_t numfd)
1652 {
1653 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1654 	pif->avail_total = numavail;
1655 	pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
1656 	if(!pif->avail_ports)
1657 		return 0;
1658 #endif
1659 	if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
1660 	   !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
1661 			      &pif->addr, &pif->addrlen, &pif->pfxlen))
1662 		return 0;
1663 	pif->maxout = (int)numfd;
1664 	pif->inuse = 0;
1665 	pif->out = (struct port_comm**)calloc(numfd,
1666 		sizeof(struct port_comm*));
1667 	if(!pif->out)
1668 		return 0;
1669 	return 1;
1670 }
1671 
1672 struct outside_network*
1673 outside_network_create(struct comm_base *base, size_t bufsize,
1674 	size_t num_ports, char** ifs, int num_ifs, int do_ip4,
1675 	int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
1676 	struct ub_randstate* rnd, int use_caps_for_id, int* availports,
1677 	int numavailports, size_t unwanted_threshold, int tcp_mss,
1678 	void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
1679 	void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
1680 	int udp_connect, int max_reuse_tcp_queries, int tcp_reuse_timeout,
1681 	int tcp_auth_query_timeout)
1682 {
1683 	struct outside_network* outnet = (struct outside_network*)
1684 		calloc(1, sizeof(struct outside_network));
1685 	size_t k;
1686 	if(!outnet) {
1687 		log_err("malloc failed");
1688 		return NULL;
1689 	}
1690 	comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
1691 	outnet->base = base;
1692 	outnet->num_tcp = num_tcp;
1693 	outnet->max_reuse_tcp_queries = max_reuse_tcp_queries;
1694 	outnet->tcp_reuse_timeout= tcp_reuse_timeout;
1695 	outnet->tcp_auth_query_timeout = tcp_auth_query_timeout;
1696 	outnet->num_tcp_outgoing = 0;
1697 	outnet->num_udp_outgoing = 0;
1698 	outnet->infra = infra;
1699 	outnet->rnd = rnd;
1700 	outnet->sslctx = sslctx;
1701 	outnet->tls_use_sni = tls_use_sni;
1702 #ifdef USE_DNSTAP
1703 	outnet->dtenv = dtenv;
1704 #else
1705 	(void)dtenv;
1706 #endif
1707 	outnet->svcd_overhead = 0;
1708 	outnet->want_to_quit = 0;
1709 	outnet->unwanted_threshold = unwanted_threshold;
1710 	outnet->unwanted_action = unwanted_action;
1711 	outnet->unwanted_param = unwanted_param;
1712 	outnet->use_caps_for_id = use_caps_for_id;
1713 	outnet->do_udp = do_udp;
1714 	outnet->tcp_mss = tcp_mss;
1715 	outnet->ip_dscp = dscp;
1716 #ifndef S_SPLINT_S
1717 	if(delayclose) {
1718 		outnet->delayclose = 1;
1719 		outnet->delay_tv.tv_sec = delayclose/1000;
1720 		outnet->delay_tv.tv_usec = (delayclose%1000)*1000;
1721 	}
1722 #endif
1723 	if(udp_connect) {
1724 		outnet->udp_connect = 1;
1725 	}
1726 	if(numavailports == 0 || num_ports == 0) {
1727 		log_err("no outgoing ports available");
1728 		outside_network_delete(outnet);
1729 		return NULL;
1730 	}
1731 #ifndef INET6
1732 	do_ip6 = 0;
1733 #endif
1734 	calc_num46(ifs, num_ifs, do_ip4, do_ip6,
1735 		&outnet->num_ip4, &outnet->num_ip6);
1736 	if(outnet->num_ip4 != 0) {
1737 		if(!(outnet->ip4_ifs = (struct port_if*)calloc(
1738 			(size_t)outnet->num_ip4, sizeof(struct port_if)))) {
1739 			log_err("malloc failed");
1740 			outside_network_delete(outnet);
1741 			return NULL;
1742 		}
1743 	}
1744 	if(outnet->num_ip6 != 0) {
1745 		if(!(outnet->ip6_ifs = (struct port_if*)calloc(
1746 			(size_t)outnet->num_ip6, sizeof(struct port_if)))) {
1747 			log_err("malloc failed");
1748 			outside_network_delete(outnet);
1749 			return NULL;
1750 		}
1751 	}
1752 	if(	!(outnet->udp_buff = sldns_buffer_new(bufsize)) ||
1753 		!(outnet->pending = rbtree_create(pending_cmp)) ||
1754 		!(outnet->serviced = rbtree_create(serviced_cmp)) ||
1755 		!create_pending_tcp(outnet, bufsize)) {
1756 		log_err("malloc failed");
1757 		outside_network_delete(outnet);
1758 		return NULL;
1759 	}
1760 	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1761 	outnet->tcp_reuse_max = num_tcp;
1762 
1763 	/* allocate commpoints */
1764 	for(k=0; k<num_ports; k++) {
1765 		struct port_comm* pc;
1766 		pc = (struct port_comm*)calloc(1, sizeof(*pc));
1767 		if(!pc) {
1768 			log_err("malloc failed");
1769 			outside_network_delete(outnet);
1770 			return NULL;
1771 		}
1772 		pc->cp = comm_point_create_udp(outnet->base, -1,
1773 			outnet->udp_buff, 0, outnet_udp_cb, outnet, NULL);
1774 		if(!pc->cp) {
1775 			log_err("malloc failed");
1776 			free(pc);
1777 			outside_network_delete(outnet);
1778 			return NULL;
1779 		}
1780 		pc->next = outnet->unused_fds;
1781 		outnet->unused_fds = pc;
1782 	}
1783 
1784 	/* allocate interfaces */
1785 	if(num_ifs == 0) {
1786 		if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0",
1787 			availports, numavailports, num_ports)) {
1788 			log_err("malloc failed");
1789 			outside_network_delete(outnet);
1790 			return NULL;
1791 		}
1792 		if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::",
1793 			availports, numavailports, num_ports)) {
1794 			log_err("malloc failed");
1795 			outside_network_delete(outnet);
1796 			return NULL;
1797 		}
1798 	} else {
1799 		size_t done_4 = 0, done_6 = 0;
1800 		int i;
1801 		for(i=0; i<num_ifs; i++) {
1802 			if(str_is_ip6(ifs[i]) && do_ip6) {
1803 				if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i],
1804 					availports, numavailports, num_ports)){
1805 					log_err("malloc failed");
1806 					outside_network_delete(outnet);
1807 					return NULL;
1808 				}
1809 				done_6++;
1810 			}
1811 			if(!str_is_ip6(ifs[i]) && do_ip4) {
1812 				if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i],
1813 					availports, numavailports, num_ports)){
1814 					log_err("malloc failed");
1815 					outside_network_delete(outnet);
1816 					return NULL;
1817 				}
1818 				done_4++;
1819 			}
1820 		}
1821 	}
1822 	return outnet;
1823 }
1824 
1825 /** helper pending delete */
1826 static void
1827 pending_node_del(rbnode_type* node, void* arg)
1828 {
1829 	struct pending* pend = (struct pending*)node;
1830 	struct outside_network* outnet = (struct outside_network*)arg;
1831 	pending_delete(outnet, pend);
1832 }
1833 
1834 /** helper serviced delete */
1835 static void
1836 serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg))
1837 {
1838 	struct serviced_query* sq = (struct serviced_query*)node;
1839 	alloc_reg_release(sq->alloc, sq->region);
1840 	if(sq->timer)
1841 		comm_timer_delete(sq->timer);
1842 	free(sq);
1843 }
1844 
1845 void
1846 outside_network_quit_prepare(struct outside_network* outnet)
1847 {
1848 	if(!outnet)
1849 		return;
1850 	/* prevent queued items from being sent */
1851 	outnet->want_to_quit = 1;
1852 }
1853 
1854 void
1855 outside_network_delete(struct outside_network* outnet)
1856 {
1857 	if(!outnet)
1858 		return;
1859 	outnet->want_to_quit = 1;
1860 	/* check every element, since we can be called on malloc error */
1861 	if(outnet->pending) {
1862 		/* free pending elements, but do not unlink them from the tree. */
1863 		traverse_postorder(outnet->pending, pending_node_del, NULL);
1864 		free(outnet->pending);
1865 	}
1866 	if(outnet->serviced) {
1867 		traverse_postorder(outnet->serviced, serviced_node_del, NULL);
1868 		free(outnet->serviced);
1869 	}
1870 	if(outnet->udp_buff)
1871 		sldns_buffer_free(outnet->udp_buff);
1872 	if(outnet->unused_fds) {
1873 		struct port_comm* p = outnet->unused_fds, *np;
1874 		while(p) {
1875 			np = p->next;
1876 			comm_point_delete(p->cp);
1877 			free(p);
1878 			p = np;
1879 		}
1880 		outnet->unused_fds = NULL;
1881 	}
1882 	if(outnet->ip4_ifs) {
1883 		int i, k;
1884 		for(i=0; i<outnet->num_ip4; i++) {
1885 			for(k=0; k<outnet->ip4_ifs[i].inuse; k++) {
1886 				struct port_comm* pc = outnet->ip4_ifs[i].
1887 					out[k];
1888 				comm_point_delete(pc->cp);
1889 				free(pc);
1890 			}
1891 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1892 			free(outnet->ip4_ifs[i].avail_ports);
1893 #endif
1894 			free(outnet->ip4_ifs[i].out);
1895 		}
1896 		free(outnet->ip4_ifs);
1897 	}
1898 	if(outnet->ip6_ifs) {
1899 		int i, k;
1900 		for(i=0; i<outnet->num_ip6; i++) {
1901 			for(k=0; k<outnet->ip6_ifs[i].inuse; k++) {
1902 				struct port_comm* pc = outnet->ip6_ifs[i].
1903 					out[k];
1904 				comm_point_delete(pc->cp);
1905 				free(pc);
1906 			}
1907 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1908 			free(outnet->ip6_ifs[i].avail_ports);
1909 #endif
1910 			free(outnet->ip6_ifs[i].out);
1911 		}
1912 		free(outnet->ip6_ifs);
1913 	}
1914 	if(outnet->tcp_conns) {
1915 		size_t i;
1916 		for(i=0; i<outnet->num_tcp; i++)
1917 			if(outnet->tcp_conns[i]) {
1918 				struct pending_tcp* pend;
1919 				pend = outnet->tcp_conns[i];
1920 				if(pend->reuse.item_on_lru_list) {
1921 					/* delete waiting_tcp elements that
1922 					 * the tcp conn is working on */
1923 					decommission_pending_tcp(outnet, pend);
1924 				}
1925 				comm_point_delete(outnet->tcp_conns[i]->c);
1926 				free(outnet->tcp_conns[i]);
1927 				outnet->tcp_conns[i] = NULL;
1928 			}
1929 		free(outnet->tcp_conns);
1930 		outnet->tcp_conns = NULL;
1931 	}
1932 	if(outnet->tcp_wait_first) {
1933 		struct waiting_tcp* p = outnet->tcp_wait_first, *np;
1934 		while(p) {
1935 			np = p->next_waiting;
1936 			waiting_tcp_delete(p);
1937 			p = np;
1938 		}
1939 	}
1940 	/* was allocated in the struct pending_tcp that was deleted above */
1941 	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1942 	outnet->tcp_reuse_first = NULL;
1943 	outnet->tcp_reuse_last = NULL;
1944 	if(outnet->udp_wait_first) {
1945 		struct pending* p = outnet->udp_wait_first, *np;
1946 		while(p) {
1947 			np = p->next_waiting;
1948 			pending_delete(NULL, p);
1949 			p = np;
1950 		}
1951 	}
1952 	free(outnet);
1953 }
1954 
1955 void
1956 pending_delete(struct outside_network* outnet, struct pending* p)
1957 {
1958 	if(!p)
1959 		return;
1960 	if(outnet && outnet->udp_wait_first &&
1961 		(p->next_waiting || p == outnet->udp_wait_last) ) {
1962 		/* delete from waiting list, if it is in the waiting list */
1963 		struct pending* prev = NULL, *x = outnet->udp_wait_first;
1964 		while(x && x != p) {
1965 			prev = x;
1966 			x = x->next_waiting;
1967 		}
1968 		if(x) {
1969 			log_assert(x == p);
1970 			if(prev)
1971 				prev->next_waiting = p->next_waiting;
1972 			else	outnet->udp_wait_first = p->next_waiting;
1973 			if(outnet->udp_wait_last == p)
1974 				outnet->udp_wait_last = prev;
1975 		}
1976 	}
1977 	if(outnet) {
1978 		(void)rbtree_delete(outnet->pending, p->node.key);
1979 	}
1980 	if(p->timer)
1981 		comm_timer_delete(p->timer);
1982 	free(p->pkt);
1983 	free(p);
1984 }
1985 
1986 static void
1987 sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd)
1988 {
1989 	int i, last;
1990 	if(!(pfxlen > 0 && pfxlen < 128))
1991 		return;
1992 	for(i = 0; i < (128 - pfxlen) / 8; i++) {
1993 		sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256);
1994 	}
1995 	last = pfxlen & 7;
1996 	if(last != 0) {
1997 		sa->sin6_addr.s6_addr[15-i] |=
1998 			((0xFF >> last) & ub_random_max(rnd, 256));
1999 	}
2000 }
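/* Worked example for the prefix randomisation above (hypothetical prefix
 * lengths): with pfxlen=56, (128-56)/8 = 9 whole bytes s6_addr[7..15] are
 * replaced by random bytes and 56&7 == 0 leaves no partial boundary byte.
 * With pfxlen=60, bytes s6_addr[8..15] are replaced and the low four host
 * bits of s6_addr[7] are ORed with random bits (mask 0xFF>>4 == 0x0f);
 * the upper 60 prefix bits are never touched. */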
2001 
2002 /**
2003  * Try to open a UDP socket for outgoing communication.
2004  * Sets sockets options as needed.
2005  * @param addr: socket address.
2006  * @param addrlen: length of address.
2007  * @param pfxlen: length of network prefix (for address randomisation).
2008  * @param port: port override for addr.
2009  * @param inuse: if -1 is returned, this bool means the port was in use.
2010  * @param rnd: random state (for address randomisation).
2011  * @param dscp: DSCP to use.
2012  * @return fd or -1
2013  */
2014 static int
2015 udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen,
2016 	int port, int* inuse, struct ub_randstate* rnd, int dscp)
2017 {
2018 	int fd, noproto;
2019 	if(addr_is_ip6(addr, addrlen)) {
2020 		int freebind = 0;
2021 		struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
2022 		sa.sin6_port = (in_port_t)htons((uint16_t)port);
2023 		sa.sin6_flowinfo = 0;
2024 		sa.sin6_scope_id = 0;
2025 		if(pfxlen != 0) {
2026 			freebind = 1;
2027 			sai6_putrandom(&sa, pfxlen, rnd);
2028 		}
2029 		fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
2030 			(struct sockaddr*)&sa, addrlen, 1, inuse, &noproto,
2031 			0, 0, 0, NULL, 0, freebind, 0, dscp);
2032 	} else {
2033 		struct sockaddr_in* sa = (struct sockaddr_in*)addr;
2034 		sa->sin_port = (in_port_t)htons((uint16_t)port);
2035 		fd = create_udp_sock(AF_INET, SOCK_DGRAM,
2036 			(struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
2037 			0, 0, 0, NULL, 0, 0, 0, dscp);
2038 	}
2039 	return fd;
2040 }
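/* Usage sketch for udp_sockport() (illustrative only; the port number is
 * hypothetical), given a struct port_if* pif and struct outside_network*
 * outnet:
 *	int inuse = 0;
 *	int fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
 *		34567, &inuse, outnet->rnd, outnet->ip_dscp);
 *	if(fd == -1 && inuse) ... the port was taken, try another one
 *	else if(fd == -1) ... nonrecoverable socket error
 * This is the pattern select_ifport() below uses to walk candidate ports. */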
2041 
2042 /** Select random ID */
2043 static int
2044 select_id(struct outside_network* outnet, struct pending* pend,
2045 	sldns_buffer* packet)
2046 {
2047 	int id_tries = 0;
2048 	pend->id = GET_RANDOM_ID(outnet->rnd);
2049 	LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
2050 
2051 	/* insert in tree */
2052 	pend->node.key = pend;
2053 	while(!rbtree_insert(outnet->pending, &pend->node)) {
2054 		/* change ID to avoid collision */
2055 		pend->id = GET_RANDOM_ID(outnet->rnd);
2056 		LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
2057 		id_tries++;
2058 		if(id_tries == MAX_ID_RETRY) {
2059 			pend->id=99999; /* non existent ID */
2060 			log_err("failed to generate unique ID, drop msg");
2061 			return 0;
2062 		}
2063 	}
2064 	verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id);
2065 	return 1;
2066 }
2067 
2068 /** return true if the UDP connect error needs to be logged */
2069 static int udp_connect_needs_log(int err, struct sockaddr_storage* addr,
2070 	socklen_t addrlen)
2071 {
2072 	switch(err) {
2073 	case ECONNREFUSED:
2074 #  ifdef ENETUNREACH
2075 	case ENETUNREACH:
2076 #  endif
2077 #  ifdef EHOSTDOWN
2078 	case EHOSTDOWN:
2079 #  endif
2080 #  ifdef EHOSTUNREACH
2081 	case EHOSTUNREACH:
2082 #  endif
2083 #  ifdef ENETDOWN
2084 	case ENETDOWN:
2085 #  endif
2086 #  ifdef EADDRNOTAVAIL
2087 	case EADDRNOTAVAIL:
2088 #  endif
2089 	case EPERM:
2090 	case EACCES:
2091 		if(verbosity >= VERB_ALGO)
2092 			return 1;
2093 		return 0;
2094 	case EINVAL:
2095 		/* Stop 'Invalid argument' for fe80::/10 addresses from
2096 		 * appearing in the logs at low verbosity. They cannot be sent to. */
2097 		if(addr_is_ip6linklocal(addr, addrlen)) {
2098 			if(verbosity >= VERB_ALGO)
2099 				return 1;
2100 			return 0;
2101 		}
2102 		break;
2103 	default:
2104 		break;
2105 	}
2106 	return 1;
2107 }
2108 
2109 
2110 /** Select random interface and port */
2111 static int
2112 select_ifport(struct outside_network* outnet, struct pending* pend,
2113 	int num_if, struct port_if* ifs)
2114 {
2115 	int my_if, my_port, fd, portno, inuse, tries=0;
2116 	struct port_if* pif;
2117 	/* randomly select interface and port */
2118 	if(num_if == 0) {
2119 		verbose(VERB_QUERY, "Need to send query but have no "
2120 			"outgoing interfaces of that family");
2121 		return 0;
2122 	}
2123 	log_assert(outnet->unused_fds);
2124 	tries = 0;
2125 	while(1) {
2126 		my_if = ub_random_max(outnet->rnd, num_if);
2127 		pif = &ifs[my_if];
2128 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
2129 		if(outnet->udp_connect) {
2130 			/* if we connect() we cannot reuse fds for a port */
2131 			if(pif->inuse >= pif->avail_total) {
2132 				tries++;
2133 				if(tries < MAX_PORT_RETRY)
2134 					continue;
2135 				log_err("failed to find an open port, drop msg");
2136 				return 0;
2137 			}
2138 			my_port = pif->inuse + ub_random_max(outnet->rnd,
2139 				pif->avail_total - pif->inuse);
2140 		} else  {
2141 			my_port = ub_random_max(outnet->rnd, pif->avail_total);
2142 			if(my_port < pif->inuse) {
2143 				/* port already open */
2144 				pend->pc = pif->out[my_port];
2145 				verbose(VERB_ALGO, "using UDP if=%d port=%d",
2146 					my_if, pend->pc->number);
2147 				break;
2148 			}
2149 		}
2150 		/* try to open a new port; if it fails, loop to try again */
2151 		log_assert(pif->inuse < pif->maxout);
2152 		portno = pif->avail_ports[my_port - pif->inuse];
2153 #else
2154 		my_port = portno = 0;
2155 #endif
2156 		fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
2157 			portno, &inuse, outnet->rnd, outnet->ip_dscp);
2158 		if(fd == -1 && !inuse) {
2159 			/* nonrecoverable error making socket */
2160 			return 0;
2161 		}
2162 		if(fd != -1) {
2163 			verbose(VERB_ALGO, "opened UDP if=%d port=%d",
2164 				my_if, portno);
2165 			if(outnet->udp_connect) {
2166 				/* connect() to the destination */
2167 				if(connect(fd, (struct sockaddr*)&pend->addr,
2168 					pend->addrlen) < 0) {
2169 					if(udp_connect_needs_log(errno,
2170 						&pend->addr, pend->addrlen)) {
2171 						log_err_addr("udp connect failed",
2172 							strerror(errno), &pend->addr,
2173 							pend->addrlen);
2174 					}
2175 					sock_close(fd);
2176 					return 0;
2177 				}
2178 			}
2179 			/* grab fd */
2180 			pend->pc = outnet->unused_fds;
2181 			outnet->unused_fds = pend->pc->next;
2182 
2183 			/* setup portcomm */
2184 			pend->pc->next = NULL;
2185 			pend->pc->number = portno;
2186 			pend->pc->pif = pif;
2187 			pend->pc->index = pif->inuse;
2188 			pend->pc->num_outstanding = 0;
2189 			comm_point_start_listening(pend->pc->cp, fd, -1);
2190 
2191 			/* grab port in interface */
2192 			pif->out[pif->inuse] = pend->pc;
2193 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
2194 			pif->avail_ports[my_port - pif->inuse] =
2195 				pif->avail_ports[pif->avail_total-pif->inuse-1];
2196 #endif
2197 			pif->inuse++;
2198 			break;
2199 		}
2200 		/* failed, already in use */
2201 		verbose(VERB_QUERY, "port %d in use, trying another", portno);
2202 		tries++;
2203 		if(tries == MAX_PORT_RETRY) {
2204 			log_err("failed to find an open port, drop msg");
2205 			return 0;
2206 		}
2207 	}
2208 	log_assert(pend->pc);
2209 	pend->pc->num_outstanding++;
2210 
2211 	return 1;
2212 }
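/* Worked example of the avail_ports bookkeeping above (hypothetical port
 * numbers; non-udp_connect case with explicit port randomisation enabled):
 * suppose avail_total=5, avail_ports = {2000,2001,2002,2003,2004} and
 * inuse=0.  A draw of my_port=2 opens avail_ports[2]=2002; on success
 * out[0] is set, avail_ports[2] is overwritten with avail_ports[4]=2004 and
 * inuse becomes 1, so the still-closed ports occupy avail_ports[0..3] =
 * {2000,2001,2004,2003}.  A later draw of my_port=0 (< inuse) reuses the
 * open port_comm out[0], while my_port=3 opens avail_ports[3-1]=2004.
 * Drawing my_port uniformly from [0, avail_total) therefore reuses an open
 * port with probability inuse/avail_total and otherwise picks uniformly
 * among the closed ones. */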
2213 
2214 static int
2215 randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout)
2216 {
2217 	struct timeval tv;
2218 	struct outside_network* outnet = pend->sq->outnet;
2219 
2220 	/* select id */
2221 	if(!select_id(outnet, pend, packet)) {
2222 		return 0;
2223 	}
2224 
2225 	/* select src_if, port */
2226 	if(addr_is_ip6(&pend->addr, pend->addrlen)) {
2227 		if(!select_ifport(outnet, pend,
2228 			outnet->num_ip6, outnet->ip6_ifs))
2229 			return 0;
2230 	} else {
2231 		if(!select_ifport(outnet, pend,
2232 			outnet->num_ip4, outnet->ip4_ifs))
2233 			return 0;
2234 	}
2235 	log_assert(pend->pc && pend->pc->cp);
2236 
2237 	/* send it over the commlink */
2238 	if(!comm_point_send_udp_msg(pend->pc->cp, packet,
2239 		(struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) {
2240 		portcomm_loweruse(outnet, pend->pc);
2241 		return 0;
2242 	}
2243 	outnet->num_udp_outgoing++;
2244 
2245 	/* the system calls to set the timeout are done after sending the
2246 	   UDP packet, to keep the roundtrip smaller. */
2247 #ifndef S_SPLINT_S
2248 	tv.tv_sec = timeout/1000;
2249 	tv.tv_usec = (timeout%1000)*1000;
2250 #endif
2251 	comm_timer_set(pend->timer, &tv);
2252 
2253 #ifdef USE_DNSTAP
2254 	/*
2255 	 * send src (local service)/dst (upstream) addresses over DNSTAP.
2256 	 * There is no way to get the src (local service) addr if unbound
2257 	 * is not configured with specific outgoing IP addresses, so
2258 	 * 0.0.0.0 (or ::) is passed as that argument to the
2259 	 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls.
2260 	 */
2261 	if(outnet->dtenv &&
2262 	   (outnet->dtenv->log_resolver_query_messages ||
2263 		outnet->dtenv->log_forwarder_query_messages)) {
2264 			log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen);
2265 			log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen);
2266 			dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp, NULL,
2267 				pend->sq->zone, pend->sq->zonelen, packet);
2268 	}
2269 #endif
2270 	return 1;
2271 }
2272 
2273 struct pending*
2274 pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet,
2275 	int timeout, comm_point_callback_type* cb, void* cb_arg)
2276 {
2277 	struct pending* pend = (struct pending*)calloc(1, sizeof(*pend));
2278 	if(!pend) return NULL;
2279 	pend->outnet = sq->outnet;
2280 	pend->sq = sq;
2281 	pend->addrlen = sq->addrlen;
2282 	memmove(&pend->addr, &sq->addr, sq->addrlen);
2283 	pend->cb = cb;
2284 	pend->cb_arg = cb_arg;
2285 	pend->node.key = pend;
2286 	pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb,
2287 		pend);
2288 	if(!pend->timer) {
2289 		free(pend);
2290 		return NULL;
2291 	}
2292 
2293 	if(sq->outnet->unused_fds == NULL) {
2294 		/* no unused fd, cannot create a new port (randomly) */
2295 		verbose(VERB_ALGO, "no fds available, udp query waiting");
2296 		pend->timeout = timeout;
2297 		pend->pkt_len = sldns_buffer_limit(packet);
2298 		pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet),
2299 			pend->pkt_len);
2300 		if(!pend->pkt) {
2301 			comm_timer_delete(pend->timer);
2302 			free(pend);
2303 			return NULL;
2304 		}
2305 		/* put at end of waiting list */
2306 		if(sq->outnet->udp_wait_last)
2307 			sq->outnet->udp_wait_last->next_waiting = pend;
2308 		else
2309 			sq->outnet->udp_wait_first = pend;
2310 		sq->outnet->udp_wait_last = pend;
2311 		return pend;
2312 	}
2313 	log_assert(!sq->busy);
2314 	sq->busy = 1;
2315 	if(!randomize_and_send_udp(pend, packet, timeout)) {
2316 		pending_delete(sq->outnet, pend);
2317 		return NULL;
2318 	}
2319 	sq->busy = 0;
2320 	return pend;
2321 }
2322 
2323 void
2324 outnet_tcptimer(void* arg)
2325 {
2326 	struct waiting_tcp* w = (struct waiting_tcp*)arg;
2327 	struct outside_network* outnet = w->outnet;
2328 	verbose(VERB_CLIENT, "outnet_tcptimer");
2329 	if(w->on_tcp_waiting_list) {
2330 		/* it is on the waiting list */
2331 		outnet_waiting_tcp_list_remove(outnet, w);
2332 		waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL);
2333 		waiting_tcp_delete(w);
2334 	} else {
2335 		/* it was in use */
2336 		struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting;
2337 		reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT);
2338 	}
2339 	use_free_buffer(outnet);
2340 }
2341 
2342 /** close the oldest reuse_tcp connection to make a fd and struct pend
2343  * available for a new stream connection */
2344 static void
2345 reuse_tcp_close_oldest(struct outside_network* outnet)
2346 {
2347 	struct reuse_tcp* reuse;
2348 	verbose(VERB_CLIENT, "reuse_tcp_close_oldest");
2349 	reuse = reuse_tcp_lru_snip(outnet);
2350 	if(!reuse) return;
2351 	/* free up */
2352 	reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED);
2353 }
2354 
2355 static uint16_t
2356 tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse)
2357 {
2358 	if(reuse)
2359 		return reuse_tcp_select_id(reuse, outnet);
2360 	return GET_RANDOM_ID(outnet->rnd);
2361 }
2362 
2363 /** find spare ID value for reuse tcp stream.  That is random and also does
2364  * not collide with an existing query ID that is in use or waiting */
2365 uint16_t
2366 reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet)
2367 {
2368 	uint16_t id = 0, curid, nextid;
2369 	const int try_random = 2000;
2370 	int i;
2371 	unsigned select, count, space;
2372 	rbnode_type* node;
2373 
2374 	/* make really sure the tree is not empty */
2375 	if(reuse->tree_by_id.count == 0) {
2376 		id = GET_RANDOM_ID(outnet->rnd);
2377 		return id;
2378 	}
2379 
2380 	/* try to find random empty spots by picking them */
2381 	for(i = 0; i<try_random; i++) {
2382 		id = GET_RANDOM_ID(outnet->rnd);
2383 		if(!reuse_tcp_by_id_find(reuse, id)) {
2384 			return id;
2385 		}
2386 	}
2387 
2388 	/* otherwise pick a random unused ID uniformly: pick the n-th
2389 	 * unused number, then loop over the empty spaces between the
2390 	 * tree nodes to find it */
2391 	log_assert(reuse->tree_by_id.count < 0xffff);
2392 	select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count);
2393 	/* select value now in 0 .. num free - 1 */
2394 
2395 	count = 0; /* number of free spaces passed by */
2396 	node = rbtree_first(&reuse->tree_by_id);
2397 	log_assert(node && node != RBTREE_NULL); /* tree not empty */
2398 	/* see if select is before first node */
2399 	if(select < (unsigned)tree_by_id_get_id(node))
2400 		return select;
2401 	count += tree_by_id_get_id(node);
2402 	/* perhaps select is between nodes */
2403 	while(node && node != RBTREE_NULL) {
2404 		rbnode_type* next = rbtree_next(node);
2405 		if(next && next != RBTREE_NULL) {
2406 			curid = tree_by_id_get_id(node);
2407 			nextid = tree_by_id_get_id(next);
2408 			log_assert(curid < nextid);
2409 			if(curid != 0xffff && curid + 1 < nextid) {
2410 				/* space between nodes */
2411 				space = nextid - curid - 1;
2412 				log_assert(select >= count);
2413 				if(select < count + space) {
2414 					/* here it is */
2415 					return curid + 1 + (select - count);
2416 				}
2417 				count += space;
2418 			}
2419 		}
2420 		node = next;
2421 	}
2422 
2423 	/* select is after the last node */
2424 	/* count is the number of free positions before the nodes in the
2425 	 * tree */
2426 	node = rbtree_last(&reuse->tree_by_id);
2427 	log_assert(node && node != RBTREE_NULL); /* tree not empty */
2428 	curid = tree_by_id_get_id(node);
2429 	log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff);
2430 	return curid + 1 + (select - count);
2431 }
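/* Worked example of the gap selection above: if tree_by_id holds the IDs
 * {5, 9}, select is drawn from [0, 0xffff - 2).  A select of 3 is below the
 * first ID and is returned as-is.  Otherwise count becomes 5 (free IDs 0..4
 * skipped); the gap between 5 and 9 holds three free IDs, so select=6 maps
 * to 5+1+(6-5) = 7.  Any larger select falls after the last node and maps
 * to 9+1+(select-8), e.g. select=10 yields 12. */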
2432 
2433 struct waiting_tcp*
2434 pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
2435 	int timeout, comm_point_callback_type* callback, void* callback_arg)
2436 {
2437 	struct pending_tcp* pend = sq->outnet->tcp_free;
2438 	struct reuse_tcp* reuse = NULL;
2439 	struct waiting_tcp* w;
2440 
2441 	verbose(VERB_CLIENT, "pending_tcp_query");
2442 	if(sldns_buffer_limit(packet) < sizeof(uint16_t)) {
2443 		verbose(VERB_ALGO, "pending tcp query with too short buffer < 2");
2444 		return NULL;
2445 	}
2446 
2447 	/* find out if a reused stream to the target exists */
2448 	/* if so, take it into use */
2449 	reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen,
2450 		sq->ssl_upstream);
2451 	if(reuse) {
2452 		log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse);
2453 		log_assert(reuse->pending);
2454 		pend = reuse->pending;
2455 		reuse_tcp_lru_touch(sq->outnet, reuse);
2456 	}
2457 
2458 	log_assert(!reuse || (reuse && pend));
2459 	/* if !pend but we have reuse streams, close a reuse stream
2460 	 * to be able to open a new one to this target, no use waiting
2461 	 * to reuse a file descriptor while another query needs to use
2462 	 * that buffer and file descriptor now. */
2463 	if(!pend) {
2464 		reuse_tcp_close_oldest(sq->outnet);
2465 		pend = sq->outnet->tcp_free;
2466 		log_assert(!reuse || (pend == reuse->pending));
2467 	}
2468 
2469 	/* allocate space to store query */
2470 	w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp)
2471 		+ sldns_buffer_limit(packet));
2472 	if(!w) {
2473 		return NULL;
2474 	}
2475 	if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) {
2476 		free(w);
2477 		return NULL;
2478 	}
2479 	w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp);
2480 	w->pkt_len = sldns_buffer_limit(packet);
2481 	memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len);
2482 	w->id = tcp_select_id(sq->outnet, reuse);
2483 	LDNS_ID_SET(w->pkt, w->id);
2484 	memcpy(&w->addr, &sq->addr, sq->addrlen);
2485 	w->addrlen = sq->addrlen;
2486 	w->outnet = sq->outnet;
2487 	w->on_tcp_waiting_list = 0;
2488 	w->next_waiting = NULL;
2489 	w->cb = callback;
2490 	w->cb_arg = callback_arg;
2491 	w->ssl_upstream = sq->ssl_upstream;
2492 	w->tls_auth_name = sq->tls_auth_name;
2493 	w->timeout = timeout;
2494 	w->id_node.key = NULL;
2495 	w->write_wait_prev = NULL;
2496 	w->write_wait_next = NULL;
2497 	w->write_wait_queued = 0;
2498 	w->error_count = 0;
2499 #ifdef USE_DNSTAP
2500 	w->sq = NULL;
2501 #endif
2502 	w->in_cb_and_decommission = 0;
2503 	if(pend) {
2504 		/* we have a buffer available right now */
2505 		if(reuse) {
2506 			log_assert(reuse == &pend->reuse);
2507 			/* reuse existing fd, write query and continue */
2508 			/* store query in tree by id */
2509 			verbose(VERB_CLIENT, "pending_tcp_query: reuse, store");
2510 			w->next_waiting = (void*)pend;
2511 			reuse_tree_by_id_insert(&pend->reuse, w);
2512 			/* can we write right now? */
2513 			if(pend->query == NULL) {
2514 				/* write straight away */
2515 				/* stop the timer on read of the fd */
2516 				comm_point_stop_listening(pend->c);
2517 				pend->query = w;
2518 				outnet_tcp_take_query_setup(pend->c->fd, pend,
2519 					w);
2520 			} else {
2521 				/* put it in the waiting list for
2522 				 * this stream */
2523 				reuse_write_wait_push_back(&pend->reuse, w);
2524 			}
2525 		} else {
2526 			/* create new fd and connect to addr, setup to
2527 			 * write query */
2528 			verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect");
2529 			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
2530 			pend->reuse.pending = pend;
2531 			memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen);
2532 			pend->reuse.addrlen = sq->addrlen;
2533 			if(!outnet_tcp_take_into_use(w)) {
2534 				waiting_tcp_delete(w);
2535 				return NULL;
2536 			}
2537 		}
2538 #ifdef USE_DNSTAP
2539 		if(sq->outnet->dtenv &&
2540 		   (sq->outnet->dtenv->log_resolver_query_messages ||
2541 		    sq->outnet->dtenv->log_forwarder_query_messages)) {
2542 			/* use w->pkt, because it has the ID value */
2543 			sldns_buffer tmp;
2544 			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
2545 			dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr,
2546 				&pend->pi->addr, comm_tcp, NULL, sq->zone,
2547 				sq->zonelen, &tmp);
2548 		}
2549 #endif
2550 	} else {
2551 		/* queue up */
2552 		/* waiting for a buffer on the outside network buffer wait
2553 		 * list */
2554 		verbose(VERB_CLIENT, "pending_tcp_query: queue to wait");
2555 #ifdef USE_DNSTAP
2556 		w->sq = sq;
2557 #endif
2558 		outnet_waiting_tcp_list_add(sq->outnet, w, 1);
2559 	}
2560 	return w;
2561 }
2562 
2563 /** create query for serviced queries */
2564 static void
2565 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen,
2566 	uint16_t qtype, uint16_t qclass, uint16_t flags)
2567 {
2568 	sldns_buffer_clear(buff);
2569 	/* skip id */
2570 	sldns_buffer_write_u16(buff, flags);
2571 	sldns_buffer_write_u16(buff, 1); /* qdcount */
2572 	sldns_buffer_write_u16(buff, 0); /* ancount */
2573 	sldns_buffer_write_u16(buff, 0); /* nscount */
2574 	sldns_buffer_write_u16(buff, 0); /* arcount */
2575 	sldns_buffer_write(buff, qname, qnamelen);
2576 	sldns_buffer_write_u16(buff, qtype);
2577 	sldns_buffer_write_u16(buff, qclass);
2578 	sldns_buffer_flip(buff);
2579 }
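/* Layout sketch of the buffer produced by serviced_gen_query() (the 2-byte
 * DNS ID is prepended later by serviced_encode(), so offsets here are
 * ID-less):
 *	offset  0: flags (2 bytes)
 *	offset  2: qdcount=1, ancount=0, nscount=0, arcount=0 (2 bytes each)
 *	offset 10: qname (qnamelen bytes), then qtype and qclass
 * This is why the qname in sq->qbuf is addressed at offset 10 elsewhere in
 * this file (serviced_perturb_qname, serviced_check_qname). */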
2580 
2581 /** lookup serviced query in serviced query rbtree */
2582 static struct serviced_query*
2583 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2584 	struct sockaddr_storage* addr, socklen_t addrlen,
2585 	struct edns_option* opt_list)
2586 {
2587 	struct serviced_query key;
2588 	key.node.key = &key;
2589 	key.qbuf = sldns_buffer_begin(buff);
2590 	key.qbuflen = sldns_buffer_limit(buff);
2591 	key.dnssec = dnssec;
2592 	memcpy(&key.addr, addr, addrlen);
2593 	key.addrlen = addrlen;
2594 	key.outnet = outnet;
2595 	key.opt_list = opt_list;
2596 	return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
2597 }
2598 
2599 void
2600 serviced_timer_cb(void* arg)
2601 {
2602 	struct serviced_query* sq = (struct serviced_query*)arg;
2603 	struct outside_network* outnet = sq->outnet;
2604 	verbose(VERB_ALGO, "serviced send timer");
2605 	/* By the time this cb is called we may not have any registered
2606 	 * callbacks for this serviced_query anymore; in that case do not send. */
2607 	if(!sq->cblist)
2608 		goto delete;
2609 	/* perform first network action */
2610 	if(outnet->do_udp && !(sq->tcp_upstream || sq->ssl_upstream)) {
2611 		if(!serviced_udp_send(sq, outnet->udp_buff))
2612 			goto delete;
2613 	} else {
2614 		if(!serviced_tcp_send(sq, outnet->udp_buff))
2615 			goto delete;
2616 	}
2617 	/* Maybe by this time we don't have callbacks attached anymore. Don't
2618 	 * proactively try to delete; let it run and maybe another callback
2619 	 * will get attached by the time we get an answer. */
2620 	return;
2621 delete:
2622 	serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
2623 }
2624 
2625 /** Create new serviced entry */
2626 static struct serviced_query*
2627 serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2628 	int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream,
2629 	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
2630 	uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list,
2631 	size_t pad_queries_block_size, struct alloc_cache* alloc,
2632 	struct regional* region)
2633 {
2634 	struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
2635 	struct timeval t;
2636 #ifdef UNBOUND_DEBUG
2637 	rbnode_type* ins;
2638 #endif
2639 	if(!sq) {
2640 		alloc_reg_release(alloc, region);
2641 		return NULL;
2642 	}
2643 	sq->node.key = sq;
2644 	sq->alloc = alloc;
2645 	sq->region = region;
2646 	sq->qbuf = regional_alloc_init(region, sldns_buffer_begin(buff),
2647 		sldns_buffer_limit(buff));
2648 	if(!sq->qbuf) {
2649 		alloc_reg_release(alloc, region);
2650 		free(sq);
2651 		return NULL;
2652 	}
2653 	sq->qbuflen = sldns_buffer_limit(buff);
2654 	sq->zone = regional_alloc_init(region, zone, zonelen);
2655 	if(!sq->zone) {
2656 		alloc_reg_release(alloc, region);
2657 		free(sq);
2658 		return NULL;
2659 	}
2660 	sq->zonelen = zonelen;
2661 	sq->qtype = qtype;
2662 	sq->dnssec = dnssec;
2663 	sq->want_dnssec = want_dnssec;
2664 	sq->nocaps = nocaps;
2665 	sq->tcp_upstream = tcp_upstream;
2666 	sq->ssl_upstream = ssl_upstream;
2667 	if(tls_auth_name) {
2668 		sq->tls_auth_name = regional_strdup(region, tls_auth_name);
2669 		if(!sq->tls_auth_name) {
2670 			alloc_reg_release(alloc, region);
2671 			free(sq);
2672 			return NULL;
2673 		}
2674 	} else {
2675 		sq->tls_auth_name = NULL;
2676 	}
2677 	memcpy(&sq->addr, addr, addrlen);
2678 	sq->addrlen = addrlen;
2679 	sq->opt_list = opt_list;
2680 	sq->busy = 0;
2681 	sq->timer = comm_timer_create(outnet->base, serviced_timer_cb, sq);
2682 	if(!sq->timer) {
2683 		alloc_reg_release(alloc, region);
2684 		free(sq);
2685 		return NULL;
2686 	}
2687 	memset(&t, 0, sizeof(t));
2688 	comm_timer_set(sq->timer, &t);
2689 	sq->outnet = outnet;
2690 	sq->cblist = NULL;
2691 	sq->pending = NULL;
2692 	sq->status = serviced_initial;
2693 	sq->retry = 0;
2694 	sq->to_be_deleted = 0;
2695 	sq->padding_block_size = pad_queries_block_size;
2696 #ifdef UNBOUND_DEBUG
2697 	ins =
2698 #else
2699 	(void)
2700 #endif
2701 	rbtree_insert(outnet->serviced, &sq->node);
2702 	log_assert(ins != NULL); /* must not be already present */
2703 	return sq;
2704 }
2705 
2706 /** reuse tcp stream, remove serviced query from stream,
2707  * return true if the stream is kept, false if it is to be closed */
2708 static int
2709 reuse_tcp_remove_serviced_keep(struct waiting_tcp* w,
2710 	struct serviced_query* sq)
2711 {
2712 	struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting;
2713 	verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep");
2714 	/* remove the callback. let the query continue to write, so the
2715 	 * stream itself is not cancelled.  also keep it as an entry in the
2716 	 * tree_by_id, in case the answer (that we no longer want) returns,
2717 	 * so that the same ID number cannot be picked meanwhile */
2718 	w->cb = NULL;
2719 	/* see if it can be entered in the reuse tree;
2720 	 * for that the FD has to be something other than -1 */
2721 	if(pend_tcp->c->fd == -1) {
2722 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd");
2723 		return 0;
2724 	}
2725 	/* if in tree and used by other queries */
2726 	if(pend_tcp->reuse.node.key) {
2727 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries");
2728 		/* do not reset the keepalive timer, for that
2729 		 * we'd need traffic, and this is where the serviced query is
2730 		 * removed for state machine internal reasons,
2731 		 * e.g. the iterator is no longer interested in this query */
2732 		return 1;
2733 	}
2734 	/* if still open and want to keep it open */
2735 	if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count <
2736 		sq->outnet->tcp_reuse_max) {
2737 		verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open");
2738 		/* set a keepalive timer on it */
2739 		if(!reuse_tcp_insert(sq->outnet, pend_tcp)) {
2740 			return 0;
2741 		}
2742 		reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout);
2743 		return 1;
2744 	}
2745 	return 0;
2746 }
2747 
2748 /** cleanup serviced query entry */
2749 static void
2750 serviced_delete(struct serviced_query* sq)
2751 {
2752 	verbose(VERB_CLIENT, "serviced_delete");
2753 	if(sq->pending) {
2754 		/* clear up the pending query */
2755 		if(sq->status == serviced_query_UDP_EDNS ||
2756 			sq->status == serviced_query_UDP ||
2757 			sq->status == serviced_query_UDP_EDNS_FRAG ||
2758 			sq->status == serviced_query_UDP_EDNS_fallback) {
2759 			struct pending* p = (struct pending*)sq->pending;
2760 			verbose(VERB_CLIENT, "serviced_delete: UDP");
2761 			if(p->pc)
2762 				portcomm_loweruse(sq->outnet, p->pc);
2763 			pending_delete(sq->outnet, p);
2764 			/* this call can cause reentrant calls back into the
2765 			 * mesh */
2766 			outnet_send_wait_udp(sq->outnet);
2767 		} else {
2768 			struct waiting_tcp* w = (struct waiting_tcp*)
2769 				sq->pending;
2770 			verbose(VERB_CLIENT, "serviced_delete: TCP");
2771 			log_assert(!(w->write_wait_queued && w->on_tcp_waiting_list));
2772 			/* if on stream-write-waiting list then
2773 			 * remove from waiting list and waiting_tcp_delete */
2774 			if(w->write_wait_queued) {
2775 				struct pending_tcp* pend =
2776 					(struct pending_tcp*)w->next_waiting;
2777 				verbose(VERB_CLIENT, "serviced_delete: writewait");
2778 				if(!w->in_cb_and_decommission)
2779 					reuse_tree_by_id_delete(&pend->reuse, w);
2780 				reuse_write_wait_remove(&pend->reuse, w);
2781 				if(!w->in_cb_and_decommission)
2782 					waiting_tcp_delete(w);
2783 			} else if(!w->on_tcp_waiting_list) {
2784 				struct pending_tcp* pend =
2785 					(struct pending_tcp*)w->next_waiting;
2786 				verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep");
2787 				/* w needs to stay on tree_by_id to not assign
2788 				 * the same ID; remove the callback since its
2789 				 * serviced_query will be gone. */
2790 				w->cb = NULL;
2791 				if(!reuse_tcp_remove_serviced_keep(w, sq)) {
2792 					if(!w->in_cb_and_decommission)
2793 						reuse_cb_and_decommission(sq->outnet,
2794 							pend, NETEVENT_CLOSED);
2795 					use_free_buffer(sq->outnet);
2796 				}
2797 				sq->pending = NULL;
2798 			} else {
2799 				verbose(VERB_CLIENT, "serviced_delete: tcpwait");
2800 				outnet_waiting_tcp_list_remove(sq->outnet, w);
2801 				if(!w->in_cb_and_decommission)
2802 					waiting_tcp_delete(w);
2803 			}
2804 		}
2805 	}
2806 	/* does not delete from tree, caller has to do that */
2807 	serviced_node_del(&sq->node, NULL);
2808 }
2809 
2810 /** perturb a dname capitalization randomly */
2811 static void
2812 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len)
2813 {
2814 	uint8_t lablen;
2815 	uint8_t* d = qbuf + 10;
2816 	long int random = 0;
2817 	int bits = 0;
2818 	log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */);
2819 	(void)len;
2820 	lablen = *d++;
2821 	while(lablen) {
2822 		while(lablen--) {
2823 			/* only perturb A-Z, a-z */
2824 			if(isalpha((unsigned char)*d)) {
2825 				/* get a random bit */
2826 				if(bits == 0) {
2827 					random = ub_random(rnd);
2828 					bits = 30;
2829 				}
2830 				if(random & 0x1) {
2831 					*d = (uint8_t)toupper((unsigned char)*d);
2832 				} else {
2833 					*d = (uint8_t)tolower((unsigned char)*d);
2834 				}
2835 				random >>= 1;
2836 				bits--;
2837 			}
2838 			d++;
2839 		}
2840 		lablen = *d++;
2841 	}
2842 	if(verbosity >= VERB_ALGO) {
2843 		char buf[LDNS_MAX_DOMAINLEN];
2844 		dname_str(qbuf+10, buf);
2845 		verbose(VERB_ALGO, "qname perturbed to %s", buf);
2846 	}
2847 }
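/* Illustrative example (hypothetical name): a query for www.example.nl may
 * be perturbed to e.g. wWw.EXamPle.nL.  Only the letters A-Z/a-z are
 * flipped, label lengths and other characters are left untouched, and 30
 * random bits are consumed per ub_random() call. */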
2848 
2849 static uint16_t
2850 serviced_query_udp_size(struct serviced_query* sq, enum serviced_query_status status) {
2851 	uint16_t udp_size;
2852 	if(status == serviced_query_UDP_EDNS_FRAG) {
2853 		if(addr_is_ip6(&sq->addr, sq->addrlen)) {
2854 			if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE)
2855 				udp_size = EDNS_FRAG_SIZE_IP6;
2856 			else	udp_size = EDNS_ADVERTISED_SIZE;
2857 		} else {
2858 			if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE)
2859 				udp_size = EDNS_FRAG_SIZE_IP4;
2860 			else	udp_size = EDNS_ADVERTISED_SIZE;
2861 		}
2862 	} else {
2863 		udp_size = EDNS_ADVERTISED_SIZE;
2864 	}
2865 	return udp_size;
2866 }
2867 
2868 /** put serviced query into a buffer */
2869 static void
2870 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns)
2871 {
2872 	/* if we are using 0x20 bits for ID randomness, perturb them */
2873 	if(sq->outnet->use_caps_for_id && !sq->nocaps) {
2874 		serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen);
2875 	}
2876 	/* generate query */
2877 	sldns_buffer_clear(buff);
2878 	sldns_buffer_write_u16(buff, 0); /* id placeholder */
2879 	sldns_buffer_write(buff, sq->qbuf, sq->qbuflen);
2880 	sldns_buffer_flip(buff);
2881 	if(with_edns) {
2882 		/* add edns section */
2883 		struct edns_data edns;
2884 		struct edns_option padding_option;
2885 		edns.edns_present = 1;
2886 		edns.ext_rcode = 0;
2887 		edns.edns_version = EDNS_ADVERTISED_VERSION;
2888 		edns.opt_list_in = NULL;
2889 		edns.opt_list_out = sq->opt_list;
2890 		edns.opt_list_inplace_cb_out = NULL;
2891 		edns.udp_size = serviced_query_udp_size(sq, sq->status);
2892 		edns.bits = 0;
2893 		if(sq->dnssec & EDNS_DO)
2894 			edns.bits = EDNS_DO;
2895 		if(sq->dnssec & BIT_CD)
2896 			LDNS_CD_SET(sldns_buffer_begin(buff));
2897 		if (sq->ssl_upstream && sq->padding_block_size) {
2898 			padding_option.opt_code = LDNS_EDNS_PADDING;
2899 			padding_option.opt_len = 0;
2900 			padding_option.opt_data = NULL;
2901 			padding_option.next = edns.opt_list_out;
2902 			edns.opt_list_out = &padding_option;
2903 			edns.padding_block_size = sq->padding_block_size;
2904 		}
2905 		attach_edns_record(buff, &edns);
2906 	}
2907 }
2908 
2909 /**
2910  * Perform serviced query UDP sending operation.
2911  * Sends UDP with EDNS, unless the infra host is marked as non-EDNS.
2912  * @param sq: query to send.
2913  * @param buff: buffer scratch space.
2914  * @return 0 on error.
2915  */
2916 static int
2917 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff)
2918 {
2919 	int rtt, vs;
2920 	uint8_t edns_lame_known;
2921 	time_t now = *sq->outnet->now_secs;
2922 
2923 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
2924 		sq->zonelen, now, &vs, &edns_lame_known, &rtt))
2925 		return 0;
2926 	sq->last_rtt = rtt;
2927 	verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs);
2928 	if(sq->status == serviced_initial) {
2929 		if(vs != -1) {
2930 			sq->status = serviced_query_UDP_EDNS;
2931 		} else {
2932 			sq->status = serviced_query_UDP;
2933 		}
2934 	}
2935 	serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) ||
2936 		(sq->status == serviced_query_UDP_EDNS_FRAG));
2937 	sq->last_sent_time = *sq->outnet->now_tv;
2938 	sq->edns_lame_known = (int)edns_lame_known;
2939 	verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt);
2940 	sq->pending = pending_udp_query(sq, buff, rtt,
2941 		serviced_udp_callback, sq);
2942 	if(!sq->pending)
2943 		return 0;
2944 	return 1;
2945 }
2946 
2947 /** check that perturbed qname is identical */
2948 static int
2949 serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen)
2950 {
2951 	uint8_t* d1 = sldns_buffer_begin(pkt)+12;
2952 	uint8_t* d2 = qbuf+10;
2953 	uint8_t len1, len2;
2954 	int count = 0;
2955 	if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */
2956 		return 0;
2957 	log_assert(qbuflen >= 15 /* 10 header, root, type, class */);
2958 	len1 = *d1++;
2959 	len2 = *d2++;
2960 	while(len1 != 0 || len2 != 0) {
2961 		if(LABEL_IS_PTR(len1)) {
2962 			/* check if we can read *d1 with compression ptr rest */
2963 			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2964 				return 0;
2965 			d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1);
2966 			/* check if we can read the destination *d1 */
2967 			if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2968 				return 0;
2969 			len1 = *d1++;
2970 			if(count++ > MAX_COMPRESS_PTRS)
2971 				return 0;
2972 			continue;
2973 		}
2974 		if(d2 > qbuf+qbuflen)
2975 			return 0;
2976 		if(len1 != len2)
2977 			return 0;
2978 		if(len1 > LDNS_MAX_LABELLEN)
2979 			return 0;
2980 		/* check len1 + 1(next length) are okay to read */
2981 		if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2982 			return 0;
2983 		log_assert(len1 <= LDNS_MAX_LABELLEN);
2984 		log_assert(len2 <= LDNS_MAX_LABELLEN);
2985 		log_assert(len1 == len2 && len1 != 0);
2986 		/* compare the labels - bitwise identical */
2987 		if(memcmp(d1, d2, len1) != 0)
2988 			return 0;
2989 		d1 += len1;
2990 		d2 += len2;
2991 		len1 = *d1++;
2992 		len2 = *d2++;
2993 	}
2994 	return 1;
2995 }
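/* Illustrative example (hypothetical names): if the query went out for
 * wWw.EXamPle.nL and the reply carries www.example.nl in its question
 * section, the bitwise label compare above fails and 0 is returned, which
 * serviced_callbacks() reports as a wrong 0x20-ID.  Compression pointers in
 * the reply qname are followed (bounded by MAX_COMPRESS_PTRS), so a reply
 * that compresses its question section can still verify. */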
2996 
2997 /** call the callbacks for a serviced query */
2998 static void
2999 serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c,
3000 	struct comm_reply* rep)
3001 {
3002 	struct service_callback* p;
3003 	int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/
3004 	uint8_t *backup_p = NULL;
3005 	size_t backlen = 0;
3006 #ifdef UNBOUND_DEBUG
3007 	rbnode_type* rem =
3008 #else
3009 	(void)
3010 #endif
3011 	/* remove from tree, and schedule for deletion, so that callbacks
3012 	 * can safely deregister themselves and even create new serviced
3013 	 * queries that are identical to this one. */
3014 	rbtree_delete(sq->outnet->serviced, sq);
3015 	log_assert(rem); /* should have been present */
3016 	sq->to_be_deleted = 1;
3017 	verbose(VERB_ALGO, "svcd callbacks start");
3018 	if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c &&
3019 		!sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) {
3020 		/* for type PTR do not check perturbed name in answer,
3021 		 * compatibility with cisco dns guard boxes that mess up the
3022 		 * 0x20 contents of reverse queries */
3023 		/* noerror and nxdomain must have a qname in reply */
3024 		if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 &&
3025 			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3026 				== LDNS_RCODE_NOERROR ||
3027 			 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3028 				== LDNS_RCODE_NXDOMAIN)) {
3029 			verbose(VERB_DETAIL, "no qname in reply to check 0x20ID");
3030 			log_addr(VERB_DETAIL, "from server",
3031 				&sq->addr, sq->addrlen);
3032 			log_buf(VERB_DETAIL, "for packet", c->buffer);
3033 			error = NETEVENT_CLOSED;
3034 			c = NULL;
3035 		} else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 &&
3036 			!serviced_check_qname(c->buffer, sq->qbuf,
3037 			sq->qbuflen)) {
3038 			verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname");
3039 			log_addr(VERB_DETAIL, "from server",
3040 				&sq->addr, sq->addrlen);
3041 			log_buf(VERB_DETAIL, "for packet", c->buffer);
3042 			error = NETEVENT_CAPSFAIL;
3043 			/* and cleanup too */
3044 			pkt_dname_tolower(c->buffer,
3045 				sldns_buffer_at(c->buffer, 12));
3046 		} else {
3047 			verbose(VERB_ALGO, "good 0x20-ID in reply qname");
3048 			/* cleanup caps, prettier cache contents. */
3049 			pkt_dname_tolower(c->buffer,
3050 				sldns_buffer_at(c->buffer, 12));
3051 		}
3052 	}
3053 	if(dobackup && c) {
3054 		/* make a backup of the query, since the querystate processing
3055 		 * may send outgoing queries that overwrite the buffer.
3056 		 * use a secondary buffer to store the query.
3057 		 * This is a data copy, but still faster than another packet to the server */
3058 		backlen = sldns_buffer_limit(c->buffer);
3059 		backup_p = regional_alloc_init(sq->region,
3060 			sldns_buffer_begin(c->buffer), backlen);
3061 		if(!backup_p) {
3062 			log_err("malloc failure in serviced query callbacks");
3063 			error = NETEVENT_CLOSED;
3064 			c = NULL;
3065 		}
3066 		sq->outnet->svcd_overhead = backlen;
3067 	}
3068 	/* test the actual sq->cblist, because the next elem could be deleted*/
3069 	while((p=sq->cblist) != NULL) {
3070 		sq->cblist = p->next; /* remove this element */
3071 		if(dobackup && c) {
3072 			sldns_buffer_clear(c->buffer);
3073 			sldns_buffer_write(c->buffer, backup_p, backlen);
3074 			sldns_buffer_flip(c->buffer);
3075 		}
3076 		fptr_ok(fptr_whitelist_serviced_query(p->cb));
3077 		(void)(*p->cb)(c, p->cb_arg, error, rep);
3078 	}
3079 	if(backup_p) {
3080 		sq->outnet->svcd_overhead = 0;
3081 	}
3082 	verbose(VERB_ALGO, "svcd callbacks end");
3083 	log_assert(sq->cblist == NULL);
3084 	serviced_delete(sq);
3085 }
3086 
3087 int
3088 serviced_tcp_callback(struct comm_point* c, void* arg, int error,
3089         struct comm_reply* rep)
3090 {
3091 	struct serviced_query* sq = (struct serviced_query*)arg;
3092 	struct comm_reply r2;
3093 #ifdef USE_DNSTAP
3094 	struct waiting_tcp* w = (struct waiting_tcp*)sq->pending;
3095 	struct pending_tcp* pend_tcp = NULL;
3096 	struct port_if* pi = NULL;
3097 	if(w && !w->on_tcp_waiting_list && w->next_waiting) {
3098 		pend_tcp = (struct pending_tcp*)w->next_waiting;
3099 		pi = pend_tcp->pi;
3100 	}
3101 #endif
3102 	sq->pending = NULL; /* removed after this callback */
3103 	if(error != NETEVENT_NOERROR)
3104 		log_addr(VERB_QUERY, "tcp error for address",
3105 			&sq->addr, sq->addrlen);
3106 	if(error==NETEVENT_NOERROR)
3107 		infra_update_tcp_works(sq->outnet->infra, &sq->addr,
3108 			sq->addrlen, sq->zone, sq->zonelen);
3109 #ifdef USE_DNSTAP
3110 	/*
3111 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
3112 	 */
3113 	if(error==NETEVENT_NOERROR && pi && sq->outnet->dtenv &&
3114 	   (sq->outnet->dtenv->log_resolver_response_messages ||
3115 	    sq->outnet->dtenv->log_forwarder_response_messages)) {
3116 		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3117 		log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
3118 		dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr,
3119 			&pi->addr, c->type, c->ssl, sq->zone, sq->zonelen, sq->qbuf,
3120 			sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv,
3121 			c->buffer);
3122 	}
3123 #endif
3124 	if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS &&
3125 		(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3126 		LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin(
3127 		c->buffer)) == LDNS_RCODE_NOTIMPL) ) {
3128 		/* attempt to fallback to nonEDNS */
3129 		sq->status = serviced_query_TCP_EDNS_fallback;
3130 		serviced_tcp_initiate(sq, c->buffer);
3131 		return 0;
3132 	} else if(error==NETEVENT_NOERROR &&
3133 		sq->status == serviced_query_TCP_EDNS_fallback &&
3134 			(LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3135 			LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE(
3136 			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN
3137 			|| LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3138 			== LDNS_RCODE_YXDOMAIN)) {
3139 		/* the fallback produced a result that looks promising, note
3140 		 * that this server should be approached without EDNS */
3141 		/* only store noEDNS in cache if domain is noDNSSEC */
3142 		if(!sq->want_dnssec)
3143 		  if(!infra_edns_update(sq->outnet->infra, &sq->addr,
3144 			sq->addrlen, sq->zone, sq->zonelen, -1,
3145 			*sq->outnet->now_secs))
3146 			log_err("Out of memory caching no edns for host");
3147 		sq->status = serviced_query_TCP;
3148 	}
3149 	if(sq->tcp_upstream || sq->ssl_upstream) {
3150 	    struct timeval now = *sq->outnet->now_tv;
3151 	    if(error!=NETEVENT_NOERROR) {
3152 	        if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3153 		    sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3154 		    -1, sq->last_rtt, (time_t)now.tv_sec))
3155 		    log_err("out of memory in TCP exponential backoff.");
3156 	    } else if(now.tv_sec > sq->last_sent_time.tv_sec ||
3157 		(now.tv_sec == sq->last_sent_time.tv_sec &&
3158 		now.tv_usec > sq->last_sent_time.tv_usec)) {
3159 		/* convert from microseconds to milliseconds */
3160 		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3161 		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3162 		verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime);
3163 		log_assert(roundtime >= 0);
3164 		/* only store if less than AUTH_TIMEOUT seconds; it could be
3165 		 * huge if the system hibernated and we just woke up */
3166 		if(roundtime < 60000) {
3167 		    if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3168 			sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3169 			roundtime, sq->last_rtt, (time_t)now.tv_sec))
3170 			log_err("out of memory noting rtt.");
3171 		}
3172 	    }
3173 	}
3174 	/* insert address into reply info */
3175 	if(!rep) {
3176 		/* create one if there isn't (on errors) */
3177 		rep = &r2;
3178 		r2.c = c;
3179 	}
3180 	memcpy(&rep->remote_addr, &sq->addr, sq->addrlen);
3181 	rep->remote_addrlen = sq->addrlen;
3182 	serviced_callbacks(sq, error, c, rep);
3183 	return 0;
3184 }
3185 
3186 static void
3187 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff)
3188 {
3189 	verbose(VERB_ALGO, "initiate TCP query %s",
3190 		sq->status==serviced_query_TCP_EDNS?"EDNS":"");
3191 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3192 	sq->last_sent_time = *sq->outnet->now_tv;
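	/* the busy flag keeps outnet_serviced_query_stop from deleting sq
	 * while pending_tcp_query still uses it (it checks !sq->busy) */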
3193 	log_assert(!sq->busy);
3194 	sq->busy = 1;
3195 	sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout,
3196 		serviced_tcp_callback, sq);
3197 	sq->busy = 0;
3198 	if(!sq->pending) {
3199 		/* delete from tree so that a retry by the layer above does
3200 		 * not clash with this entry */
3201 		verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query");
3202 		serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
3203 	}
3204 }
3205 
3206 /** Send serviced query over TCP; return false on initial failure. */
3207 static int
3208 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff)
3209 {
3210 	int vs, rtt, timeout;
3211 	uint8_t edns_lame_known;
3212 	if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
3213 		sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known,
3214 		&rtt))
3215 		return 0;
3216 	sq->last_rtt = rtt;
3217 	if(vs != -1)
3218 		sq->status = serviced_query_TCP_EDNS;
3219 	else 	sq->status = serviced_query_TCP;
3220 	serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3221 	sq->last_sent_time = *sq->outnet->now_tv;
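	/* choose the timeout: tcp/ssl upstreams start from the RTT estimate
	 * but raise it to tcp_auth_query_timeout while the estimate is still
	 * in the normal range; otherwise tcp_auth_query_timeout is used. */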
3222 	if(sq->tcp_upstream || sq->ssl_upstream) {
3223 		timeout = rtt;
3224 		if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout)
3225 			timeout = sq->outnet->tcp_auth_query_timeout;
3226 	} else {
3227 		timeout = sq->outnet->tcp_auth_query_timeout;
3228 	}
3229 	log_assert(!sq->busy);
3230 	sq->busy = 1;
3231 	sq->pending = pending_tcp_query(sq, buff, timeout,
3232 		serviced_tcp_callback, sq);
3233 	sq->busy = 0;
3234 	return sq->pending != NULL;
3235 }
3236 
3237 /* See if the packet is EDNS-malformed; the answer starts with zeroes.
3238  * This comes from servers that return malformed packets to EDNS0 queries,
3239  * but return good packets for non-EDNS0 queries.
3240  * We try to detect their output without resorting to a full parse or
3241  * a check for too many bytes after the end of the packet. */
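/* The shape detected below is roughly: [12-byte header: NOERROR, QD=1,
 * AN>0][query name][4 bytes type+class][answer RR starting with zeroes]. */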
3242 static int
3243 packet_edns_malformed(struct sldns_buffer* buf, int qtype)
3244 {
3245 	size_t len;
3246 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE)
3247 		return 1; /* malformed */
3248 	/* they have a NOERROR rcode and at least one answer. */
3249 	if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR)
3250 		return 0;
3251 	/* one query (to skip) and answer records */
3252 	if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 ||
3253 		LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0)
3254 		return 0;
3255 	/* skip qname */
3256 	len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE),
3257 		sldns_buffer_limit(buf)-LDNS_HEADER_SIZE);
3258 	if(len == 0)
3259 		return 0;
3260 	if(len == 1 && qtype == 0)
3261 		return 0; /* we asked for '.' and type 0 */
3262 	/* and then 4 bytes (type and class of query) */
3263 	if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3)
3264 		return 0;
3265 
3266 	/* the malformed answer RR starts with 11 zero bytes */
3267 	/* so check the start of the answer record: qname=0, type=0 */
3268 	if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 &&
3269 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 &&
3270 	   sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0)
3271 		return 1;
3272 	return 0;
3273 }
3274 
3275 int
3276 serviced_udp_callback(struct comm_point* c, void* arg, int error,
3277         struct comm_reply* rep)
3278 {
3279 	struct serviced_query* sq = (struct serviced_query*)arg;
3280 	struct outside_network* outnet = sq->outnet;
3281 	struct timeval now = *sq->outnet->now_tv;
3282 #ifdef USE_DNSTAP
3283 	struct pending* p = (struct pending*)sq->pending;
3284 #endif
3285 
3286 	sq->pending = NULL; /* removed after callback */
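	/* On timeout, first fall back to a smaller EDNS size (the 1480/1280
	 * FRAG state); after that, back off the RTT estimate in the infra
	 * cache and retry up to OUTBOUND_UDP_RETRY times. */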
3287 	if(error == NETEVENT_TIMEOUT) {
3288 		if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000 &&
3289 		   (serviced_query_udp_size(sq, serviced_query_UDP_EDNS_FRAG) < serviced_query_udp_size(sq, serviced_query_UDP_EDNS))) {
3290 			/* fallback to 1480/1280 */
3291 			sq->status = serviced_query_UDP_EDNS_FRAG;
3292 			log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10,
3293 				&sq->addr, sq->addrlen);
3294 			if(!serviced_udp_send(sq, c->buffer)) {
3295 				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3296 			}
3297 			return 0;
3298 		}
3299 		if(sq->status == serviced_query_UDP_EDNS_FRAG) {
3300 			/* fragmentation size did not fix it */
3301 			sq->status = serviced_query_UDP_EDNS;
3302 		}
3303 		sq->retry++;
3304 		if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3305 			sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt,
3306 			(time_t)now.tv_sec))
3307 			log_err("out of memory in UDP exponential backoff");
3308 		if(sq->retry < OUTBOUND_UDP_RETRY) {
3309 			log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10,
3310 				&sq->addr, sq->addrlen);
3311 			if(!serviced_udp_send(sq, c->buffer)) {
3312 				serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3313 			}
3314 			return 0;
3315 		}
3316 	}
3317 	if(error != NETEVENT_NOERROR) {
3318 		/* udp returns error (due to no ID or interface available) */
3319 		serviced_callbacks(sq, error, c, rep);
3320 		return 0;
3321 	}
3322 #ifdef USE_DNSTAP
3323 	/*
3324 	 * sending src (local service)/dst (upstream) addresses over DNSTAP
3325 	 */
3326 	if(error == NETEVENT_NOERROR && outnet->dtenv && p->pc &&
3327 		(outnet->dtenv->log_resolver_response_messages ||
3328 		outnet->dtenv->log_forwarder_response_messages)) {
3329 		log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3330 		log_addr(VERB_ALGO, "to local addr", &p->pc->pif->addr,
3331 			p->pc->pif->addrlen);
3332 		dt_msg_send_outside_response(outnet->dtenv, &sq->addr,
3333 			&p->pc->pif->addr, c->type, c->ssl, sq->zone, sq->zonelen,
3334 			sq->qbuf, sq->qbuflen, &sq->last_sent_time,
3335 			sq->outnet->now_tv, c->buffer);
3336 	}
3337 #endif
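	/* EDNS handling for the UDP answer: FORMERR, NOTIMPL or a malformed
	 * packet triggers a retry without EDNS; a normal EDNS answer caches
	 * that EDNS works; a clean answer to the no-EDNS fallback caches
	 * no-EDNS for this server, unless DNSSEC is wanted from it. */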
3338 	if( (sq->status == serviced_query_UDP_EDNS
3339 		||sq->status == serviced_query_UDP_EDNS_FRAG)
3340 		&& (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3341 			== LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(
3342 			sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL
3343 		    || packet_edns_malformed(c->buffer, sq->qtype)
3344 			)) {
3345 		/* try to get an answer by falling back without EDNS */
3346 		verbose(VERB_ALGO, "serviced query: attempt without EDNS");
3347 		sq->status = serviced_query_UDP_EDNS_fallback;
3348 		sq->retry = 0;
3349 		if(!serviced_udp_send(sq, c->buffer)) {
3350 			serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3351 		}
3352 		return 0;
3353 	} else if(sq->status == serviced_query_UDP_EDNS &&
3354 		!sq->edns_lame_known) {
3355 		/* now we know that edns queries received answers; store that */
3356 		log_addr(VERB_ALGO, "serviced query: EDNS works for",
3357 			&sq->addr, sq->addrlen);
3358 		if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3359 			sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) {
3360 			log_err("Out of memory caching edns works");
3361 		}
3362 		sq->edns_lame_known = 1;
3363 	} else if(sq->status == serviced_query_UDP_EDNS_fallback &&
3364 		!sq->edns_lame_known && (LDNS_RCODE_WIRE(
3365 		sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR ||
3366 		LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3367 		LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin(
3368 		c->buffer)) == LDNS_RCODE_YXDOMAIN)) {
3369 		/* the fallback produced a result that looks promising; record
3370 		 * that this server should be approached without EDNS */
3371 		/* only store noEDNS in cache if domain is noDNSSEC */
3372 		if(!sq->want_dnssec) {
3373 		  log_addr(VERB_ALGO, "serviced query: EDNS fails for",
3374 			&sq->addr, sq->addrlen);
3375 		  if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3376 			sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
3377 			log_err("Out of memory caching no edns for host");
3378 		  }
3379 		} else {
3380 		  log_addr(VERB_ALGO, "serviced query: EDNS fails, but "
3381 			"not stored because need DNSSEC for", &sq->addr,
3382 			sq->addrlen);
3383 		}
3384 		sq->status = serviced_query_UDP;
3385 	}
3386 	if(now.tv_sec > sq->last_sent_time.tv_sec ||
3387 		(now.tv_sec == sq->last_sent_time.tv_sec &&
3388 		now.tv_usec > sq->last_sent_time.tv_usec)) {
3389 		/* compute the round-trip time in milliseconds */
3390 		int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3391 		  + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3392 		verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime);
3393 		log_assert(roundtime >= 0);
3394 		/* in case the system hibernated, do not enter a huge value;
3395 		 * values above this give trouble with server selection */
3396 		if(roundtime < 60000) {
3397 		    if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3398 			sq->zone, sq->zonelen, sq->qtype, roundtime,
3399 			sq->last_rtt, (time_t)now.tv_sec))
3400 			log_err("out of memory noting rtt.");
3401 		}
3402 	}
3403 	/* perform TC flag check and TCP fallback after updating our
3404 	 * cache entries for EDNS status and RTT times */
3405 	if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) {
3406 		/* fallback to TCP */
3407 		/* this discards partial UDP contents */
3408 		if(sq->status == serviced_query_UDP_EDNS ||
3409 			sq->status == serviced_query_UDP_EDNS_FRAG ||
3410 			sq->status == serviced_query_UDP_EDNS_fallback)
3411 			/* if we have unfinished EDNS_fallback, start again */
3412 			sq->status = serviced_query_TCP_EDNS;
3413 		else	sq->status = serviced_query_TCP;
3414 		serviced_tcp_initiate(sq, c->buffer);
3415 		return 0;
3416 	}
3417 	/* yay! an answer */
3418 	serviced_callbacks(sq, error, c, rep);
3419 	return 0;
3420 }
3421 
3422 struct serviced_query*
3423 outnet_serviced_query(struct outside_network* outnet,
3424 	struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
3425 	int nocaps, int check_ratelimit, int tcp_upstream, int ssl_upstream,
3426 	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
3427 	uint8_t* zone, size_t zonelen, struct module_qstate* qstate,
3428 	comm_point_callback_type* callback, void* callback_arg,
3429 	sldns_buffer* buff, struct module_env* env, int* was_ratelimited)
3430 {
3431 	struct serviced_query* sq;
3432 	struct service_callback* cb;
3433 	struct edns_string_addr* client_string_addr;
3434 	struct regional* region;
3435 	struct edns_option* backed_up_opt_list = qstate->edns_opts_back_out;
3436 	struct edns_option* per_upstream_opt_list = NULL;
3437 	time_t timenow = 0;
3438 
3439 	/* If we have an already populated EDNS option list make a copy since
3440 	 * we may now add upstream specific EDNS options. */
3441 	/* Use a region that could be attached to a serviced_query, if it needs
3442 	 * to be created. If an existing one is found then this region will be
3443 	 * destroyed here. */
3444 	region = alloc_reg_obtain(env->alloc);
3445 	if(!region) return NULL;
3446 	if(qstate->edns_opts_back_out) {
3447 		per_upstream_opt_list = edns_opt_copy_region(
3448 			qstate->edns_opts_back_out, region);
3449 		if(!per_upstream_opt_list) {
3450 			alloc_reg_release(env->alloc, region);
3451 			return NULL;
3452 		}
3453 		qstate->edns_opts_back_out = per_upstream_opt_list;
3454 	}
3455 
3456 	if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone,
3457 		zonelen, qstate, region)) {
3458 		alloc_reg_release(env->alloc, region);
3459 		return NULL;
3460 	}
3461 	/* Restore the option list; we can explicitly use the copied one from
3462 	 * now on. */
3463 	per_upstream_opt_list = qstate->edns_opts_back_out;
3464 	qstate->edns_opts_back_out = backed_up_opt_list;
3465 
3466 	if((client_string_addr = edns_string_addr_lookup(
3467 		&env->edns_strings->client_strings, addr, addrlen))) {
3468 		edns_opt_list_append(&per_upstream_opt_list,
3469 			env->edns_strings->client_string_opcode,
3470 			client_string_addr->string_len,
3471 			client_string_addr->string, region);
3472 	}
3473 
3474 	serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
3475 		qinfo->qclass, flags);
3476 	sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
3477 		per_upstream_opt_list);
3478 	if(!sq) {
3479 		/* Check ratelimit only for new serviced_query */
3480 		if(check_ratelimit) {
3481 			timenow = *env->now;
3482 			if(!infra_ratelimit_inc(env->infra_cache, zone,
3483 				zonelen, timenow, env->cfg->ratelimit_backoff,
3484 				&qstate->qinfo,
3485 				qstate->mesh_info->reply_list
3486 					?&qstate->mesh_info->reply_list->query_reply
3487 					:NULL)) {
3488 				/* Can we pass through with slip factor? */
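				/* with ratelimit-factor N, roughly 1 in N
				 * queries slips through; 0 lets none through */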
3489 				if(env->cfg->ratelimit_factor == 0 ||
3490 					ub_random_max(env->rnd,
3491 					env->cfg->ratelimit_factor) != 1) {
3492 					*was_ratelimited = 1;
3493 					alloc_reg_release(env->alloc, region);
3494 					return NULL;
3495 				}
3496 				log_nametypeclass(VERB_ALGO,
3497 					"ratelimit allowed through for "
3498 					"delegation point", zone,
3499 					LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN);
3500 			}
3501 		}
3502 		/* make new serviced query entry */
3503 		sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
3504 			tcp_upstream, ssl_upstream, tls_auth_name, addr,
3505 			addrlen, zone, zonelen, (int)qinfo->qtype,
3506 			per_upstream_opt_list,
3507 			( ssl_upstream && env->cfg->pad_queries
3508 			? env->cfg->pad_queries_block_size : 0 ),
3509 			env->alloc, region);
3510 		if(!sq) {
3511 			if(check_ratelimit) {
3512 				infra_ratelimit_dec(env->infra_cache,
3513 					zone, zonelen, timenow);
3514 			}
3515 			return NULL;
3516 		}
3517 		if(!(cb = (struct service_callback*)regional_alloc(
3518 			sq->region, sizeof(*cb)))) {
3519 			if(check_ratelimit) {
3520 				infra_ratelimit_dec(env->infra_cache,
3521 					zone, zonelen, timenow);
3522 			}
3523 			(void)rbtree_delete(outnet->serviced, sq);
3524 			serviced_node_del(&sq->node, NULL);
3525 			return NULL;
3526 		}
3527 		/* No network action at this point; it will be invoked with the
3528 		 * serviced_query timer instead to run outside of the mesh. */
3529 	} else {
3530 		/* We don't need this region anymore. */
3531 		alloc_reg_release(env->alloc, region);
3532 		/* duplicate entries are kept in the callback list, because
3533 		 * each has a counterpart registration by our caller that
3534 		 * also needs to be removed (with callbacks perhaps). */
3535 		if(!(cb = (struct service_callback*)regional_alloc(
3536 			sq->region, sizeof(*cb)))) {
3537 			return NULL;
3538 		}
3539 	}
3540 	/* add callback to list of callbacks */
3541 	cb->cb = callback;
3542 	cb->cb_arg = callback_arg;
3543 	cb->next = sq->cblist;
3544 	sq->cblist = cb;
3545 	return sq;
3546 }
3547 
3548 /** remove callback from list */
3549 static void
3550 callback_list_remove(struct serviced_query* sq, void* cb_arg)
3551 {
3552 	struct service_callback** pp = &sq->cblist;
3553 	while(*pp) {
3554 		if((*pp)->cb_arg == cb_arg) {
3555 			struct service_callback* del = *pp;
3556 			*pp = del->next;
3557 			return;
3558 		}
3559 		pp = &(*pp)->next;
3560 	}
3561 }
3562 
3563 void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
3564 {
3565 	if(!sq)
3566 		return;
3567 	callback_list_remove(sq, cb_arg);
3568 	/* if the callbacks() routine scheduled deletion, let it do that */
3569 	if(!sq->cblist && !sq->busy && !sq->to_be_deleted) {
3570 		(void)rbtree_delete(sq->outnet->serviced, sq);
3571 		serviced_delete(sq);
3572 	}
3573 }
3574 
3575 /** create a UDP fd, on a random outgoing interface and port, to send to this destination */
3576 static int
3577 fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
3578 	socklen_t to_addrlen)
3579 {
3580 	struct sockaddr_storage* addr;
3581 	socklen_t addrlen;
3582 	int i, try, pnum, dscp;
3583 	struct port_if* pif;
3584 
3585 	/* create fd */
3586 	dscp = outnet->ip_dscp;
3587 	for(try = 0; try<1000; try++) {
3588 		int port = 0;
3589 		int freebind = 0;
3590 		int noproto = 0;
3591 		int inuse = 0;
3592 		int fd = -1;
3593 
3594 		/* select interface */
3595 		if(addr_is_ip6(to_addr, to_addrlen)) {
3596 			if(outnet->num_ip6 == 0) {
3597 				char to[64];
3598 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3599 				verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
3600 				return -1;
3601 			}
3602 			i = ub_random_max(outnet->rnd, outnet->num_ip6);
3603 			pif = &outnet->ip6_ifs[i];
3604 		} else {
3605 			if(outnet->num_ip4 == 0) {
3606 				char to[64];
3607 				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3608 				verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
3609 				return -1;
3610 			}
3611 			i = ub_random_max(outnet->rnd, outnet->num_ip4);
3612 			pif = &outnet->ip4_ifs[i];
3613 		}
3614 		addr = &pif->addr;
3615 		addrlen = pif->addrlen;
3616 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3617 		pnum = ub_random_max(outnet->rnd, pif->avail_total);
3618 		if(pnum < pif->inuse) {
3619 			/* port already open */
3620 			port = pif->out[pnum]->number;
3621 		} else {
3622 			/* unused ports are at the start of the avail_ports array */
3623 			port = pif->avail_ports[pnum - pif->inuse];
3624 		}
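		/* for example, with inuse=3 and avail_total=100, pnum 0..2
		 * reuses an open port and pnum 3..99 picks avail_ports[pnum-3] */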
3625 #else
3626 		pnum = port = 0;
3627 #endif
3628 		if(addr_is_ip6(to_addr, to_addrlen)) {
3629 			struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
3630 			sa.sin6_port = (in_port_t)htons((uint16_t)port);
3631 			fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
3632 				(struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
3633 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3634 		} else {
3635 			struct sockaddr_in* sa = (struct sockaddr_in*)addr;
3636 			sa->sin_port = (in_port_t)htons((uint16_t)port);
3637 			fd = create_udp_sock(AF_INET, SOCK_DGRAM,
3638 				(struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
3639 				0, 0, 0, NULL, 0, freebind, 0, dscp);
3640 		}
3641 		if(fd != -1) {
3642 			return fd;
3643 		}
3644 		if(!inuse) {
3645 			return -1;
3646 		}
3647 	}
3648 	/* too many tries */
3649 	log_err("cannot send probe, ports are in use");
3650 	return -1;
3651 }
3652 
3653 struct comm_point*
3654 outnet_comm_point_for_udp(struct outside_network* outnet,
3655 	comm_point_callback_type* cb, void* cb_arg,
3656 	struct sockaddr_storage* to_addr, socklen_t to_addrlen)
3657 {
3658 	struct comm_point* cp;
3659 	int fd = fd_for_dest(outnet, to_addr, to_addrlen);
3660 	if(fd == -1) {
3661 		return NULL;
3662 	}
3663 	cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff, 0,
3664 		cb, cb_arg, NULL);
3665 	if(!cp) {
3666 		log_err("malloc failure");
3667 		close(fd);
3668 		return NULL;
3669 	}
3670 	return cp;
3671 }
3672 
3673 /** setup SSL for comm point */
3674 static int
3675 setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
3676 	int fd, char* host)
3677 {
3678 	cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
3679 	if(!cp->ssl) {
3680 		log_err("cannot create SSL object");
3681 		return 0;
3682 	}
3683 #ifdef USE_WINSOCK
3684 	comm_point_tcp_win_bio_cb(cp, cp->ssl);
3685 #endif
3686 	cp->ssl_shake_state = comm_ssl_shake_write;
3687 	/* https verification */
3688 #ifdef HAVE_SSL
3689 	if(outnet->tls_use_sni) {
3690 		(void)SSL_set_tlsext_host_name(cp->ssl, host);
3691 	}
3692 #endif
3693 #ifdef HAVE_SSL_SET1_HOST
3694 	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3695 		/* because we set SSL_VERIFY_PEER, in netevent in
3696 		 * ssl_handshake, it'll check if the certificate
3697 		 * verification has succeeded */
3698 		/* SSL_VERIFY_PEER is set on the sslctx */
3699 		/* and the certificates to verify with are loaded into
3700 		 * it with SSL_load_verify_locations or
3701 		 * SSL_CTX_set_default_verify_paths */
3702 		/* setting the hostname makes openssl verify the
3703 		 * host name in the x509 certificate in the
3704 		 * SSL connection */
3705 		if(!SSL_set1_host(cp->ssl, host)) {
3706 			log_err("SSL_set1_host failed");
3707 			return 0;
3708 		}
3709 	}
3710 #elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
3711 	/* openssl 1.0.2 has this function that can be used for
3712 	 * set1_host-like verification */
3713 	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3714 		X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl);
3715 #  ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
3716 		X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
3717 #  endif
3718 		if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
3719 			log_err("X509_VERIFY_PARAM_set1_host failed");
3720 			return 0;
3721 		}
3722 	}
3723 #else
3724 	(void)host;
3725 #endif /* HAVE_SSL_SET1_HOST */
3726 	return 1;
3727 }
3728 
3729 struct comm_point*
3730 outnet_comm_point_for_tcp(struct outside_network* outnet,
3731 	comm_point_callback_type* cb, void* cb_arg,
3732 	struct sockaddr_storage* to_addr, socklen_t to_addrlen,
3733 	sldns_buffer* query, int timeout, int ssl, char* host)
3734 {
3735 	struct comm_point* cp;
3736 	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss,
3737 		outnet->ip_dscp, ssl);
3738 	if(fd == -1) {
3739 		return 0;
3740 	}
3741 	fd_set_nonblock(fd);
3742 	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3743 		/* outnet_tcp_connect has closed fd on error for us */
3744 		return 0;
3745 	}
3746 	cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
3747 	if(!cp) {
3748 		log_err("malloc failure");
3749 		close(fd);
3750 		return 0;
3751 	}
3752 	cp->repinfo.remote_addrlen = to_addrlen;
3753 	memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);
3754 
3755 	/* setup for SSL (if needed) */
3756 	if(ssl) {
3757 		if(!setup_comm_ssl(cp, outnet, fd, host)) {
3758 			log_err("cannot setup XoT");
3759 			comm_point_delete(cp);
3760 			return NULL;
3761 		}
3762 	}
3763 
3764 	/* set timeout on TCP connection */
3765 	comm_point_start_listening(cp, fd, timeout);
3766 	/* copy scratch buffer to cp->buffer */
3767 	sldns_buffer_copy(cp->buffer, query);
3768 	return cp;
3769 }
3770 
3771 /** setup the User-Agent HTTP header based on http-user-agent configuration */
3772 static void
3773 setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
3774 {
3775 	if(cfg->hide_http_user_agent) return;
3776 	if(cfg->http_user_agent==NULL || cfg->http_user_agent[0] == 0) {
3777 		sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME,
3778 			PACKAGE_VERSION);
3779 	} else {
3780 		sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
3781 	}
3782 }
3783 
3784 /** setup http request headers in buffer for sending query to destination */
3785 static int
3786 setup_http_request(sldns_buffer* buf, char* host, char* path,
3787 	struct config_file* cfg)
3788 {
3789 	sldns_buffer_clear(buf);
3790 	sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
3791 	sldns_buffer_printf(buf, "Host: %s\r\n", host);
3792 	setup_http_user_agent(buf, cfg);
3793 	/* We do not really do multiple queries per connection,
3794 	 * but the Connection: close header is also not needed:
3795 	 * sldns_buffer_printf(buf, "Connection: close\r\n") */
3796 	sldns_buffer_printf(buf, "\r\n");
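	/* the buffer now holds, for example (host and path come from the
	 * caller): "GET /dns-query HTTP/1.1\r\nHost: example.net\r\n",
	 * then the User-Agent header (if not hidden) and an empty line */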
3797 	if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
3798 		return 0; /* somehow the buffer is too short, but it is about
3799 		60K and the request is at most a few hundred bytes long. */
3800 	sldns_buffer_flip(buf);
3801 	return 1;
3802 }
3803 
3804 struct comm_point*
3805 outnet_comm_point_for_http(struct outside_network* outnet,
3806 	comm_point_callback_type* cb, void* cb_arg,
3807 	struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
3808 	int ssl, char* host, char* path, struct config_file* cfg)
3809 {
3810 	/* cp calls cb with err=NETEVENT_DONE when transfer is done */
3811 	struct comm_point* cp;
3812 	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss,
3813 		outnet->ip_dscp, ssl);
3814 	if(fd == -1) {
3815 		return 0;
3816 	}
3817 	fd_set_nonblock(fd);
3818 	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3819 		/* outnet_tcp_connect has closed fd on error for us */
3820 		return 0;
3821 	}
3822 	cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
3823 		outnet->udp_buff);
3824 	if(!cp) {
3825 		log_err("malloc failure");
3826 		close(fd);
3827 		return 0;
3828 	}
3829 	cp->repinfo.remote_addrlen = to_addrlen;
3830 	memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);
3831 
3832 	/* setup for SSL (if needed) */
3833 	if(ssl) {
3834 		if(!setup_comm_ssl(cp, outnet, fd, host)) {
3835 			log_err("cannot setup https");
3836 			comm_point_delete(cp);
3837 			return NULL;
3838 		}
3839 	}
3840 
3841 	/* set timeout on TCP connection */
3842 	comm_point_start_listening(cp, fd, timeout);
3843 
3844 	/* setup http request in cp->buffer */
3845 	if(!setup_http_request(cp->buffer, host, path, cfg)) {
3846 		log_err("error setting up http request");
3847 		comm_point_delete(cp);
3848 		return NULL;
3849 	}
3850 	return cp;
3851 }
3852 
3853 /** get memory used by waiting tcp entry (in use or not) */
3854 static size_t
3855 waiting_tcp_get_mem(struct waiting_tcp* w)
3856 {
3857 	size_t s;
3858 	if(!w) return 0;
3859 	s = sizeof(*w) + w->pkt_len;
3860 	if(w->timer)
3861 		s += comm_timer_get_mem(w->timer);
3862 	return s;
3863 }
3864 
3865 /** get memory used by port if */
3866 static size_t
3867 if_get_mem(struct port_if* pif)
3868 {
3869 	size_t s;
3870 	int i;
3871 	s = sizeof(*pif) +
3872 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3873 	    sizeof(int)*pif->avail_total +
3874 #endif
3875 		sizeof(struct port_comm*)*pif->maxout;
3876 	for(i=0; i<pif->inuse; i++)
3877 		s += sizeof(*pif->out[i]) +
3878 			comm_point_get_mem(pif->out[i]->cp);
3879 	return s;
3880 }
3881 
3882 /** get memory used by waiting udp */
3883 static size_t
3884 waiting_udp_get_mem(struct pending* w)
3885 {
3886 	size_t s;
3887 	s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
3888 	return s;
3889 }
3890 
3891 size_t outnet_get_mem(struct outside_network* outnet)
3892 {
3893 	size_t i;
3894 	int k;
3895 	struct waiting_tcp* w;
3896 	struct pending* u;
3897 	struct serviced_query* sq;
3898 	struct service_callback* sb;
3899 	struct port_comm* pc;
3900 	size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
3901 		sizeof(*outnet->udp_buff) +
3902 		sldns_buffer_capacity(outnet->udp_buff);
3903 	/* second buffer is not ours */
3904 	for(pc = outnet->unused_fds; pc; pc = pc->next) {
3905 		s += sizeof(*pc) + comm_point_get_mem(pc->cp);
3906 	}
3907 	for(k=0; k<outnet->num_ip4; k++)
3908 		s += if_get_mem(&outnet->ip4_ifs[k]);
3909 	for(k=0; k<outnet->num_ip6; k++)
3910 		s += if_get_mem(&outnet->ip6_ifs[k]);
3911 	for(u=outnet->udp_wait_first; u; u=u->next_waiting)
3912 		s += waiting_udp_get_mem(u);
3913 
3914 	s += sizeof(struct pending_tcp*)*outnet->num_tcp;
3915 	for(i=0; i<outnet->num_tcp; i++) {
3916 		s += sizeof(struct pending_tcp);
3917 		s += comm_point_get_mem(outnet->tcp_conns[i]->c);
3918 		if(outnet->tcp_conns[i]->query)
3919 			s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
3920 	}
3921 	for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
3922 		s += waiting_tcp_get_mem(w);
3923 	s += sizeof(*outnet->pending);
3924 	s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
3925 		outnet->pending->count;
3926 	s += sizeof(*outnet->serviced);
3927 	s += outnet->svcd_overhead;
3928 	RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
3929 		s += sizeof(*sq) + sq->qbuflen;
3930 		for(sb = sq->cblist; sb; sb = sb->next)
3931 			s += sizeof(*sb);
3932 	}
3933 	return s;
3934 }
3935 
3936 size_t
3937 serviced_get_mem(struct serviced_query* sq)
3938 {
3939 	struct service_callback* sb;
3940 	size_t s;
3941 	s = sizeof(*sq) + sq->qbuflen;
3942 	for(sb = sq->cblist; sb; sb = sb->next)
3943 		s += sizeof(*sb);
3944 	if(sq->status == serviced_query_UDP_EDNS ||
3945 		sq->status == serviced_query_UDP ||
3946 		sq->status == serviced_query_UDP_EDNS_FRAG ||
3947 		sq->status == serviced_query_UDP_EDNS_fallback) {
3948 		s += sizeof(struct pending);
3949 		s += comm_timer_get_mem(NULL);
3950 	} else {
3951 		/* does not have size of the pkt pointer */
3952 		/* always has a timer except on malloc failures */
3953 
3954 		/* these sizes are part of the main outside network mem */
3955 		/*
3956 		s += sizeof(struct waiting_tcp);
3957 		s += comm_timer_get_mem(NULL);
3958 		*/
3959 	}
3960 	return s;
3961 }
3962 
3963