/*
 * services/outside_network.c - implement sending of queries and wait answer.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file has functions to send queries to authoritative servers and
 * wait for the pending answer events.
 */
#include "config.h"
#include <ctype.h>
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#include <sys/time.h>
#include "services/outside_network.h"
#include "services/listen_dnsport.h"
#include "services/cache/infra.h"
#include "iterator/iterator.h"
#include "util/data/msgparse.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/data/dname.h"
#include "util/netevent.h"
#include "util/log.h"
#include "util/net_help.h"
#include "util/random.h"
#include "util/fptr_wlist.h"
#include "util/edns.h"
#include "sldns/sbuffer.h"
#include "dnstap/dnstap.h"
#ifdef HAVE_OPENSSL_SSL_H
#include <openssl/ssl.h>
#endif
#ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
#include <openssl/x509v3.h>
#endif

#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#include <fcntl.h>

/** number of times to retry making a random ID that is unique. */
#define MAX_ID_RETRY 1000
/** number of times to retry finding interface, port that can be opened. */
#define MAX_PORT_RETRY 10000
/** number of retries on outgoing UDP queries */
#define OUTBOUND_UDP_RETRY 1

/** initiate TCP transaction for serviced query */
static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
/** with a fd available, randomize and send UDP */
static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
	int timeout);

/** remove waiting tcp from the outnet waiting list */
static void waiting_list_remove(struct outside_network* outnet,
	struct waiting_tcp* w);

int
pending_cmp(const void* key1, const void* key2)
{
	struct pending *p1 = (struct pending*)key1;
	struct pending *p2 = (struct pending*)key2;
	if(p1->id < p2->id)
		return -1;
	if(p1->id > p2->id)
		return 1;
	log_assert(p1->id == p2->id);
	return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
}

int
serviced_cmp(const void* key1, const void* key2)
{
	struct serviced_query* q1 = (struct serviced_query*)key1;
	struct serviced_query* q2 = (struct serviced_query*)key2;
	int r;
	if(q1->qbuflen < q2->qbuflen)
		return -1;
	if(q1->qbuflen > q2->qbuflen)
		return 1;
	log_assert(q1->qbuflen == q2->qbuflen);
	log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
	/* alternate casing of qname is still the same query */
	if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
		return r;
	if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
		return r;
	if(q1->dnssec != q2->dnssec) {
		if(q1->dnssec < q2->dnssec)
			return -1;
		return 1;
	}
	if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
		return r;
	if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
		return r;
	return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
}

/** compare if the reuse element has the same address, port and same ssl-is
 * used-for-it characteristic */
static int
reuse_cmp_addrportssl(const void* key1, const void* key2)
{
	struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
	struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
	int r;
	/* compare address and port */
	r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
	if(r != 0)
		return r;

	/* compare if SSL-enabled */
	if(r1->is_ssl && !r2->is_ssl)
		return 1;
	if(!r1->is_ssl && r2->is_ssl)
		return -1;
	return 0;
}

int
reuse_cmp(const void* key1, const void* key2)
{
	int r;
	r = reuse_cmp_addrportssl(key1, key2);
	if(r != 0)
		return r;

	/* compare ptr value */
	if(key1 < key2) return -1;
	if(key1 > key2) return 1;
	return 0;
}

int reuse_id_cmp(const void* key1, const void* key2)
{
	struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
	struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
	if(w1->id < w2->id)
		return -1;
	if(w1->id > w2->id)
		return 1;
	return 0;
}

/** delete waiting_tcp entry. Does not unlink from waiting list.
 * @param w: to delete.
 */
static void
waiting_tcp_delete(struct waiting_tcp* w)
{
	if(!w) return;
	if(w->timer)
		comm_timer_delete(w->timer);
	free(w);
}
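/* Summary of the comparators above: pending_cmp orders the pending UDP
 * tree on query ID plus destination address, serviced_cmp orders the
 * serviced tree on the query contents (qname compared case-insensitively),
 * reuse_cmp orders the tcp_reuse tree on destination address, port and
 * TLS-or-not with the pointer value as a tie-breaker (so several streams
 * to one destination can coexist), and reuse_id_cmp orders the per-stream
 * tree_by_id on the DNS query ID. */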
/**
 * Pick random outgoing-interface of that family, and bind it.
 * port set to 0 so OS picks a port number for us.
 * if it is the ANY address, do not bind.
 * @param w: tcp structure with destination address.
 * @param s: socket fd.
 * @return false on error, socket closed.
 */
static int
pick_outgoing_tcp(struct waiting_tcp* w, int s)
{
	struct port_if* pi = NULL;
	int num;
#ifdef INET6
	if(addr_is_ip6(&w->addr, w->addrlen))
		num = w->outnet->num_ip6;
	else
#endif
		num = w->outnet->num_ip4;
	if(num == 0) {
		log_err("no TCP outgoing interfaces of family");
		log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
		sock_close(s);
		return 0;
	}
#ifdef INET6
	if(addr_is_ip6(&w->addr, w->addrlen))
		pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
	else
#endif
		pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
	log_assert(pi);
	if(addr_is_any(&pi->addr, pi->addrlen)) {
		/* binding to the ANY interface is for listening sockets */
		return 1;
	}
	/* set port to 0 */
	if(addr_is_ip6(&pi->addr, pi->addrlen))
		((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
	else	((struct sockaddr_in*)&pi->addr)->sin_port = 0;
	if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
		log_err("outgoing tcp: bind: %s", sock_strerror(errno));
		sock_close(s);
		return 0;
	}
	log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
	return 1;
}

/** get TCP file descriptor for address, returns -1 on failure,
 * tcp_mss is 0 or maxseg size to set for TCP packets. */
int
outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
{
	int s;
	int af;
	char* err;
#ifdef SO_REUSEADDR
	int on = 1;
#endif
#ifdef INET6
	if(addr_is_ip6(addr, addrlen)){
		s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
		af = AF_INET6;
	} else {
#else
	{
#endif
		af = AF_INET;
		s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
	}
	if(s == -1) {
		log_err_addr("outgoing tcp: socket", sock_strerror(errno),
			addr, addrlen);
		return -1;
	}

#ifdef SO_REUSEADDR
	if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
		(socklen_t)sizeof(on)) < 0) {
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(.. SO_REUSEADDR ..) failed");
	}
#endif

	err = set_ip_dscp(s, af, dscp);
	if(err != NULL) {
		verbose(VERB_ALGO, "outgoing tcp:"
			"error setting IP DiffServ codepoint on socket");
	}

	if(tcp_mss > 0) {
#if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
		if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
			(void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
			verbose(VERB_ALGO, "outgoing tcp:"
				" setsockopt(.. TCP_MAXSEG ..) failed");
		}
#else
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(TCP_MAXSEG) unsupported");
#endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
	}

	return s;
}
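/* Note: callers such as outnet_tcp_take_into_use below set this socket
 * non-blocking before connecting, so connect() is expected to return with
 * EINPROGRESS; outnet_tcp_connect treats that as success and the connect
 * completes asynchronously. */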
/** connect tcp connection to addr, 0 on failure */
int
outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
{
	if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
#ifndef USE_WINSOCK
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#endif
			if(tcp_connect_errno_needs_log(
				(struct sockaddr*)addr, addrlen))
				log_err_addr("outgoing tcp: connect",
					strerror(errno), addr, addrlen);
			close(s);
			return 0;
#ifdef EINPROGRESS
		}
#endif
#else /* USE_WINSOCK */
		if(WSAGetLastError() != WSAEINPROGRESS &&
			WSAGetLastError() != WSAEWOULDBLOCK) {
			closesocket(s);
			return 0;
		}
#endif
	}
	return 1;
}

/** log reuse item addr and ptr with message */
static void
log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
{
	uint16_t port;
	char addrbuf[128];
	if(verbosity < v) return;
	addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
	port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
	verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
		reuse->pending->c->fd);
}

/** pop the first element from the writewait list */
static struct waiting_tcp* reuse_write_wait_pop(struct reuse_tcp* reuse)
{
	struct waiting_tcp* w = reuse->write_wait_first;
	if(!w)
		return NULL;
	log_assert(w->write_wait_queued);
	log_assert(!w->write_wait_prev);
	reuse->write_wait_first = w->write_wait_next;
	if(w->write_wait_next)
		w->write_wait_next->write_wait_prev = NULL;
	else	reuse->write_wait_last = NULL;
	w->write_wait_queued = 0;
	return w;
}

/** remove the element from the writewait list */
static void reuse_write_wait_remove(struct reuse_tcp* reuse,
	struct waiting_tcp* w)
{
	if(!w)
		return;
	if(!w->write_wait_queued)
		return;
	if(w->write_wait_prev)
		w->write_wait_prev->write_wait_next = w->write_wait_next;
	else	reuse->write_wait_first = w->write_wait_next;
	if(w->write_wait_next)
		w->write_wait_next->write_wait_prev = w->write_wait_prev;
	else	reuse->write_wait_last = w->write_wait_prev;
	w->write_wait_queued = 0;
}

/** push the element after the last on the writewait list */
static void reuse_write_wait_push_back(struct reuse_tcp* reuse,
	struct waiting_tcp* w)
{
	if(!w) return;
	log_assert(!w->write_wait_queued);
	if(reuse->write_wait_last) {
		reuse->write_wait_last->write_wait_next = w;
		w->write_wait_prev = reuse->write_wait_last;
	} else {
		reuse->write_wait_first = w;
	}
	reuse->write_wait_last = w;
	w->write_wait_queued = 1;
}

/** insert element in tree by id */
void
reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
	log_assert(w->id_node.key == NULL);
	w->id_node.key = w;
	rbtree_insert(&reuse->tree_by_id, &w->id_node);
}

/** find element in tree by id */
struct waiting_tcp*
reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
{
	struct waiting_tcp key_w;
	rbnode_type* n;
	memset(&key_w, 0, sizeof(key_w));
	key_w.id_node.key = &key_w;
	key_w.id = id;
	n = rbtree_search(&reuse->tree_by_id, &key_w);
	if(!n) return NULL;
	return (struct waiting_tcp*)n->key;
}
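/* Note: this per-stream tree_by_id, keyed on the 16-bit DNS ID, is what
 * demultiplexes replies on a reused stream; outnet_tcp_cb below reads the
 * ID from the incoming packet and uses reuse_tcp_by_id_find to locate the
 * waiting query that it answers. */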
/** return ID value of rbnode in tree_by_id */
static uint16_t
tree_by_id_get_id(rbnode_type* node)
{
	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
	return w->id;
}

/** insert into reuse tcp tree and LRU, false on failure (duplicate) */
static int
reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
	if(pend_tcp->reuse.item_on_lru_list)
		return 1;
	pend_tcp->reuse.node.key = &pend_tcp->reuse;
	pend_tcp->reuse.pending = pend_tcp;
	if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
		/* this is a duplicate connection, close this one */
		verbose(VERB_CLIENT, "reuse_tcp_insert: duplicate connection");
		pend_tcp->reuse.node.key = NULL;
		return 0;
	}
	/* insert into LRU, first is newest */
	pend_tcp->reuse.lru_prev = NULL;
	if(outnet->tcp_reuse_first) {
		pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
		outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
	} else {
		pend_tcp->reuse.lru_next = NULL;
		outnet->tcp_reuse_last = &pend_tcp->reuse;
	}
	outnet->tcp_reuse_first = &pend_tcp->reuse;
	pend_tcp->reuse.item_on_lru_list = 1;
	return 1;
}

/** find reuse tcp stream to destination for query, or NULL if none */
static struct reuse_tcp*
reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
	socklen_t addrlen, int use_ssl)
{
	struct waiting_tcp key_w;
	struct pending_tcp key_p;
	struct comm_point c;
	rbnode_type* result = NULL, *prev;
	verbose(VERB_CLIENT, "reuse_tcp_find");
	memset(&key_w, 0, sizeof(key_w));
	memset(&key_p, 0, sizeof(key_p));
	memset(&c, 0, sizeof(c));
	key_p.query = &key_w;
	key_p.c = &c;
	key_p.reuse.pending = &key_p;
	key_p.reuse.node.key = &key_p.reuse;
	if(use_ssl)
		key_p.reuse.is_ssl = 1;
	if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
		return NULL;
	memmove(&key_p.reuse.addr, addr, addrlen);
	key_p.reuse.addrlen = addrlen;

	verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
		(unsigned)outnet->tcp_reuse.count);
	if(outnet->tcp_reuse.root == NULL ||
		outnet->tcp_reuse.root == RBTREE_NULL)
		return NULL;
	if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse.node,
		&result)) {
		/* exact match */
		/* but the key is on stack, and ptr is compared, impossible */
		log_assert(&key_p.reuse != (struct reuse_tcp*)result);
		log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
	}
	/* not found, return null */
	if(!result || result == RBTREE_NULL)
		return NULL;
	verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
	/* inexact match, find one of possibly several connections to the
	 * same destination address, with the correct port, ssl, and
	 * also less than max number of open queries, or else, fail to open
	 * a new one */
	/* rewind to start of sequence of same address,port,ssl */
	prev = rbtree_previous(result);
	while(prev && prev != RBTREE_NULL &&
		reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
		result = prev;
		prev = rbtree_previous(result);
	}

	/* loop to find first one that has correct characteristics */
	while(result && result != RBTREE_NULL &&
		reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
		if(((struct reuse_tcp*)result)->tree_by_id.count <
			MAX_REUSE_TCP_QUERIES) {
			/* same address, port, ssl-yes-or-no, and has
			 * space for another query */
			return (struct reuse_tcp*)result;
		}
		result = rbtree_next(result);
	}
	return NULL;
}
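/* Note on the lookup above: because reuse_cmp breaks ties on the pointer
 * value, all open streams to one address/port/ssl combination are adjacent
 * in the tcp_reuse tree.  reuse_tcp_find does a find-less-equal on a stack
 * key, rewinds with rbtree_previous to the first stream of that run, and
 * walks forward until it finds one with fewer than MAX_REUSE_TCP_QUERIES
 * outstanding queries. */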
/** use the buffer to setup writing the query */
static void
outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
	struct waiting_tcp* w)
{
	struct timeval tv;
	verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
		"len %d timeout %d msec",
		(int)w->pkt_len, w->timeout);
	pend->c->tcp_write_pkt = w->pkt;
	pend->c->tcp_write_pkt_len = w->pkt_len;
	pend->c->tcp_write_and_read = 1;
	pend->c->tcp_write_byte_count = 0;
	pend->c->tcp_is_reading = 0;
	comm_point_start_listening(pend->c, s, -1);
	/* set timer on the waiting_tcp entry, this is the write timeout
	 * for the written packet.  The timer on pend->c is the timer
	 * for when there is no written packet and we have readtimeouts */
#ifndef S_SPLINT_S
	tv.tv_sec = w->timeout/1000;
	tv.tv_usec = (w->timeout%1000)*1000;
#endif
	/* if the waiting_tcp was previously waiting for a buffer in the
	 * outside_network.tcpwaitlist, then the timer is reset now that
	 * we start writing it */
	comm_timer_set(w->timer, &tv);
}

/** use next free buffer to service a tcp query */
static int
outnet_tcp_take_into_use(struct waiting_tcp* w)
{
	struct pending_tcp* pend = w->outnet->tcp_free;
	int s;
	log_assert(pend);
	log_assert(w->pkt);
	log_assert(w->pkt_len > 0);
	log_assert(w->addrlen > 0);
	pend->c->tcp_do_toggle_rw = 0;
	pend->c->tcp_do_close = 0;
	/* open socket */
	s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);

	if(s == -1)
		return 0;

	if(!pick_outgoing_tcp(w, s))
		return 0;

	fd_set_nonblock(s);
#ifdef USE_OSX_MSG_FASTOPEN
	/* API for fast open is different here. We use a connectx() function and
	   then writes can happen as normal even using SSL.*/
	/* connectx requires that the len be set in the sockaddr struct*/
	struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
	addr_in->sin_len = w->addrlen;
	sa_endpoints_t endpoints;
	endpoints.sae_srcif = 0;
	endpoints.sae_srcaddr = NULL;
	endpoints.sae_srcaddrlen = 0;
	endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
	endpoints.sae_dstaddrlen = w->addrlen;
	if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
		CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
		NULL, 0, NULL, NULL) == -1) {
		/* if fails, failover to connect for OSX 10.10 */
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#else
		if(1) {
#endif
			if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#else /* USE_OSX_MSG_FASTOPEN*/
#ifdef USE_MSG_FASTOPEN
	pend->c->tcp_do_fastopen = 1;
	/* Only do TFO for TCP in which case no connect() is required here.
	   Don't combine client TFO with SSL, since OpenSSL can't
	   currently support doing a handshake on fd that already isn't connected*/
	if (w->outnet->sslctx && w->ssl_upstream) {
		if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#else /* USE_MSG_FASTOPEN*/
	if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#endif /* USE_MSG_FASTOPEN*/
#endif /* USE_OSX_MSG_FASTOPEN*/
#ifndef USE_WINSOCK
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#else
		if(1) {
#endif
			if(tcp_connect_errno_needs_log(
				(struct sockaddr*)&w->addr, w->addrlen))
				log_err_addr("outgoing tcp: connect",
					strerror(errno), &w->addr, w->addrlen);
			close(s);
#else /* USE_WINSOCK */
		if(WSAGetLastError() != WSAEINPROGRESS &&
			WSAGetLastError() != WSAEWOULDBLOCK) {
			closesocket(s);
#endif
			return 0;
		}
	}
#ifdef USE_MSG_FASTOPEN
	}
#endif /* USE_MSG_FASTOPEN */
#ifdef USE_OSX_MSG_FASTOPEN
		}
	}
#endif /* USE_OSX_MSG_FASTOPEN */
	if(w->outnet->sslctx && w->ssl_upstream) {
		pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
		if(!pend->c->ssl) {
			pend->c->fd = s;
			comm_point_close(pend->c);
			return 0;
		}
		verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
			(w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
#ifdef USE_WINSOCK
		comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
#endif
		pend->c->ssl_shake_state = comm_ssl_shake_write;
		if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
			w->outnet->tls_use_sni)) {
			pend->c->fd = s;
#ifdef HAVE_SSL
			SSL_free(pend->c->ssl);
#endif
			pend->c->ssl = NULL;
			comm_point_close(pend->c);
			return 0;
		}
	}
	w->next_waiting = (void*)pend;
	w->outnet->num_tcp_outgoing++;
	w->outnet->tcp_free = pend->next_free;
	pend->next_free = NULL;
	pend->query = w;
	pend->reuse.outnet = w->outnet;
	pend->c->repinfo.addrlen = w->addrlen;
	pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
	pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
	pend->reuse.cp_more_read_again = 0;
	pend->reuse.cp_more_write_again = 0;
	memcpy(&pend->c->repinfo.addr, &w->addr, w->addrlen);
	pend->reuse.pending = pend;
	if(pend->c->ssl)
		pend->reuse.is_ssl = 1;
	else	pend->reuse.is_ssl = 0;
	/* insert in reuse by address tree if not already inserted there */
	(void)reuse_tcp_insert(w->outnet, pend);
	reuse_tree_by_id_insert(&pend->reuse, w);
	outnet_tcp_take_query_setup(s, pend, w);
	return 1;
}
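/* Note: outnet_tcp_take_into_use, in short, opens and binds an outgoing
 * socket, starts a non-blocking connect (optionally with TCP Fast Open
 * and/or a TLS handshake), registers the stream in the tcp_reuse tree and
 * the query in the per-stream tree_by_id, and hands the packet to
 * outnet_tcp_take_query_setup to be written. */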
/** Touch the lru of a reuse_tcp element, it is in use.
 * This moves it to the front of the list, where it is not likely to
 * be closed.  Items at the back of the list are closed to make space. */
static void
reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
{
	if(!reuse->item_on_lru_list)
		return; /* not on the list, no lru to modify */
	if(!reuse->lru_prev)
		return; /* already first in the list */
	/* remove at current position */
	/* since it is not first, there is a previous element */
	reuse->lru_prev->lru_next = reuse->lru_next;
	if(reuse->lru_next)
		reuse->lru_next->lru_prev = reuse->lru_prev;
	else	outnet->tcp_reuse_last = reuse->lru_prev;
	/* insert at the front */
	reuse->lru_prev = NULL;
	reuse->lru_next = outnet->tcp_reuse_first;
	/* since it is not first, it is not the only element and
	 * lru_next is thus not NULL and thus reuse is now not the last in
	 * the list, so outnet->tcp_reuse_last does not need to be modified */
	outnet->tcp_reuse_first = reuse;
}

/** call callback on waiting_tcp, if not NULL */
static void
waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
	struct comm_reply* reply_info)
{
	if(w->cb) {
		fptr_ok(fptr_whitelist_pending_tcp(w->cb));
		(void)(*w->cb)(c, w->cb_arg, error, reply_info);
	}
}

/** see if buffers can be used to service TCP queries */
static void
use_free_buffer(struct outside_network* outnet)
{
	struct waiting_tcp* w;
	while(outnet->tcp_free && outnet->tcp_wait_first
		&& !outnet->want_to_quit) {
		struct reuse_tcp* reuse = NULL;
		w = outnet->tcp_wait_first;
		outnet->tcp_wait_first = w->next_waiting;
		if(outnet->tcp_wait_last == w)
			outnet->tcp_wait_last = NULL;
		w->on_tcp_waiting_list = 0;
		reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
			w->ssl_upstream);
		if(reuse) {
			log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
				"found reuse", reuse);
			reuse_tcp_lru_touch(outnet, reuse);
			comm_timer_disable(w->timer);
			w->next_waiting = (void*)reuse->pending;
			reuse_tree_by_id_insert(reuse, w);
			if(reuse->pending->query) {
				/* on the write wait list */
				reuse_write_wait_push_back(reuse, w);
			} else {
				/* write straight away */
				/* stop the timer on read of the fd */
				comm_point_stop_listening(reuse->pending->c);
				reuse->pending->query = w;
				outnet_tcp_take_query_setup(
					reuse->pending->c->fd, reuse->pending,
					w);
			}
		} else {
			struct pending_tcp* pend = w->outnet->tcp_free;
			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
			pend->reuse.pending = pend;
			memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
			pend->reuse.addrlen = w->addrlen;
			if(!outnet_tcp_take_into_use(w)) {
				waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
					NULL);
				waiting_tcp_delete(w);
			}
		}
	}
}

/** add waiting_tcp element to the outnet tcp waiting list */
static void
outnet_add_tcp_waiting(struct outside_network* outnet, struct waiting_tcp* w)
{
	struct timeval tv;
	if(w->on_tcp_waiting_list)
		return;
	w->next_waiting = NULL;
	if(outnet->tcp_wait_last)
		outnet->tcp_wait_last->next_waiting = w;
	else	outnet->tcp_wait_first = w;
	outnet->tcp_wait_last = w;
	w->on_tcp_waiting_list = 1;
#ifndef S_SPLINT_S
	tv.tv_sec = w->timeout/1000;
	tv.tv_usec = (w->timeout%1000)*1000;
#endif
	comm_timer_set(w->timer, &tv);
}
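/* Note: w->id_node.key doubles as a membership flag for tree_by_id; it is
 * NULL while the waiting_tcp is not in a tree and points at the waiting_tcp
 * itself while inserted.  The asserts in reuse_tree_by_id_insert above and
 * reuse_tree_by_id_delete below rely on this. */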
/** delete element from tree by id */
static void
reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
	log_assert(w->id_node.key != NULL);
	rbtree_delete(&reuse->tree_by_id, w);
	w->id_node.key = NULL;
}

/** move writewait list to go for another connection. */
static void
reuse_move_writewait_away(struct outside_network* outnet,
	struct pending_tcp* pend)
{
	/* the writewait list has not been written yet, so if the
	 * stream was closed, they have not actually been failed, only
	 * the queries written.  Other queries can get written to another
	 * stream.  For upstreams that do not support multiple queries
	 * and answers, the stream can get closed, and then the queries
	 * can get written on a new socket */
	struct waiting_tcp* w;
	if(pend->query && pend->query->error_count == 0 &&
		pend->c->tcp_write_pkt == pend->query->pkt &&
		pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
		/* since the current query is not written, it can also
		 * move to a free buffer */
		if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
			LDNS_QDCOUNT(pend->query->pkt) > 0 &&
			dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
			char buf[LDNS_MAX_DOMAINLEN+1];
			dname_str(pend->query->pkt+12, buf);
			verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
				buf, (int)pend->c->tcp_write_byte_count);
		}
		pend->c->tcp_write_pkt = NULL;
		pend->c->tcp_write_pkt_len = 0;
		pend->c->tcp_write_and_read = 0;
		pend->reuse.cp_more_read_again = 0;
		pend->reuse.cp_more_write_again = 0;
		pend->c->tcp_is_reading = 1;
		w = pend->query;
		pend->query = NULL;
		/* increase error count, so that if the next socket fails too
		 * the server selection is run again with this query failed
		 * and it can select a different server (if possible), or
		 * fail the query */
		w->error_count ++;
		reuse_tree_by_id_delete(&pend->reuse, w);
		outnet_add_tcp_waiting(outnet, w);
	}
	while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
		if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
			LDNS_QDCOUNT(w->pkt) > 0 &&
			dname_valid(w->pkt+12, w->pkt_len-12)) {
			char buf[LDNS_MAX_DOMAINLEN+1];
			dname_str(w->pkt+12, buf);
			verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
		}
		reuse_tree_by_id_delete(&pend->reuse, w);
		outnet_add_tcp_waiting(outnet, w);
	}
}

/** remove reused element from tree and lru list */
static void
reuse_tcp_remove_tree_list(struct outside_network* outnet,
	struct reuse_tcp* reuse)
{
	verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
	if(reuse->node.key) {
		/* delete it from reuse tree */
		(void)rbtree_delete(&outnet->tcp_reuse, &reuse->node);
		reuse->node.key = NULL;
	}
	/* delete from reuse list */
	if(reuse->item_on_lru_list) {
		if(reuse->lru_prev) {
			/* assert that members of the lru list are waiting
			 * and thus have a pending pointer to the struct */
			log_assert(reuse->lru_prev->pending);
			reuse->lru_prev->lru_next = reuse->lru_next;
		} else {
			log_assert(!reuse->lru_next || reuse->lru_next->pending);
			outnet->tcp_reuse_first = reuse->lru_next;
		}
		if(reuse->lru_next) {
			/* assert that members of the lru list are waiting
			 * and thus have a pending pointer to the struct */
			log_assert(reuse->lru_next->pending);
			reuse->lru_next->lru_prev = reuse->lru_prev;
		} else {
			log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
			outnet->tcp_reuse_last = reuse->lru_prev;
		}
		reuse->item_on_lru_list = 0;
	}
}
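/* Note on the LRU list maintained above: tcp_reuse_first is the most
 * recently used stream and tcp_reuse_last the least recently used one.
 * reuse_tcp_insert and reuse_tcp_lru_touch put streams at the front, and
 * reuse_tcp_close_oldest (further below) takes its victim from the back
 * when a fd or pending_tcp buffer is needed. */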
/** helper function that deletes an element from the tree of readwait
 * elements in tcp reuse structure */
static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
{
	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
	waiting_tcp_delete(w);
}

/** delete readwait waiting_tcp elements, deletes the elements in the list */
void reuse_del_readwait(rbtree_type* tree_by_id)
{
	if(tree_by_id->root == NULL ||
		tree_by_id->root == RBTREE_NULL)
		return;
	traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
	rbtree_init(tree_by_id, reuse_id_cmp);
}

/** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
static void
decommission_pending_tcp(struct outside_network* outnet,
	struct pending_tcp* pend)
{
	verbose(VERB_CLIENT, "decommission_pending_tcp");
	pend->next_free = outnet->tcp_free;
	outnet->tcp_free = pend;
	if(pend->reuse.node.key) {
		/* needs unlink from the reuse tree to get deleted */
		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
	}
	/* free SSL structure after remove from outnet tcp reuse tree,
	 * because the c->ssl null or not is used for sorting in the tree */
	if(pend->c->ssl) {
#ifdef HAVE_SSL
		SSL_shutdown(pend->c->ssl);
		SSL_free(pend->c->ssl);
		pend->c->ssl = NULL;
#endif
	}
	comm_point_close(pend->c);
	pend->reuse.cp_more_read_again = 0;
	pend->reuse.cp_more_write_again = 0;
	/* unlink the query and writewait list, it is part of the tree
	 * nodes and is deleted */
	pend->query = NULL;
	pend->reuse.write_wait_first = NULL;
	pend->reuse.write_wait_last = NULL;
	reuse_del_readwait(&pend->reuse.tree_by_id);
}

/** perform failure callbacks for waiting queries in reuse read rbtree */
static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
{
	rbnode_type* node;
	if(tree_by_id->root == NULL ||
		tree_by_id->root == RBTREE_NULL)
		return;
	node = rbtree_first(tree_by_id);
	while(node && node != RBTREE_NULL) {
		struct waiting_tcp* w = (struct waiting_tcp*)node->key;
		waiting_tcp_callback(w, NULL, err, NULL);
		node = rbtree_next(node);
	}
}

/** perform callbacks for failure and also decommission pending tcp.
 * the callbacks remove references in sq->pending to the waiting_tcp
 * members of the tree_by_id in the pending tcp.  The pending_tcp is
 * removed before the callbacks, so that the callbacks do not modify
 * the pending_tcp due to its reference in the outside_network reuse tree */
static void reuse_cb_and_decommission(struct outside_network* outnet,
	struct pending_tcp* pend, int error)
{
	rbtree_type store;
	store = pend->reuse.tree_by_id;
	pend->query = NULL;
	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
	pend->reuse.write_wait_first = NULL;
	pend->reuse.write_wait_last = NULL;
	decommission_pending_tcp(outnet, pend);
	reuse_cb_readwait_for_failure(&store, error);
	reuse_del_readwait(&store);
}

/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
static void
reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
	comm_point_start_listening(pend_tcp->c, -1, REUSE_TIMEOUT);
}

/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
static void
reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
	sldns_buffer_clear(pend_tcp->c->buffer);
	pend_tcp->c->tcp_is_reading = 1;
	pend_tcp->c->tcp_byte_count = 0;
	comm_point_stop_listening(pend_tcp->c);
	comm_point_start_listening(pend_tcp->c, -1, REUSE_TIMEOUT);
}

int
outnet_tcp_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply *reply_info)
{
	struct pending_tcp* pend = (struct pending_tcp*)arg;
	struct outside_network* outnet = pend->reuse.outnet;
	struct waiting_tcp* w = NULL;
	verbose(VERB_ALGO, "outnettcp cb");
	if(error == NETEVENT_TIMEOUT) {
		if(pend->c->tcp_write_and_read) {
			verbose(VERB_QUERY, "outnettcp got tcp timeout "
				"for read, ignored because write underway");
			/* if we are writing, ignore readtimer, wait for write timer
			 * or write is done */
			return 0;
		} else {
			verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
				(pend->reuse.tree_by_id.count?"for reading pkt":
				"for keepalive for reuse"));
		}
		/* must be timeout for reading or keepalive reuse,
		 * close it. */
		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
	} else if(error == NETEVENT_PKT_WRITTEN) {
		/* the packet we want to write has been written. */
		verbose(VERB_ALGO, "outnet tcp pkt was written event");
		log_assert(c == pend->c);
		log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
		log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
		pend->c->tcp_write_pkt = NULL;
		pend->c->tcp_write_pkt_len = 0;
		/* the pend.query is already in tree_by_id */
		log_assert(pend->query->id_node.key);
		pend->query = NULL;
		/* setup to write next packet or setup read timeout */
		if(pend->reuse.write_wait_first) {
			verbose(VERB_ALGO, "outnet tcp setup next pkt");
			/* we can write it straight away perhaps, set flag
			 * because this callback called after a tcp write
			 * succeeded and likely more buffer space is available
			 * and we can write some more. */
			pend->reuse.cp_more_write_again = 1;
			pend->query = reuse_write_wait_pop(&pend->reuse);
			comm_point_stop_listening(pend->c);
			outnet_tcp_take_query_setup(pend->c->fd, pend,
				pend->query);
		} else {
			verbose(VERB_ALGO, "outnet tcp writes done, wait");
			pend->c->tcp_write_and_read = 0;
			pend->reuse.cp_more_read_again = 0;
			pend->reuse.cp_more_write_again = 0;
			pend->c->tcp_is_reading = 1;
			comm_point_stop_listening(pend->c);
			reuse_tcp_setup_timeout(pend);
		}
		return 0;
	} else if(error != NETEVENT_NOERROR) {
		verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
		reuse_move_writewait_away(outnet, pend);
		/* pass error below and exit */
	} else {
		/* check ID */
		if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
			log_addr(VERB_QUERY,
				"outnettcp: bad ID in reply, too short, from:",
				&pend->reuse.addr, pend->reuse.addrlen);
			error = NETEVENT_CLOSED;
		} else {
			uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
				c->buffer));
			/* find the query the reply is for */
			w = reuse_tcp_by_id_find(&pend->reuse, id);
		}
	}
	if(error == NETEVENT_NOERROR && !w) {
		/* no struct waiting found in tree, no reply to call */
		log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
			&pend->reuse.addr, pend->reuse.addrlen);
		error = NETEVENT_CLOSED;
	}
	if(error == NETEVENT_NOERROR) {
		/* add to reuse tree so it can be reused, if not a failure.
		 * This is possible if the state machine wants to make a tcp
		 * query again to the same destination. */
		if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
			(void)reuse_tcp_insert(outnet, pend);
		}
	}
	if(w) {
		reuse_tree_by_id_delete(&pend->reuse, w);
		verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
			error, (int)sldns_buffer_limit(c->buffer));
		waiting_tcp_callback(w, c, error, reply_info);
		waiting_tcp_delete(w);
	}
	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
	if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
		verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
		/* it is in the reuse_tcp tree, with other queries, or
		 * on the empty list. do not decommission it */
		/* if there are more outstanding queries, we could try to
		 * read again, to see if it is on the input,
		 * because this callback called after a successful read
		 * and there could be more bytes to read on the input */
		if(pend->reuse.tree_by_id.count != 0)
			pend->reuse.cp_more_read_again = 1;
		reuse_tcp_setup_read_and_timeout(pend);
		return 0;
	}
	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
	/* no queries on it, no space to keep it. or timeout or closed due
	 * to error. Close it */
	reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
		NETEVENT_TIMEOUT:NETEVENT_CLOSED));
	use_free_buffer(outnet);
	return 0;
}

/** lower use count on pc, see if it can be closed */
static void
portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
{
	struct port_if* pif;
	pc->num_outstanding--;
	if(pc->num_outstanding > 0) {
		return;
	}
	/* close it and replace in unused list */
	verbose(VERB_ALGO, "close of port %d", pc->number);
	comm_point_close(pc->cp);
	pif = pc->pif;
	log_assert(pif->inuse > 0);
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
	pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
#endif
	pif->inuse--;
	pif->out[pc->index] = pif->out[pif->inuse];
	pif->out[pc->index]->index = pc->index;
	pc->next = outnet->unused_fds;
	outnet->unused_fds = pc;
}

/** try to send waiting UDP queries */
static void
outnet_send_wait_udp(struct outside_network* outnet)
{
	struct pending* pend;
	/* process waiting queries */
	while(outnet->udp_wait_first && outnet->unused_fds
		&& !outnet->want_to_quit) {
		pend = outnet->udp_wait_first;
		outnet->udp_wait_first = pend->next_waiting;
		if(!pend->next_waiting) outnet->udp_wait_last = NULL;
		sldns_buffer_clear(outnet->udp_buff);
		sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
		sldns_buffer_flip(outnet->udp_buff);
		free(pend->pkt); /* freeing now makes get_mem correct */
		pend->pkt = NULL;
		pend->pkt_len = 0;
		if(!randomize_and_send_udp(pend, outnet->udp_buff,
			pend->timeout)) {
			/* callback error on pending */
			if(pend->cb) {
				fptr_ok(fptr_whitelist_pending_udp(pend->cb));
				(void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
					NETEVENT_CLOSED, NULL);
			}
			pending_delete(outnet, pend);
		}
	}
}

int
outnet_udp_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply *reply_info)
{
	struct outside_network* outnet = (struct outside_network*)arg;
	struct pending key;
	struct pending* p;
	verbose(VERB_ALGO, "answer cb");

	if(error != NETEVENT_NOERROR) {
		verbose(VERB_QUERY, "outnetudp got udp error %d", error);
		return 0;
	}
	if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
		verbose(VERB_QUERY, "outnetudp udp too short");
		return 0;
	}
	log_assert(reply_info);

	/* setup lookup key */
	key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
	memcpy(&key.addr, &reply_info->addr, reply_info->addrlen);
	key.addrlen = reply_info->addrlen;
	verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
	log_addr(VERB_ALGO, "Incoming reply addr =",
		&reply_info->addr, reply_info->addrlen);

	/* find it, see if this thing is a valid query response */
	verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
	p = (struct pending*)rbtree_search(outnet->pending, &key);
	if(!p) {
		verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
		log_buf(VERB_ALGO, "dropped message", c->buffer);
		outnet->unwanted_replies++;
		if(outnet->unwanted_threshold && ++outnet->unwanted_total
			>= outnet->unwanted_threshold) {
			log_warn("unwanted reply total reached threshold (%u)"
				" you may be under attack."
				" defensive action: clearing the cache",
				(unsigned)outnet->unwanted_threshold);
			fptr_ok(fptr_whitelist_alloc_cleanup(
				outnet->unwanted_action));
			(*outnet->unwanted_action)(outnet->unwanted_param);
			outnet->unwanted_total = 0;
		}
		return 0;
	}

	verbose(VERB_ALGO, "received udp reply.");
	log_buf(VERB_ALGO, "udp message", c->buffer);
	if(p->pc->cp != c) {
		verbose(VERB_QUERY, "received reply id,addr on wrong port. "
			"dropped.");
		outnet->unwanted_replies++;
		if(outnet->unwanted_threshold && ++outnet->unwanted_total
			>= outnet->unwanted_threshold) {
			log_warn("unwanted reply total reached threshold (%u)"
				" you may be under attack."
				" defensive action: clearing the cache",
				(unsigned)outnet->unwanted_threshold);
			fptr_ok(fptr_whitelist_alloc_cleanup(
				outnet->unwanted_action));
			(*outnet->unwanted_action)(outnet->unwanted_param);
			outnet->unwanted_total = 0;
		}
		return 0;
	}
	comm_timer_disable(p->timer);
	verbose(VERB_ALGO, "outnet handle udp reply");
	/* delete from tree first in case callback creates a retry */
	(void)rbtree_delete(outnet->pending, p->node.key);
	if(p->cb) {
		fptr_ok(fptr_whitelist_pending_udp(p->cb));
		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info);
	}
	portcomm_loweruse(outnet, p->pc);
	pending_delete(NULL, p);
	outnet_send_wait_udp(outnet);
	return 0;
}

/** calculate number of ip4 and ip6 interfaces */
static void
calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
	int* num_ip4, int* num_ip6)
{
	int i;
	*num_ip4 = 0;
	*num_ip6 = 0;
	if(num_ifs <= 0) {
		if(do_ip4)
			*num_ip4 = 1;
		if(do_ip6)
			*num_ip6 = 1;
		return;
	}
	for(i=0; i<num_ifs; i++)
	{
		if(str_is_ip6(ifs[i])) {
			if(do_ip6)
				(*num_ip6)++;
		} else {
			if(do_ip4)
				(*num_ip4)++;
		}
	}

}

void
pending_udp_timer_delay_cb(void* arg)
{
	struct pending* p = (struct pending*)arg;
	struct outside_network* outnet = p->outnet;
	verbose(VERB_ALGO, "timeout udp with delay");
	portcomm_loweruse(outnet, p->pc);
	pending_delete(outnet, p);
	outnet_send_wait_udp(outnet);
}

void
pending_udp_timer_cb(void *arg)
{
	struct pending* p = (struct pending*)arg;
	struct outside_network* outnet = p->outnet;
	/* it timed out */
	verbose(VERB_ALGO, "timeout udp");
	if(p->cb) {
		fptr_ok(fptr_whitelist_pending_udp(p->cb));
		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL);
	}
	/* if delayclose, keep port open for a longer time.
	 * But if the udpwaitlist exists, then we are struggling to
	 * keep up with demand for sockets, so do not wait, but service
	 * the customer (customer service more important than portICMPs) */
	if(outnet->delayclose && !outnet->udp_wait_first) {
		p->cb = NULL;
		p->timer->callback = &pending_udp_timer_delay_cb;
		comm_timer_set(p->timer, &outnet->delay_tv);
		return;
	}
	portcomm_loweruse(outnet, p->pc);
	pending_delete(outnet, p);
	outnet_send_wait_udp(outnet);
}

/** create pending_tcp buffers */
static int
create_pending_tcp(struct outside_network* outnet, size_t bufsize)
{
	size_t i;
	if(outnet->num_tcp == 0)
		return 1; /* no tcp needed, nothing to do */
	if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
		outnet->num_tcp, sizeof(struct pending_tcp*))))
		return 0;
	for(i=0; i<outnet->num_tcp; i++) {
		if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
			sizeof(struct pending_tcp))))
			return 0;
		outnet->tcp_conns[i]->next_free = outnet->tcp_free;
		outnet->tcp_free = outnet->tcp_conns[i];
		outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
			outnet->base, bufsize, outnet_tcp_cb,
			outnet->tcp_conns[i]);
		if(!outnet->tcp_conns[i]->c)
			return 0;
	}
	return 1;
}

/** setup an outgoing interface, ready address */
static int setup_if(struct port_if* pif, const char* addrstr,
	int* avail, int numavail, size_t numfd)
{
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
	pif->avail_total = numavail;
	pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
	if(!pif->avail_ports)
		return 0;
#endif
	if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
	   !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
		&pif->addr, &pif->addrlen, &pif->pfxlen))
		return 0;
	pif->maxout = (int)numfd;
	pif->inuse = 0;
	pif->out = (struct port_comm**)calloc(numfd,
		sizeof(struct port_comm*));
	if(!pif->out)
		return 0;
	return 1;
}

struct outside_network*
outside_network_create(struct comm_base *base, size_t bufsize,
	size_t num_ports, char** ifs, int num_ifs, int do_ip4,
	int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
	struct ub_randstate* rnd, int use_caps_for_id, int* availports,
	int numavailports, size_t unwanted_threshold, int tcp_mss,
	void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
	void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
	int udp_connect)
{
	struct outside_network* outnet = (struct outside_network*)
		calloc(1, sizeof(struct outside_network));
	size_t k;
	if(!outnet) {
		log_err("malloc failed");
		return NULL;
	}
	comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
	outnet->base = base;
	outnet->num_tcp = num_tcp;
	outnet->num_tcp_outgoing = 0;
	outnet->infra = infra;
	outnet->rnd = rnd;
	outnet->sslctx = sslctx;
	outnet->tls_use_sni = tls_use_sni;
#ifdef USE_DNSTAP
	outnet->dtenv = dtenv;
#else
	(void)dtenv;
#endif
	outnet->svcd_overhead = 0;
	outnet->want_to_quit = 0;
	outnet->unwanted_threshold = unwanted_threshold;
	outnet->unwanted_action = unwanted_action;
	outnet->unwanted_param = unwanted_param;
	outnet->use_caps_for_id = use_caps_for_id;
	outnet->do_udp = do_udp;
	outnet->tcp_mss = tcp_mss;
	outnet->ip_dscp = dscp;
#ifndef S_SPLINT_S
	if(delayclose) {
		outnet->delayclose = 1;
		outnet->delay_tv.tv_sec = delayclose/1000;
		outnet->delay_tv.tv_usec = (delayclose%1000)*1000;
	}
#endif
	if(udp_connect) {
		outnet->udp_connect = 1;
	}
	if(numavailports == 0 || num_ports == 0) {
		log_err("no outgoing ports available");
		outside_network_delete(outnet);
		return NULL;
	}
#ifndef INET6
	do_ip6 = 0;
#endif
	calc_num46(ifs, num_ifs, do_ip4, do_ip6,
		&outnet->num_ip4, &outnet->num_ip6);
	if(outnet->num_ip4 != 0) {
		if(!(outnet->ip4_ifs = (struct port_if*)calloc(
			(size_t)outnet->num_ip4, sizeof(struct port_if)))) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
	}
	if(outnet->num_ip6 != 0) {
		if(!(outnet->ip6_ifs = (struct port_if*)calloc(
			(size_t)outnet->num_ip6, sizeof(struct port_if)))) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
	}
	if(	!(outnet->udp_buff = sldns_buffer_new(bufsize)) ||
		!(outnet->pending = rbtree_create(pending_cmp)) ||
		!(outnet->serviced = rbtree_create(serviced_cmp)) ||
		!create_pending_tcp(outnet, bufsize)) {
		log_err("malloc failed");
		outside_network_delete(outnet);
		return NULL;
	}
	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
	outnet->tcp_reuse_max = num_tcp;

	/* allocate commpoints */
	for(k=0; k<num_ports; k++) {
		struct port_comm* pc;
		pc = (struct port_comm*)calloc(1, sizeof(*pc));
		if(!pc) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
		pc->cp = comm_point_create_udp(outnet->base, -1,
			outnet->udp_buff, outnet_udp_cb, outnet);
		if(!pc->cp) {
			log_err("malloc failed");
			free(pc);
			outside_network_delete(outnet);
			return NULL;
		}
		pc->next = outnet->unused_fds;
		outnet->unused_fds = pc;
	}

	/* allocate interfaces */
	if(num_ifs == 0) {
		if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0",
			availports, numavailports, num_ports)) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
		if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::",
			availports, numavailports, num_ports)) {
			log_err("malloc failed");
			outside_network_delete(outnet);
			return NULL;
		}
	} else {
		size_t done_4 = 0, done_6 = 0;
		int i;
		for(i=0; i<num_ifs; i++) {
			if(str_is_ip6(ifs[i]) && do_ip6) {
				if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i],
					availports, numavailports, num_ports)){
					log_err("malloc failed");
					outside_network_delete(outnet);
					return NULL;
				}
				done_6++;
			}
			if(!str_is_ip6(ifs[i]) && do_ip4) {
				if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i],
					availports, numavailports, num_ports)){
					log_err("malloc failed");
					outside_network_delete(outnet);
					return NULL;
				}
				done_4++;
			}
		}
	}
	return outnet;
}

/** helper pending delete */
static void
pending_node_del(rbnode_type* node, void* arg)
{
	struct pending* pend = (struct pending*)node;
	struct outside_network* outnet = (struct outside_network*)arg;
	pending_delete(outnet, pend);
}

/** helper serviced delete */
static void
serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg))
{
	struct serviced_query* sq = (struct serviced_query*)node;
	struct service_callback* p = sq->cblist, *np;
	free(sq->qbuf);
	free(sq->zone);
	free(sq->tls_auth_name);
	edns_opt_list_free(sq->opt_list);
	while(p) {
		np = p->next;
		free(p);
		p = np;
	}
	free(sq);
}

void
outside_network_quit_prepare(struct outside_network* outnet)
{
	if(!outnet)
		return;
	/* prevent queued items from being sent */
	outnet->want_to_quit = 1;
}

void
outside_network_delete(struct outside_network* outnet)
{
	if(!outnet)
		return;
	outnet->want_to_quit = 1;
	/* check every element, since we can be called on malloc error */
	if(outnet->pending) {
		/* free pending elements, but do not unlink from tree. */
		traverse_postorder(outnet->pending, pending_node_del, NULL);
		free(outnet->pending);
	}
	if(outnet->serviced) {
		traverse_postorder(outnet->serviced, serviced_node_del, NULL);
		free(outnet->serviced);
	}
	if(outnet->udp_buff)
		sldns_buffer_free(outnet->udp_buff);
	if(outnet->unused_fds) {
		struct port_comm* p = outnet->unused_fds, *np;
		while(p) {
			np = p->next;
			comm_point_delete(p->cp);
			free(p);
			p = np;
		}
		outnet->unused_fds = NULL;
	}
	if(outnet->ip4_ifs) {
		int i, k;
		for(i=0; i<outnet->num_ip4; i++) {
			for(k=0; k<outnet->ip4_ifs[i].inuse; k++) {
				struct port_comm* pc = outnet->ip4_ifs[i].
					out[k];
				comm_point_delete(pc->cp);
				free(pc);
			}
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
			free(outnet->ip4_ifs[i].avail_ports);
#endif
			free(outnet->ip4_ifs[i].out);
		}
		free(outnet->ip4_ifs);
	}
	if(outnet->ip6_ifs) {
		int i, k;
		for(i=0; i<outnet->num_ip6; i++) {
			for(k=0; k<outnet->ip6_ifs[i].inuse; k++) {
				struct port_comm* pc = outnet->ip6_ifs[i].
					out[k];
				comm_point_delete(pc->cp);
				free(pc);
			}
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
			free(outnet->ip6_ifs[i].avail_ports);
#endif
			free(outnet->ip6_ifs[i].out);
		}
		free(outnet->ip6_ifs);
	}
	if(outnet->tcp_conns) {
		size_t i;
		for(i=0; i<outnet->num_tcp; i++)
			if(outnet->tcp_conns[i]) {
				if(outnet->tcp_conns[i]->query &&
					!outnet->tcp_conns[i]->query->
					on_tcp_waiting_list) {
					/* delete waiting_tcp elements that
					 * the tcp conn is working on */
					struct pending_tcp* pend =
						(struct pending_tcp*)outnet->
						tcp_conns[i]->query->
						next_waiting;
					decommission_pending_tcp(outnet, pend);
				}
				comm_point_delete(outnet->tcp_conns[i]->c);
				waiting_tcp_delete(outnet->tcp_conns[i]->query);
				free(outnet->tcp_conns[i]);
			}
		free(outnet->tcp_conns);
	}
	if(outnet->tcp_wait_first) {
		struct waiting_tcp* p = outnet->tcp_wait_first, *np;
		while(p) {
			np = p->next_waiting;
			waiting_tcp_delete(p);
			p = np;
		}
	}
	/* was allocated in struct pending that was deleted above */
	rbtree_init(&outnet->tcp_reuse, reuse_cmp);
	outnet->tcp_reuse_first = NULL;
	outnet->tcp_reuse_last = NULL;
	if(outnet->udp_wait_first) {
		struct pending* p = outnet->udp_wait_first, *np;
		while(p) {
			np = p->next_waiting;
			pending_delete(NULL, p);
			p = np;
		}
	}
	free(outnet);
}

void
pending_delete(struct outside_network* outnet, struct pending* p)
{
	if(!p)
		return;
	if(outnet && outnet->udp_wait_first &&
		(p->next_waiting || p == outnet->udp_wait_last) ) {
		/* delete from waiting list, if it is in the waiting list */
		struct pending* prev = NULL, *x = outnet->udp_wait_first;
		while(x && x != p) {
			prev = x;
			x = x->next_waiting;
		}
		if(x) {
			log_assert(x == p);
			if(prev)
				prev->next_waiting = p->next_waiting;
			else	outnet->udp_wait_first = p->next_waiting;
			if(outnet->udp_wait_last == p)
				outnet->udp_wait_last = prev;
		}
	}
	if(outnet) {
		(void)rbtree_delete(outnet->pending, p->node.key);
	}
	if(p->timer)
		comm_timer_delete(p->timer);
	free(p->pkt);
	free(p);
}

static void
sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd)
{
	int i, last;
	if(!(pfxlen > 0 && pfxlen < 128))
		return;
	for(i = 0; i < (128 - pfxlen) / 8; i++) {
		sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256);
	}
	last = pfxlen & 7;
	if(last != 0) {
		sa->sin6_addr.s6_addr[15-i] |=
			((0xFF >> last) & ub_random_max(rnd, 256));
	}
}
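/* Worked example for the prefix randomisation above: with pfxlen 64 the
 * loop randomises the eight low bytes s6_addr[8..15]; with pfxlen 60 it
 * additionally ORs random bits into the low four bits of s6_addr[7]
 * (mask 0xFF >> (60 & 7) == 0x0F), so all 68 host bits outside the /60
 * prefix get randomised. */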
/**
 * Try to open a UDP socket for outgoing communication.
 * Sets socket options as needed.
 * @param addr: socket address.
 * @param addrlen: length of address.
 * @param pfxlen: length of network prefix (for address randomisation).
 * @param port: port override for addr.
 * @param inuse: if -1 is returned, this bool means the port was in use.
 * @param rnd: random state (for address randomisation).
 * @param dscp: DSCP to use.
 * @return fd or -1
 */
static int
udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen,
	int port, int* inuse, struct ub_randstate* rnd, int dscp)
{
	int fd, noproto;
	if(addr_is_ip6(addr, addrlen)) {
		int freebind = 0;
		struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
		sa.sin6_port = (in_port_t)htons((uint16_t)port);
		sa.sin6_flowinfo = 0;
		sa.sin6_scope_id = 0;
		if(pfxlen != 0) {
			freebind = 1;
			sai6_putrandom(&sa, pfxlen, rnd);
		}
		fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
			(struct sockaddr*)&sa, addrlen, 1, inuse, &noproto,
			0, 0, 0, NULL, 0, freebind, 0, dscp);
	} else {
		struct sockaddr_in* sa = (struct sockaddr_in*)addr;
		sa->sin_port = (in_port_t)htons((uint16_t)port);
		fd = create_udp_sock(AF_INET, SOCK_DGRAM,
			(struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
			0, 0, 0, NULL, 0, 0, 0, dscp);
	}
	return fd;
}

/** Select random ID */
static int
select_id(struct outside_network* outnet, struct pending* pend,
	sldns_buffer* packet)
{
	int id_tries = 0;
	pend->id = ((unsigned)ub_random(outnet->rnd)>>8) & 0xffff;
	LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);

	/* insert in tree */
	pend->node.key = pend;
	while(!rbtree_insert(outnet->pending, &pend->node)) {
		/* change ID to avoid collision */
		pend->id = ((unsigned)ub_random(outnet->rnd)>>8) & 0xffff;
		LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
		id_tries++;
		if(id_tries == MAX_ID_RETRY) {
			pend->id=99999; /* non existent ID */
			log_err("failed to generate unique ID, drop msg");
			return 0;
		}
	}
	verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id);
	return 1;
}

/** Select random interface and port */
static int
select_ifport(struct outside_network* outnet, struct pending* pend,
	int num_if, struct port_if* ifs)
{
	int my_if, my_port, fd, portno, inuse, tries=0;
	struct port_if* pif;
	/* randomly select interface and port */
	if(num_if == 0) {
		verbose(VERB_QUERY, "Need to send query but have no "
			"outgoing interfaces of that family");
		return 0;
	}
	log_assert(outnet->unused_fds);
	tries = 0;
	while(1) {
		my_if = ub_random_max(outnet->rnd, num_if);
		pif = &ifs[my_if];
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
		if(outnet->udp_connect) {
			/* if we connect() we cannot reuse fds for a port */
			if(pif->inuse >= pif->avail_total) {
				tries++;
				if(tries < MAX_PORT_RETRY)
					continue;
				log_err("failed to find an open port, drop msg");
				return 0;
			}
			my_port = pif->inuse + ub_random_max(outnet->rnd,
				pif->avail_total - pif->inuse);
		} else {
			my_port = ub_random_max(outnet->rnd, pif->avail_total);
			if(my_port < pif->inuse) {
				/* port already open */
				pend->pc = pif->out[my_port];
				verbose(VERB_ALGO, "using UDP if=%d port=%d",
					my_if, pend->pc->number);
				break;
			}
		}
		/* try to open new port, if fails, loop to try again */
		log_assert(pif->inuse < pif->maxout);
		portno = pif->avail_ports[my_port - pif->inuse];
#else
		my_port = portno = 0;
#endif
		fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
			portno, &inuse, outnet->rnd, outnet->ip_dscp);
		if(fd == -1 && !inuse) {
			/* nonrecoverable error making socket */
			return 0;
		}
-1) { 1801 verbose(VERB_ALGO, "opened UDP if=%d port=%d", 1802 my_if, portno); 1803 if(outnet->udp_connect) { 1804 /* connect() to the destination */ 1805 if(connect(fd, (struct sockaddr*)&pend->addr, 1806 pend->addrlen) < 0) { 1807 log_err_addr("udp connect failed", 1808 strerror(errno), &pend->addr, 1809 pend->addrlen); 1810 sock_close(fd); 1811 return 0; 1812 } 1813 } 1814 /* grab fd */ 1815 pend->pc = outnet->unused_fds; 1816 outnet->unused_fds = pend->pc->next; 1817 1818 /* setup portcomm */ 1819 pend->pc->next = NULL; 1820 pend->pc->number = portno; 1821 pend->pc->pif = pif; 1822 pend->pc->index = pif->inuse; 1823 pend->pc->num_outstanding = 0; 1824 comm_point_start_listening(pend->pc->cp, fd, -1); 1825 1826 /* grab port in interface */ 1827 pif->out[pif->inuse] = pend->pc; 1828 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1829 pif->avail_ports[my_port - pif->inuse] = 1830 pif->avail_ports[pif->avail_total-pif->inuse-1]; 1831 #endif 1832 pif->inuse++; 1833 break; 1834 } 1835 /* failed, already in use */ 1836 verbose(VERB_QUERY, "port %d in use, trying another", portno); 1837 tries++; 1838 if(tries == MAX_PORT_RETRY) { 1839 log_err("failed to find an open port, drop msg"); 1840 return 0; 1841 } 1842 } 1843 log_assert(pend->pc); 1844 pend->pc->num_outstanding++; 1845 1846 return 1; 1847 } 1848 1849 static int 1850 randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout) 1851 { 1852 struct timeval tv; 1853 struct outside_network* outnet = pend->sq->outnet; 1854 1855 /* select id */ 1856 if(!select_id(outnet, pend, packet)) { 1857 return 0; 1858 } 1859 1860 /* select src_if, port */ 1861 if(addr_is_ip6(&pend->addr, pend->addrlen)) { 1862 if(!select_ifport(outnet, pend, 1863 outnet->num_ip6, outnet->ip6_ifs)) 1864 return 0; 1865 } else { 1866 if(!select_ifport(outnet, pend, 1867 outnet->num_ip4, outnet->ip4_ifs)) 1868 return 0; 1869 } 1870 log_assert(pend->pc && pend->pc->cp); 1871 1872 /* send it over the commlink */ 1873 if(!comm_point_send_udp_msg(pend->pc->cp, packet, 1874 (struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) { 1875 portcomm_loweruse(outnet, pend->pc); 1876 return 0; 1877 } 1878 1879 /* system calls to set timeout after sending UDP to make roundtrip 1880 smaller. 
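The timer is armed only after comm_point_send_udp_msg() has handed the
datagram to the kernel, so the rtt-based timeout measures the network
round trip rather than local socket and port setup.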
*/ 1881 #ifndef S_SPLINT_S 1882 tv.tv_sec = timeout/1000; 1883 tv.tv_usec = (timeout%1000)*1000; 1884 #endif 1885 comm_timer_set(pend->timer, &tv); 1886 1887 #ifdef USE_DNSTAP 1888 if(outnet->dtenv && 1889 (outnet->dtenv->log_resolver_query_messages || 1890 outnet->dtenv->log_forwarder_query_messages)) 1891 dt_msg_send_outside_query(outnet->dtenv, &pend->addr, comm_udp, 1892 pend->sq->zone, pend->sq->zonelen, packet); 1893 #endif 1894 return 1; 1895 } 1896 1897 struct pending* 1898 pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet, 1899 int timeout, comm_point_callback_type* cb, void* cb_arg) 1900 { 1901 struct pending* pend = (struct pending*)calloc(1, sizeof(*pend)); 1902 if(!pend) return NULL; 1903 pend->outnet = sq->outnet; 1904 pend->sq = sq; 1905 pend->addrlen = sq->addrlen; 1906 memmove(&pend->addr, &sq->addr, sq->addrlen); 1907 pend->cb = cb; 1908 pend->cb_arg = cb_arg; 1909 pend->node.key = pend; 1910 pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb, 1911 pend); 1912 if(!pend->timer) { 1913 free(pend); 1914 return NULL; 1915 } 1916 1917 if(sq->outnet->unused_fds == NULL) { 1918 /* no unused fd, cannot create a new port (randomly) */ 1919 verbose(VERB_ALGO, "no fds available, udp query waiting"); 1920 pend->timeout = timeout; 1921 pend->pkt_len = sldns_buffer_limit(packet); 1922 pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet), 1923 pend->pkt_len); 1924 if(!pend->pkt) { 1925 comm_timer_delete(pend->timer); 1926 free(pend); 1927 return NULL; 1928 } 1929 /* put at end of waiting list */ 1930 if(sq->outnet->udp_wait_last) 1931 sq->outnet->udp_wait_last->next_waiting = pend; 1932 else 1933 sq->outnet->udp_wait_first = pend; 1934 sq->outnet->udp_wait_last = pend; 1935 return pend; 1936 } 1937 if(!randomize_and_send_udp(pend, packet, timeout)) { 1938 pending_delete(sq->outnet, pend); 1939 return NULL; 1940 } 1941 return pend; 1942 } 1943 1944 void 1945 outnet_tcptimer(void* arg) 1946 { 1947 struct waiting_tcp* w = (struct waiting_tcp*)arg; 1948 struct outside_network* outnet = w->outnet; 1949 verbose(VERB_CLIENT, "outnet_tcptimer"); 1950 if(w->on_tcp_waiting_list) { 1951 /* it is on the waiting list */ 1952 waiting_list_remove(outnet, w); 1953 waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL); 1954 waiting_tcp_delete(w); 1955 } else { 1956 /* it was in use */ 1957 struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting; 1958 reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT); 1959 } 1960 use_free_buffer(outnet); 1961 } 1962 1963 /** close the oldest reuse_tcp connection to make a fd and struct pend 1964 * available for a new stream connection */ 1965 static void 1966 reuse_tcp_close_oldest(struct outside_network* outnet) 1967 { 1968 struct pending_tcp* pend; 1969 verbose(VERB_CLIENT, "reuse_tcp_close_oldest"); 1970 if(!outnet->tcp_reuse_last) return; 1971 pend = outnet->tcp_reuse_last->pending; 1972 1973 /* snip off of LRU */ 1974 log_assert(pend->reuse.lru_next == NULL); 1975 if(pend->reuse.lru_prev) { 1976 outnet->tcp_reuse_last = pend->reuse.lru_prev; 1977 pend->reuse.lru_prev->lru_next = NULL; 1978 } else { 1979 outnet->tcp_reuse_last = NULL; 1980 outnet->tcp_reuse_first = NULL; 1981 } 1982 pend->reuse.item_on_lru_list = 0; 1983 1984 /* free up */ 1985 reuse_cb_and_decommission(outnet, pend, NETEVENT_CLOSED); 1986 } 1987 1988 /** find spare ID value for reuse tcp stream. 
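Queries multiplexed over one TCP stream are told apart only by their DNS
message ID, so each new query on the stream needs an ID of its own.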
That is random and also does 1989 * not collide with an existing query ID that is in use or waiting */ 1990 uint16_t 1991 reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet) 1992 { 1993 uint16_t id = 0, curid, nextid; 1994 const int try_random = 2000; 1995 int i; 1996 unsigned select, count, space; 1997 rbnode_type* node; 1998 1999 /* make really sure the tree is not empty */ 2000 if(reuse->tree_by_id.count == 0) { 2001 id = ((unsigned)ub_random(outnet->rnd)>>8) & 0xffff; 2002 return id; 2003 } 2004 2005 /* try to find random empty spots by picking them */ 2006 for(i = 0; i<try_random; i++) { 2007 id = ((unsigned)ub_random(outnet->rnd)>>8) & 0xffff; 2008 if(!reuse_tcp_by_id_find(reuse, id)) { 2009 return id; 2010 } 2011 } 2012 2013 /* equally pick a random unused element from the tree that is 2014 * not in use. Pick a the n-th index of an ununused number, 2015 * then loop over the empty spaces in the tree and find it */ 2016 log_assert(reuse->tree_by_id.count < 0xffff); 2017 select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count); 2018 /* select value now in 0 .. num free - 1 */ 2019 2020 count = 0; /* number of free spaces passed by */ 2021 node = rbtree_first(&reuse->tree_by_id); 2022 log_assert(node && node != RBTREE_NULL); /* tree not empty */ 2023 /* see if select is before first node */ 2024 if(select < tree_by_id_get_id(node)) 2025 return select; 2026 count += tree_by_id_get_id(node); 2027 /* perhaps select is between nodes */ 2028 while(node && node != RBTREE_NULL) { 2029 rbnode_type* next = rbtree_next(node); 2030 if(next && next != RBTREE_NULL) { 2031 curid = tree_by_id_get_id(node); 2032 nextid = tree_by_id_get_id(next); 2033 log_assert(curid < nextid); 2034 if(curid != 0xffff && curid + 1 < nextid) { 2035 /* space between nodes */ 2036 space = nextid - curid - 1; 2037 log_assert(select >= count); 2038 if(select < count + space) { 2039 /* here it is */ 2040 return curid + 1 + (select - count); 2041 } 2042 count += space; 2043 } 2044 } 2045 node = next; 2046 } 2047 2048 /* select is after the last node */ 2049 /* count is the number of free positions before the nodes in the 2050 * tree */ 2051 node = rbtree_last(&reuse->tree_by_id); 2052 log_assert(node && node != RBTREE_NULL); /* tree not empty */ 2053 curid = tree_by_id_get_id(node); 2054 log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff); 2055 return curid + 1 + (select - count); 2056 } 2057 2058 struct waiting_tcp* 2059 pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet, 2060 int timeout, comm_point_callback_type* callback, void* callback_arg) 2061 { 2062 struct pending_tcp* pend = sq->outnet->tcp_free; 2063 struct reuse_tcp* reuse = NULL; 2064 struct waiting_tcp* w; 2065 2066 verbose(VERB_CLIENT, "pending_tcp_query"); 2067 if(sldns_buffer_limit(packet) < sizeof(uint16_t)) { 2068 verbose(VERB_ALGO, "pending tcp query with too short buffer < 2"); 2069 return NULL; 2070 } 2071 2072 /* find out if a reused stream to the target exists */ 2073 /* if so, take it into use */ 2074 reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen, 2075 sq->ssl_upstream); 2076 if(reuse) { 2077 log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse); 2078 log_assert(reuse->pending); 2079 pend = reuse->pending; 2080 reuse_tcp_lru_touch(sq->outnet, reuse); 2081 } 2082 2083 /* if !pend but we have reuse streams, close a reuse stream 2084 * to be able to open a new one to this target, no use waiting 2085 * to reuse a file descriptor while another query needs to 
use 2086 * that buffer and file descriptor now. */ 2087 if(!pend) { 2088 reuse_tcp_close_oldest(sq->outnet); 2089 pend = sq->outnet->tcp_free; 2090 } 2091 2092 /* allocate space to store query */ 2093 w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp) 2094 + sldns_buffer_limit(packet)); 2095 if(!w) { 2096 return NULL; 2097 } 2098 if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) { 2099 free(w); 2100 return NULL; 2101 } 2102 w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp); 2103 w->pkt_len = sldns_buffer_limit(packet); 2104 memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len); 2105 if(reuse) 2106 w->id = reuse_tcp_select_id(reuse, sq->outnet); 2107 else w->id = ((unsigned)ub_random(sq->outnet->rnd)>>8) & 0xffff; 2108 LDNS_ID_SET(w->pkt, w->id); 2109 memcpy(&w->addr, &sq->addr, sq->addrlen); 2110 w->addrlen = sq->addrlen; 2111 w->outnet = sq->outnet; 2112 w->on_tcp_waiting_list = 0; 2113 w->next_waiting = NULL; 2114 w->cb = callback; 2115 w->cb_arg = callback_arg; 2116 w->ssl_upstream = sq->ssl_upstream; 2117 w->tls_auth_name = sq->tls_auth_name; 2118 w->timeout = timeout; 2119 w->id_node.key = NULL; 2120 w->write_wait_prev = NULL; 2121 w->write_wait_next = NULL; 2122 w->write_wait_queued = 0; 2123 w->error_count = 0; 2124 if(pend) { 2125 /* we have a buffer available right now */ 2126 if(reuse) { 2127 /* reuse existing fd, write query and continue */ 2128 /* store query in tree by id */ 2129 verbose(VERB_CLIENT, "pending_tcp_query: reuse, store"); 2130 w->next_waiting = (void*)pend; 2131 reuse_tree_by_id_insert(&pend->reuse, w); 2132 /* can we write right now? */ 2133 if(pend->query == NULL) { 2134 /* write straight away */ 2135 /* stop the timer on read of the fd */ 2136 comm_point_stop_listening(pend->c); 2137 pend->query = w; 2138 outnet_tcp_take_query_setup(pend->c->fd, pend, 2139 w); 2140 } else { 2141 /* put it in the waiting list for 2142 * this stream */ 2143 reuse_write_wait_push_back(&pend->reuse, w); 2144 } 2145 } else { 2146 /* create new fd and connect to addr, setup to 2147 * write query */ 2148 verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect"); 2149 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp); 2150 pend->reuse.pending = pend; 2151 memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen); 2152 pend->reuse.addrlen = sq->addrlen; 2153 if(!outnet_tcp_take_into_use(w)) { 2154 waiting_tcp_delete(w); 2155 return NULL; 2156 } 2157 } 2158 } else { 2159 /* queue up */ 2160 /* waiting for a buffer on the outside network buffer wait 2161 * list */ 2162 verbose(VERB_CLIENT, "pending_tcp_query: queue to wait"); 2163 outnet_add_tcp_waiting(sq->outnet, w); 2164 } 2165 #ifdef USE_DNSTAP 2166 if(sq->outnet->dtenv && 2167 (sq->outnet->dtenv->log_resolver_query_messages || 2168 sq->outnet->dtenv->log_forwarder_query_messages)) 2169 dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr, 2170 comm_tcp, sq->zone, sq->zonelen, packet); 2171 #endif 2172 return w; 2173 } 2174 2175 /** create query for serviced queries */ 2176 static void 2177 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen, 2178 uint16_t qtype, uint16_t qclass, uint16_t flags) 2179 { 2180 sldns_buffer_clear(buff); 2181 /* skip id */ 2182 sldns_buffer_write_u16(buff, flags); 2183 sldns_buffer_write_u16(buff, 1); /* qdcount */ 2184 sldns_buffer_write_u16(buff, 0); /* ancount */ 2185 sldns_buffer_write_u16(buff, 0); /* nscount */ 2186 sldns_buffer_write_u16(buff, 0); /* arcount */ 2187 sldns_buffer_write(buff, qname, qnamelen); 2188 sldns_buffer_write_u16(buff, qtype); 
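	/* qclass is written last; the buffer built here is later copied
	 * into sq->qbuf by serviced_create(), so the stored query has no
	 * ID field and its qname sits at offset 10 (not 12), as other
	 * code in this file assumes */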
2189 sldns_buffer_write_u16(buff, qclass); 2190 sldns_buffer_flip(buff); 2191 } 2192 2193 /** lookup serviced query in serviced query rbtree */ 2194 static struct serviced_query* 2195 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec, 2196 struct sockaddr_storage* addr, socklen_t addrlen, 2197 struct edns_option* opt_list) 2198 { 2199 struct serviced_query key; 2200 key.node.key = &key; 2201 key.qbuf = sldns_buffer_begin(buff); 2202 key.qbuflen = sldns_buffer_limit(buff); 2203 key.dnssec = dnssec; 2204 memcpy(&key.addr, addr, addrlen); 2205 key.addrlen = addrlen; 2206 key.outnet = outnet; 2207 key.opt_list = opt_list; 2208 return (struct serviced_query*)rbtree_search(outnet->serviced, &key); 2209 } 2210 2211 /** Create new serviced entry */ 2212 static struct serviced_query* 2213 serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec, 2214 int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream, 2215 char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen, 2216 uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list) 2217 { 2218 struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq)); 2219 #ifdef UNBOUND_DEBUG 2220 rbnode_type* ins; 2221 #endif 2222 if(!sq) 2223 return NULL; 2224 sq->node.key = sq; 2225 sq->qbuf = memdup(sldns_buffer_begin(buff), sldns_buffer_limit(buff)); 2226 if(!sq->qbuf) { 2227 free(sq); 2228 return NULL; 2229 } 2230 sq->qbuflen = sldns_buffer_limit(buff); 2231 sq->zone = memdup(zone, zonelen); 2232 if(!sq->zone) { 2233 free(sq->qbuf); 2234 free(sq); 2235 return NULL; 2236 } 2237 sq->zonelen = zonelen; 2238 sq->qtype = qtype; 2239 sq->dnssec = dnssec; 2240 sq->want_dnssec = want_dnssec; 2241 sq->nocaps = nocaps; 2242 sq->tcp_upstream = tcp_upstream; 2243 sq->ssl_upstream = ssl_upstream; 2244 if(tls_auth_name) { 2245 sq->tls_auth_name = strdup(tls_auth_name); 2246 if(!sq->tls_auth_name) { 2247 free(sq->zone); 2248 free(sq->qbuf); 2249 free(sq); 2250 return NULL; 2251 } 2252 } else { 2253 sq->tls_auth_name = NULL; 2254 } 2255 memcpy(&sq->addr, addr, addrlen); 2256 sq->addrlen = addrlen; 2257 sq->opt_list = NULL; 2258 if(opt_list) { 2259 sq->opt_list = edns_opt_copy_alloc(opt_list); 2260 if(!sq->opt_list) { 2261 free(sq->tls_auth_name); 2262 free(sq->zone); 2263 free(sq->qbuf); 2264 free(sq); 2265 return NULL; 2266 } 2267 } 2268 sq->outnet = outnet; 2269 sq->cblist = NULL; 2270 sq->pending = NULL; 2271 sq->status = serviced_initial; 2272 sq->retry = 0; 2273 sq->to_be_deleted = 0; 2274 #ifdef UNBOUND_DEBUG 2275 ins = 2276 #else 2277 (void) 2278 #endif 2279 rbtree_insert(outnet->serviced, &sq->node); 2280 log_assert(ins != NULL); /* must not be already present */ 2281 return sq; 2282 } 2283 2284 /** remove waiting tcp from the outnet waiting list */ 2285 static void 2286 waiting_list_remove(struct outside_network* outnet, struct waiting_tcp* w) 2287 { 2288 struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL; 2289 w->on_tcp_waiting_list = 0; 2290 while(p) { 2291 if(p == w) { 2292 /* remove w */ 2293 if(prev) 2294 prev->next_waiting = w->next_waiting; 2295 else outnet->tcp_wait_first = w->next_waiting; 2296 if(outnet->tcp_wait_last == w) 2297 outnet->tcp_wait_last = prev; 2298 return; 2299 } 2300 prev = p; 2301 p = p->next_waiting; 2302 } 2303 } 2304 2305 /** reuse tcp stream, remove serviced query from stream, 2306 * return true if the stream is kept, false if it is to be closed */ 2307 static int 2308 reuse_tcp_remove_serviced_keep(struct waiting_tcp* w, 2309 
struct serviced_query* sq) 2310 { 2311 struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting; 2312 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep"); 2313 /* remove the callback. let query continue to write to not cancel 2314 * the stream itself. also keep it as an entry in the tree_by_id, 2315 * in case the answer returns (that we no longer want), but we cannot 2316 * pick the same ID number meanwhile */ 2317 w->cb = NULL; 2318 /* see if can be entered in reuse tree 2319 * for that the FD has to be non-1 */ 2320 if(pend_tcp->c->fd == -1) { 2321 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd"); 2322 return 0; 2323 } 2324 /* if in tree and used by other queries */ 2325 if(pend_tcp->reuse.node.key) { 2326 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries"); 2327 /* do not reset the keepalive timer, for that 2328 * we'd need traffic, and this is where the serviced is 2329 * removed due to state machine internal reasons, 2330 * eg. iterator no longer interested in this query */ 2331 return 1; 2332 } 2333 /* if still open and want to keep it open */ 2334 if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count < 2335 sq->outnet->tcp_reuse_max) { 2336 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open"); 2337 /* set a keepalive timer on it */ 2338 if(!reuse_tcp_insert(sq->outnet, pend_tcp)) { 2339 return 0; 2340 } 2341 reuse_tcp_setup_timeout(pend_tcp); 2342 return 1; 2343 } 2344 return 0; 2345 } 2346 2347 /** cleanup serviced query entry */ 2348 static void 2349 serviced_delete(struct serviced_query* sq) 2350 { 2351 verbose(VERB_CLIENT, "serviced_delete"); 2352 if(sq->pending) { 2353 /* clear up the pending query */ 2354 if(sq->status == serviced_query_UDP_EDNS || 2355 sq->status == serviced_query_UDP || 2356 sq->status == serviced_query_UDP_EDNS_FRAG || 2357 sq->status == serviced_query_UDP_EDNS_fallback) { 2358 struct pending* p = (struct pending*)sq->pending; 2359 verbose(VERB_CLIENT, "serviced_delete: UDP"); 2360 if(p->pc) 2361 portcomm_loweruse(sq->outnet, p->pc); 2362 pending_delete(sq->outnet, p); 2363 /* this call can cause reentrant calls back into the 2364 * mesh */ 2365 outnet_send_wait_udp(sq->outnet); 2366 } else { 2367 struct waiting_tcp* w = (struct waiting_tcp*) 2368 sq->pending; 2369 verbose(VERB_CLIENT, "serviced_delete: TCP"); 2370 /* if on stream-write-waiting list then 2371 * remove from waiting list and waiting_tcp_delete */ 2372 if(w->write_wait_queued) { 2373 struct pending_tcp* pend = 2374 (struct pending_tcp*)w->next_waiting; 2375 verbose(VERB_CLIENT, "serviced_delete: writewait"); 2376 reuse_tree_by_id_delete(&pend->reuse, w); 2377 reuse_write_wait_remove(&pend->reuse, w); 2378 waiting_tcp_delete(w); 2379 } else if(!w->on_tcp_waiting_list) { 2380 struct pending_tcp* pend = 2381 (struct pending_tcp*)w->next_waiting; 2382 verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep"); 2383 if(!reuse_tcp_remove_serviced_keep(w, sq)) { 2384 reuse_cb_and_decommission(sq->outnet, 2385 pend, NETEVENT_CLOSED); 2386 use_free_buffer(sq->outnet); 2387 } 2388 sq->pending = NULL; 2389 } else { 2390 verbose(VERB_CLIENT, "serviced_delete: tcpwait"); 2391 waiting_list_remove(sq->outnet, w); 2392 waiting_tcp_delete(w); 2393 } 2394 } 2395 } 2396 /* does not delete from tree, caller has to do that */ 2397 serviced_node_del(&sq->node, NULL); 2398 } 2399 2400 /** perturb a dname capitalization randomly */ 2401 static void 2402 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len) 2403 { 2404 uint8_t 
lablen; 2405 uint8_t* d = qbuf + 10; 2406 long int random = 0; 2407 int bits = 0; 2408 log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */); 2409 (void)len; 2410 lablen = *d++; 2411 while(lablen) { 2412 while(lablen--) { 2413 /* only perturb A-Z, a-z */ 2414 if(isalpha((unsigned char)*d)) { 2415 /* get a random bit */ 2416 if(bits == 0) { 2417 random = ub_random(rnd); 2418 bits = 30; 2419 } 2420 if(random & 0x1) { 2421 *d = (uint8_t)toupper((unsigned char)*d); 2422 } else { 2423 *d = (uint8_t)tolower((unsigned char)*d); 2424 } 2425 random >>= 1; 2426 bits--; 2427 } 2428 d++; 2429 } 2430 lablen = *d++; 2431 } 2432 if(verbosity >= VERB_ALGO) { 2433 char buf[LDNS_MAX_DOMAINLEN+1]; 2434 dname_str(qbuf+10, buf); 2435 verbose(VERB_ALGO, "qname perturbed to %s", buf); 2436 } 2437 } 2438 2439 /** put serviced query into a buffer */ 2440 static void 2441 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns) 2442 { 2443 /* if we are using 0x20 bits for ID randomness, perturb them */ 2444 if(sq->outnet->use_caps_for_id && !sq->nocaps) { 2445 serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen); 2446 } 2447 /* generate query */ 2448 sldns_buffer_clear(buff); 2449 sldns_buffer_write_u16(buff, 0); /* id placeholder */ 2450 sldns_buffer_write(buff, sq->qbuf, sq->qbuflen); 2451 sldns_buffer_flip(buff); 2452 if(with_edns) { 2453 /* add edns section */ 2454 struct edns_data edns; 2455 edns.edns_present = 1; 2456 edns.ext_rcode = 0; 2457 edns.edns_version = EDNS_ADVERTISED_VERSION; 2458 edns.opt_list = sq->opt_list; 2459 if(sq->status == serviced_query_UDP_EDNS_FRAG) { 2460 if(addr_is_ip6(&sq->addr, sq->addrlen)) { 2461 if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE) 2462 edns.udp_size = EDNS_FRAG_SIZE_IP6; 2463 else edns.udp_size = EDNS_ADVERTISED_SIZE; 2464 } else { 2465 if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE) 2466 edns.udp_size = EDNS_FRAG_SIZE_IP4; 2467 else edns.udp_size = EDNS_ADVERTISED_SIZE; 2468 } 2469 } else { 2470 edns.udp_size = EDNS_ADVERTISED_SIZE; 2471 } 2472 edns.bits = 0; 2473 if(sq->dnssec & EDNS_DO) 2474 edns.bits = EDNS_DO; 2475 if(sq->dnssec & BIT_CD) 2476 LDNS_CD_SET(sldns_buffer_begin(buff)); 2477 attach_edns_record(buff, &edns); 2478 } 2479 } 2480 2481 /** 2482 * Perform serviced query UDP sending operation. 2483 * Sends UDP with EDNS, unless infra host marked non EDNS. 2484 * @param sq: query to send. 2485 * @param buff: buffer scratch space. 2486 * @return 0 on error. 
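 * The probe timeout is the rtt estimate taken from the infra cache for
 * this server, and the cached EDNS status decides whether the query is
 * first sent with an OPT record.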
2487 */ 2488 static int 2489 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff) 2490 { 2491 int rtt, vs; 2492 uint8_t edns_lame_known; 2493 time_t now = *sq->outnet->now_secs; 2494 2495 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone, 2496 sq->zonelen, now, &vs, &edns_lame_known, &rtt)) 2497 return 0; 2498 sq->last_rtt = rtt; 2499 verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs); 2500 if(sq->status == serviced_initial) { 2501 if(vs != -1) { 2502 sq->status = serviced_query_UDP_EDNS; 2503 } else { 2504 sq->status = serviced_query_UDP; 2505 } 2506 } 2507 serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) || 2508 (sq->status == serviced_query_UDP_EDNS_FRAG)); 2509 sq->last_sent_time = *sq->outnet->now_tv; 2510 sq->edns_lame_known = (int)edns_lame_known; 2511 verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt); 2512 sq->pending = pending_udp_query(sq, buff, rtt, 2513 serviced_udp_callback, sq); 2514 if(!sq->pending) 2515 return 0; 2516 return 1; 2517 } 2518 2519 /** check that perturbed qname is identical */ 2520 static int 2521 serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen) 2522 { 2523 uint8_t* d1 = sldns_buffer_begin(pkt)+12; 2524 uint8_t* d2 = qbuf+10; 2525 uint8_t len1, len2; 2526 int count = 0; 2527 if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */ 2528 return 0; 2529 log_assert(qbuflen >= 15 /* 10 header, root, type, class */); 2530 len1 = *d1++; 2531 len2 = *d2++; 2532 while(len1 != 0 || len2 != 0) { 2533 if(LABEL_IS_PTR(len1)) { 2534 /* check if we can read *d1 with compression ptr rest */ 2535 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2536 return 0; 2537 d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1); 2538 /* check if we can read the destination *d1 */ 2539 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2540 return 0; 2541 len1 = *d1++; 2542 if(count++ > MAX_COMPRESS_PTRS) 2543 return 0; 2544 continue; 2545 } 2546 if(d2 > qbuf+qbuflen) 2547 return 0; 2548 if(len1 != len2) 2549 return 0; 2550 if(len1 > LDNS_MAX_LABELLEN) 2551 return 0; 2552 /* check len1 + 1(next length) are okay to read */ 2553 if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2554 return 0; 2555 log_assert(len1 <= LDNS_MAX_LABELLEN); 2556 log_assert(len2 <= LDNS_MAX_LABELLEN); 2557 log_assert(len1 == len2 && len1 != 0); 2558 /* compare the labels - bitwise identical */ 2559 if(memcmp(d1, d2, len1) != 0) 2560 return 0; 2561 d1 += len1; 2562 d2 += len2; 2563 len1 = *d1++; 2564 len2 = *d2++; 2565 } 2566 return 1; 2567 } 2568 2569 /** call the callbacks for a serviced query */ 2570 static void 2571 serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c, 2572 struct comm_reply* rep) 2573 { 2574 struct service_callback* p; 2575 int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/ 2576 uint8_t *backup_p = NULL; 2577 size_t backlen = 0; 2578 #ifdef UNBOUND_DEBUG 2579 rbnode_type* rem = 2580 #else 2581 (void) 2582 #endif 2583 /* remove from tree, and schedule for deletion, so that callbacks 2584 * can safely deregister themselves and even create new serviced 2585 * queries that are identical to this one. 
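	 * The entry is only unlinked and flagged to_be_deleted here; the
	 * memory is freed by serviced_delete() at the end of this function,
	 * after all registered callbacks have run.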
*/ 2586 rbtree_delete(sq->outnet->serviced, sq); 2587 log_assert(rem); /* should have been present */ 2588 sq->to_be_deleted = 1; 2589 verbose(VERB_ALGO, "svcd callbacks start"); 2590 if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c && 2591 !sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) { 2592 /* for type PTR do not check perturbed name in answer, 2593 * compatibility with cisco dns guard boxes that mess up 2594 * reverse queries 0x20 contents */ 2595 /* noerror and nxdomain must have a qname in reply */ 2596 if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 && 2597 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2598 == LDNS_RCODE_NOERROR || 2599 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2600 == LDNS_RCODE_NXDOMAIN)) { 2601 verbose(VERB_DETAIL, "no qname in reply to check 0x20ID"); 2602 log_addr(VERB_DETAIL, "from server", 2603 &sq->addr, sq->addrlen); 2604 log_buf(VERB_DETAIL, "for packet", c->buffer); 2605 error = NETEVENT_CLOSED; 2606 c = NULL; 2607 } else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 && 2608 !serviced_check_qname(c->buffer, sq->qbuf, 2609 sq->qbuflen)) { 2610 verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname"); 2611 log_addr(VERB_DETAIL, "from server", 2612 &sq->addr, sq->addrlen); 2613 log_buf(VERB_DETAIL, "for packet", c->buffer); 2614 error = NETEVENT_CAPSFAIL; 2615 /* and cleanup too */ 2616 pkt_dname_tolower(c->buffer, 2617 sldns_buffer_at(c->buffer, 12)); 2618 } else { 2619 verbose(VERB_ALGO, "good 0x20-ID in reply qname"); 2620 /* cleanup caps, prettier cache contents. */ 2621 pkt_dname_tolower(c->buffer, 2622 sldns_buffer_at(c->buffer, 12)); 2623 } 2624 } 2625 if(dobackup && c) { 2626 /* make a backup of the query, since the querystate processing 2627 * may send outgoing queries that overwrite the buffer. 2628 * use secondary buffer to store the query. 
2629 * This is a data copy, but faster than packet to server */ 2630 backlen = sldns_buffer_limit(c->buffer); 2631 backup_p = memdup(sldns_buffer_begin(c->buffer), backlen); 2632 if(!backup_p) { 2633 log_err("malloc failure in serviced query callbacks"); 2634 error = NETEVENT_CLOSED; 2635 c = NULL; 2636 } 2637 sq->outnet->svcd_overhead = backlen; 2638 } 2639 /* test the actual sq->cblist, because the next elem could be deleted*/ 2640 while((p=sq->cblist) != NULL) { 2641 sq->cblist = p->next; /* remove this element */ 2642 if(dobackup && c) { 2643 sldns_buffer_clear(c->buffer); 2644 sldns_buffer_write(c->buffer, backup_p, backlen); 2645 sldns_buffer_flip(c->buffer); 2646 } 2647 fptr_ok(fptr_whitelist_serviced_query(p->cb)); 2648 (void)(*p->cb)(c, p->cb_arg, error, rep); 2649 free(p); 2650 } 2651 if(backup_p) { 2652 free(backup_p); 2653 sq->outnet->svcd_overhead = 0; 2654 } 2655 verbose(VERB_ALGO, "svcd callbacks end"); 2656 log_assert(sq->cblist == NULL); 2657 serviced_delete(sq); 2658 } 2659 2660 int 2661 serviced_tcp_callback(struct comm_point* c, void* arg, int error, 2662 struct comm_reply* rep) 2663 { 2664 struct serviced_query* sq = (struct serviced_query*)arg; 2665 struct comm_reply r2; 2666 sq->pending = NULL; /* removed after this callback */ 2667 if(error != NETEVENT_NOERROR) 2668 log_addr(VERB_QUERY, "tcp error for address", 2669 &sq->addr, sq->addrlen); 2670 if(error==NETEVENT_NOERROR) 2671 infra_update_tcp_works(sq->outnet->infra, &sq->addr, 2672 sq->addrlen, sq->zone, sq->zonelen); 2673 #ifdef USE_DNSTAP 2674 if(error==NETEVENT_NOERROR && sq->outnet->dtenv && 2675 (sq->outnet->dtenv->log_resolver_response_messages || 2676 sq->outnet->dtenv->log_forwarder_response_messages)) 2677 dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr, 2678 c->type, sq->zone, sq->zonelen, sq->qbuf, sq->qbuflen, 2679 &sq->last_sent_time, sq->outnet->now_tv, c->buffer); 2680 #endif 2681 if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS && 2682 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 2683 LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin( 2684 c->buffer)) == LDNS_RCODE_NOTIMPL) ) { 2685 /* attempt to fallback to nonEDNS */ 2686 sq->status = serviced_query_TCP_EDNS_fallback; 2687 serviced_tcp_initiate(sq, c->buffer); 2688 return 0; 2689 } else if(error==NETEVENT_NOERROR && 2690 sq->status == serviced_query_TCP_EDNS_fallback && 2691 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 2692 LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE( 2693 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN 2694 || LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2695 == LDNS_RCODE_YXDOMAIN)) { 2696 /* the fallback produced a result that looks promising, note 2697 * that this server should be approached without EDNS */ 2698 /* only store noEDNS in cache if domain is noDNSSEC */ 2699 if(!sq->want_dnssec) 2700 if(!infra_edns_update(sq->outnet->infra, &sq->addr, 2701 sq->addrlen, sq->zone, sq->zonelen, -1, 2702 *sq->outnet->now_secs)) 2703 log_err("Out of memory caching no edns for host"); 2704 sq->status = serviced_query_TCP; 2705 } 2706 if(sq->tcp_upstream || sq->ssl_upstream) { 2707 struct timeval now = *sq->outnet->now_tv; 2708 if(error!=NETEVENT_NOERROR) { 2709 if(!infra_rtt_update(sq->outnet->infra, &sq->addr, 2710 sq->addrlen, sq->zone, sq->zonelen, sq->qtype, 2711 -1, sq->last_rtt, (time_t)now.tv_sec)) 2712 log_err("out of memory in TCP exponential backoff."); 2713 } else if(now.tv_sec > sq->last_sent_time.tv_sec || 2714 (now.tv_sec == sq->last_sent_time.tv_sec && 2715 now.tv_usec > 
sq->last_sent_time.tv_usec)) { 2716 /* convert from microseconds to milliseconds */ 2717 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000 2718 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000; 2719 verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime); 2720 log_assert(roundtime >= 0); 2721 /* only store if less then AUTH_TIMEOUT seconds, it could be 2722 * huge due to system-hibernated and we woke up */ 2723 if(roundtime < 60000) { 2724 if(!infra_rtt_update(sq->outnet->infra, &sq->addr, 2725 sq->addrlen, sq->zone, sq->zonelen, sq->qtype, 2726 roundtime, sq->last_rtt, (time_t)now.tv_sec)) 2727 log_err("out of memory noting rtt."); 2728 } 2729 } 2730 } 2731 /* insert address into reply info */ 2732 if(!rep) { 2733 /* create one if there isn't (on errors) */ 2734 rep = &r2; 2735 r2.c = c; 2736 } 2737 memcpy(&rep->addr, &sq->addr, sq->addrlen); 2738 rep->addrlen = sq->addrlen; 2739 serviced_callbacks(sq, error, c, rep); 2740 return 0; 2741 } 2742 2743 static void 2744 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff) 2745 { 2746 verbose(VERB_ALGO, "initiate TCP query %s", 2747 sq->status==serviced_query_TCP_EDNS?"EDNS":""); 2748 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS); 2749 sq->last_sent_time = *sq->outnet->now_tv; 2750 sq->pending = pending_tcp_query(sq, buff, TCP_AUTH_QUERY_TIMEOUT, 2751 serviced_tcp_callback, sq); 2752 if(!sq->pending) { 2753 /* delete from tree so that a retry by above layer does not 2754 * clash with this entry */ 2755 verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query"); 2756 serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL); 2757 } 2758 } 2759 2760 /** Send serviced query over TCP return false on initial failure */ 2761 static int 2762 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff) 2763 { 2764 int vs, rtt, timeout; 2765 uint8_t edns_lame_known; 2766 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone, 2767 sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known, 2768 &rtt)) 2769 return 0; 2770 sq->last_rtt = rtt; 2771 if(vs != -1) 2772 sq->status = serviced_query_TCP_EDNS; 2773 else sq->status = serviced_query_TCP; 2774 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS); 2775 sq->last_sent_time = *sq->outnet->now_tv; 2776 if(sq->tcp_upstream || sq->ssl_upstream) { 2777 timeout = rtt; 2778 if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < TCP_AUTH_QUERY_TIMEOUT) 2779 timeout = TCP_AUTH_QUERY_TIMEOUT; 2780 } else { 2781 timeout = TCP_AUTH_QUERY_TIMEOUT; 2782 } 2783 sq->pending = pending_tcp_query(sq, buff, timeout, 2784 serviced_tcp_callback, sq); 2785 return sq->pending != NULL; 2786 } 2787 2788 /* see if packet is edns malformed; got zeroes at start. 2789 * This is from servers that return malformed packets to EDNS0 queries, 2790 * but they return good packets for nonEDNS0 queries. 2791 * We try to detect their output; without resorting to a full parse or 2792 * check for too many bytes after the end of the packet. */ 2793 static int 2794 packet_edns_malformed(struct sldns_buffer* buf, int qtype) 2795 { 2796 size_t len; 2797 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE) 2798 return 1; /* malformed */ 2799 /* they have NOERROR rcode, 1 answer. 
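The broken replies this catches echo the question and then start the
answer RR with zero bytes where its owner name, type and class should be,
so the checks below look for that pattern instead of doing a full parse.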
*/ 2800 if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR) 2801 return 0; 2802 /* one query (to skip) and answer records */ 2803 if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 || 2804 LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0) 2805 return 0; 2806 /* skip qname */ 2807 len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE), 2808 sldns_buffer_limit(buf)-LDNS_HEADER_SIZE); 2809 if(len == 0) 2810 return 0; 2811 if(len == 1 && qtype == 0) 2812 return 0; /* we asked for '.' and type 0 */ 2813 /* and then 4 bytes (type and class of query) */ 2814 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3) 2815 return 0; 2816 2817 /* and start with 11 zeroes as the answer RR */ 2818 /* so check the qtype of the answer record, qname=0, type=0 */ 2819 if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 && 2820 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 && 2821 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0) 2822 return 1; 2823 return 0; 2824 } 2825 2826 int 2827 serviced_udp_callback(struct comm_point* c, void* arg, int error, 2828 struct comm_reply* rep) 2829 { 2830 struct serviced_query* sq = (struct serviced_query*)arg; 2831 struct outside_network* outnet = sq->outnet; 2832 struct timeval now = *sq->outnet->now_tv; 2833 2834 sq->pending = NULL; /* removed after callback */ 2835 if(error == NETEVENT_TIMEOUT) { 2836 if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000) { 2837 /* fallback to 1480/1280 */ 2838 sq->status = serviced_query_UDP_EDNS_FRAG; 2839 log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10, 2840 &sq->addr, sq->addrlen); 2841 if(!serviced_udp_send(sq, c->buffer)) { 2842 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 2843 } 2844 return 0; 2845 } 2846 if(sq->status == serviced_query_UDP_EDNS_FRAG) { 2847 /* fragmentation size did not fix it */ 2848 sq->status = serviced_query_UDP_EDNS; 2849 } 2850 sq->retry++; 2851 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen, 2852 sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt, 2853 (time_t)now.tv_sec)) 2854 log_err("out of memory in UDP exponential backoff"); 2855 if(sq->retry < OUTBOUND_UDP_RETRY) { 2856 log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10, 2857 &sq->addr, sq->addrlen); 2858 if(!serviced_udp_send(sq, c->buffer)) { 2859 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 2860 } 2861 return 0; 2862 } 2863 } 2864 if(error != NETEVENT_NOERROR) { 2865 /* udp returns error (due to no ID or interface available) */ 2866 serviced_callbacks(sq, error, c, rep); 2867 return 0; 2868 } 2869 #ifdef USE_DNSTAP 2870 if(error == NETEVENT_NOERROR && outnet->dtenv && 2871 (outnet->dtenv->log_resolver_response_messages || 2872 outnet->dtenv->log_forwarder_response_messages)) 2873 dt_msg_send_outside_response(outnet->dtenv, &sq->addr, c->type, 2874 sq->zone, sq->zonelen, sq->qbuf, sq->qbuflen, 2875 &sq->last_sent_time, sq->outnet->now_tv, c->buffer); 2876 #endif 2877 if( (sq->status == serviced_query_UDP_EDNS 2878 ||sq->status == serviced_query_UDP_EDNS_FRAG) 2879 && (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2880 == LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE( 2881 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL 2882 || packet_edns_malformed(c->buffer, sq->qtype) 2883 )) { 2884 /* try to get an answer by falling back without EDNS */ 2885 verbose(VERB_ALGO, "serviced query: attempt without EDNS"); 2886 sq->status = serviced_query_UDP_EDNS_fallback; 2887 sq->retry = 0; 2888 if(!serviced_udp_send(sq, c->buffer)) { 2889 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 2890 } 2891 return 
0; 2892 } else if(sq->status == serviced_query_UDP_EDNS && 2893 !sq->edns_lame_known) { 2894 /* now we know that edns queries received answers store that */ 2895 log_addr(VERB_ALGO, "serviced query: EDNS works for", 2896 &sq->addr, sq->addrlen); 2897 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen, 2898 sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) { 2899 log_err("Out of memory caching edns works"); 2900 } 2901 sq->edns_lame_known = 1; 2902 } else if(sq->status == serviced_query_UDP_EDNS_fallback && 2903 !sq->edns_lame_known && (LDNS_RCODE_WIRE( 2904 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR || 2905 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 2906 LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin( 2907 c->buffer)) == LDNS_RCODE_YXDOMAIN)) { 2908 /* the fallback produced a result that looks promising, note 2909 * that this server should be approached without EDNS */ 2910 /* only store noEDNS in cache if domain is noDNSSEC */ 2911 if(!sq->want_dnssec) { 2912 log_addr(VERB_ALGO, "serviced query: EDNS fails for", 2913 &sq->addr, sq->addrlen); 2914 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen, 2915 sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) { 2916 log_err("Out of memory caching no edns for host"); 2917 } 2918 } else { 2919 log_addr(VERB_ALGO, "serviced query: EDNS fails, but " 2920 "not stored because need DNSSEC for", &sq->addr, 2921 sq->addrlen); 2922 } 2923 sq->status = serviced_query_UDP; 2924 } 2925 if(now.tv_sec > sq->last_sent_time.tv_sec || 2926 (now.tv_sec == sq->last_sent_time.tv_sec && 2927 now.tv_usec > sq->last_sent_time.tv_usec)) { 2928 /* convert from microseconds to milliseconds */ 2929 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000 2930 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000; 2931 verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime); 2932 log_assert(roundtime >= 0); 2933 /* in case the system hibernated, do not enter a huge value, 2934 * above this value gives trouble with server selection */ 2935 if(roundtime < 60000) { 2936 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen, 2937 sq->zone, sq->zonelen, sq->qtype, roundtime, 2938 sq->last_rtt, (time_t)now.tv_sec)) 2939 log_err("out of memory noting rtt."); 2940 } 2941 } 2942 /* perform TC flag check and TCP fallback after updating our 2943 * cache entries for EDNS status and RTT times */ 2944 if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) { 2945 /* fallback to TCP */ 2946 /* this discards partial UDP contents */ 2947 if(sq->status == serviced_query_UDP_EDNS || 2948 sq->status == serviced_query_UDP_EDNS_FRAG || 2949 sq->status == serviced_query_UDP_EDNS_fallback) 2950 /* if we have unfinished EDNS_fallback, start again */ 2951 sq->status = serviced_query_TCP_EDNS; 2952 else sq->status = serviced_query_TCP; 2953 serviced_tcp_initiate(sq, c->buffer); 2954 return 0; 2955 } 2956 /* yay! 
an answer */ 2957 serviced_callbacks(sq, error, c, rep); 2958 return 0; 2959 } 2960 2961 struct serviced_query* 2962 outnet_serviced_query(struct outside_network* outnet, 2963 struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec, 2964 int nocaps, int tcp_upstream, int ssl_upstream, char* tls_auth_name, 2965 struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone, 2966 size_t zonelen, struct module_qstate* qstate, 2967 comm_point_callback_type* callback, void* callback_arg, sldns_buffer* buff, 2968 struct module_env* env) 2969 { 2970 struct serviced_query* sq; 2971 struct service_callback* cb; 2972 struct edns_string_addr* client_string_addr; 2973 2974 if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone, zonelen, 2975 qstate, qstate->region)) 2976 return NULL; 2977 2978 if((client_string_addr = edns_string_addr_lookup( 2979 &env->edns_strings->client_strings, addr, addrlen))) { 2980 edns_opt_list_append(&qstate->edns_opts_back_out, 2981 env->edns_strings->client_string_opcode, 2982 client_string_addr->string_len, 2983 client_string_addr->string, qstate->region); 2984 } 2985 2986 serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype, 2987 qinfo->qclass, flags); 2988 sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen, 2989 qstate->edns_opts_back_out); 2990 /* duplicate entries are included in the callback list, because 2991 * there is a counterpart registration by our caller that needs to 2992 * be doubly-removed (with callbacks perhaps). */ 2993 if(!(cb = (struct service_callback*)malloc(sizeof(*cb)))) 2994 return NULL; 2995 if(!sq) { 2996 /* make new serviced query entry */ 2997 sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps, 2998 tcp_upstream, ssl_upstream, tls_auth_name, addr, 2999 addrlen, zone, zonelen, (int)qinfo->qtype, 3000 qstate->edns_opts_back_out); 3001 if(!sq) { 3002 free(cb); 3003 return NULL; 3004 } 3005 /* perform first network action */ 3006 if(outnet->do_udp && !(tcp_upstream || ssl_upstream)) { 3007 if(!serviced_udp_send(sq, buff)) { 3008 (void)rbtree_delete(outnet->serviced, sq); 3009 serviced_node_del(&sq->node, NULL); 3010 free(cb); 3011 return NULL; 3012 } 3013 } else { 3014 if(!serviced_tcp_send(sq, buff)) { 3015 (void)rbtree_delete(outnet->serviced, sq); 3016 serviced_node_del(&sq->node, NULL); 3017 free(cb); 3018 return NULL; 3019 } 3020 } 3021 } 3022 /* add callback to list of callbacks */ 3023 cb->cb = callback; 3024 cb->cb_arg = callback_arg; 3025 cb->next = sq->cblist; 3026 sq->cblist = cb; 3027 return sq; 3028 } 3029 3030 /** remove callback from list */ 3031 static void 3032 callback_list_remove(struct serviced_query* sq, void* cb_arg) 3033 { 3034 struct service_callback** pp = &sq->cblist; 3035 while(*pp) { 3036 if((*pp)->cb_arg == cb_arg) { 3037 struct service_callback* del = *pp; 3038 *pp = del->next; 3039 free(del); 3040 return; 3041 } 3042 pp = &(*pp)->next; 3043 } 3044 } 3045 3046 void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg) 3047 { 3048 if(!sq) 3049 return; 3050 callback_list_remove(sq, cb_arg); 3051 /* if callbacks() routine scheduled deletion, let it do that */ 3052 if(!sq->cblist && !sq->to_be_deleted) { 3053 (void)rbtree_delete(sq->outnet->serviced, sq); 3054 serviced_delete(sq); 3055 } 3056 } 3057 3058 /** create fd to send to this destination */ 3059 static int 3060 fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr, 3061 socklen_t to_addrlen) 3062 { 3063 struct sockaddr_storage* addr; 3064 socklen_t addrlen; 3065 int i, 
try, pnum, dscp; 3066 struct port_if* pif; 3067 3068 /* create fd */ 3069 dscp = outnet->ip_dscp; 3070 for(try = 0; try<1000; try++) { 3071 int port = 0; 3072 int freebind = 0; 3073 int noproto = 0; 3074 int inuse = 0; 3075 int fd = -1; 3076 3077 /* select interface */ 3078 if(addr_is_ip6(to_addr, to_addrlen)) { 3079 if(outnet->num_ip6 == 0) { 3080 char to[64]; 3081 addr_to_str(to_addr, to_addrlen, to, sizeof(to)); 3082 verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to); 3083 return -1; 3084 } 3085 i = ub_random_max(outnet->rnd, outnet->num_ip6); 3086 pif = &outnet->ip6_ifs[i]; 3087 } else { 3088 if(outnet->num_ip4 == 0) { 3089 char to[64]; 3090 addr_to_str(to_addr, to_addrlen, to, sizeof(to)); 3091 verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to); 3092 return -1; 3093 } 3094 i = ub_random_max(outnet->rnd, outnet->num_ip4); 3095 pif = &outnet->ip4_ifs[i]; 3096 } 3097 addr = &pif->addr; 3098 addrlen = pif->addrlen; 3099 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 3100 pnum = ub_random_max(outnet->rnd, pif->avail_total); 3101 if(pnum < pif->inuse) { 3102 /* port already open */ 3103 port = pif->out[pnum]->number; 3104 } else { 3105 /* unused ports in start part of array */ 3106 port = pif->avail_ports[pnum - pif->inuse]; 3107 } 3108 #else 3109 pnum = port = 0; 3110 #endif 3111 if(addr_is_ip6(to_addr, to_addrlen)) { 3112 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr; 3113 sa.sin6_port = (in_port_t)htons((uint16_t)port); 3114 fd = create_udp_sock(AF_INET6, SOCK_DGRAM, 3115 (struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto, 3116 0, 0, 0, NULL, 0, freebind, 0, dscp); 3117 } else { 3118 struct sockaddr_in* sa = (struct sockaddr_in*)addr; 3119 sa->sin_port = (in_port_t)htons((uint16_t)port); 3120 fd = create_udp_sock(AF_INET, SOCK_DGRAM, 3121 (struct sockaddr*)addr, addrlen, 1, &inuse, &noproto, 3122 0, 0, 0, NULL, 0, freebind, 0, dscp); 3123 } 3124 if(fd != -1) { 3125 return fd; 3126 } 3127 if(!inuse) { 3128 return -1; 3129 } 3130 } 3131 /* too many tries */ 3132 log_err("cannot send probe, ports are in use"); 3133 return -1; 3134 } 3135 3136 struct comm_point* 3137 outnet_comm_point_for_udp(struct outside_network* outnet, 3138 comm_point_callback_type* cb, void* cb_arg, 3139 struct sockaddr_storage* to_addr, socklen_t to_addrlen) 3140 { 3141 struct comm_point* cp; 3142 int fd = fd_for_dest(outnet, to_addr, to_addrlen); 3143 if(fd == -1) { 3144 return NULL; 3145 } 3146 cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff, 3147 cb, cb_arg); 3148 if(!cp) { 3149 log_err("malloc failure"); 3150 close(fd); 3151 return NULL; 3152 } 3153 return cp; 3154 } 3155 3156 /** setup SSL for comm point */ 3157 static int 3158 setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet, 3159 int fd, char* host) 3160 { 3161 cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd); 3162 if(!cp->ssl) { 3163 log_err("cannot create SSL object"); 3164 return 0; 3165 } 3166 #ifdef USE_WINSOCK 3167 comm_point_tcp_win_bio_cb(cp, cp->ssl); 3168 #endif 3169 cp->ssl_shake_state = comm_ssl_shake_write; 3170 /* https verification */ 3171 #ifdef HAVE_SSL 3172 if(outnet->tls_use_sni) { 3173 (void)SSL_set_tlsext_host_name(cp->ssl, host); 3174 } 3175 #endif 3176 #ifdef HAVE_SSL_SET1_HOST 3177 if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) { 3178 /* because we set SSL_VERIFY_PEER, in netevent in 3179 * ssl_handshake, it'll check if the certificate 3180 * verification has succeeded */ 3181 /* SSL_VERIFY_PEER is set on the sslctx */ 
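		/* SSL_set1_host() compares the given name against the
		 * certificate's subjectAltName dNSName entries (or the
		 * subject CN when no SAN is present), so a mismatch makes
		 * certificate verification, and with it the handshake, fail */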
3182 /* and the certificates to verify with are loaded into 3183 * it with SSL_load_verify_locations or 3184 * SSL_CTX_set_default_verify_paths */ 3185 /* setting the hostname makes openssl verify the 3186 * host name in the x509 certificate in the 3187 * SSL connection*/ 3188 if(!SSL_set1_host(cp->ssl, host)) { 3189 log_err("SSL_set1_host failed"); 3190 return 0; 3191 } 3192 } 3193 #elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST) 3194 /* openssl 1.0.2 has this function that can be used for 3195 * set1_host like verification */ 3196 if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) { 3197 X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl); 3198 # ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS 3199 X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS); 3200 # endif 3201 if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) { 3202 log_err("X509_VERIFY_PARAM_set1_host failed"); 3203 return 0; 3204 } 3205 } 3206 #else 3207 (void)host; 3208 #endif /* HAVE_SSL_SET1_HOST */ 3209 return 1; 3210 } 3211 3212 struct comm_point* 3213 outnet_comm_point_for_tcp(struct outside_network* outnet, 3214 comm_point_callback_type* cb, void* cb_arg, 3215 struct sockaddr_storage* to_addr, socklen_t to_addrlen, 3216 sldns_buffer* query, int timeout, int ssl, char* host) 3217 { 3218 struct comm_point* cp; 3219 int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp); 3220 if(fd == -1) { 3221 return 0; 3222 } 3223 fd_set_nonblock(fd); 3224 if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) { 3225 /* outnet_tcp_connect has closed fd on error for us */ 3226 return 0; 3227 } 3228 cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg); 3229 if(!cp) { 3230 log_err("malloc failure"); 3231 close(fd); 3232 return 0; 3233 } 3234 cp->repinfo.addrlen = to_addrlen; 3235 memcpy(&cp->repinfo.addr, to_addr, to_addrlen); 3236 3237 /* setup for SSL (if needed) */ 3238 if(ssl) { 3239 if(!setup_comm_ssl(cp, outnet, fd, host)) { 3240 log_err("cannot setup XoT"); 3241 comm_point_delete(cp); 3242 return NULL; 3243 } 3244 } 3245 3246 /* set timeout on TCP connection */ 3247 comm_point_start_listening(cp, fd, timeout); 3248 /* copy scratch buffer to cp->buffer */ 3249 sldns_buffer_copy(cp->buffer, query); 3250 return cp; 3251 } 3252 3253 /** setup http request headers in buffer for sending query to destination */ 3254 static int 3255 setup_http_request(sldns_buffer* buf, char* host, char* path) 3256 { 3257 sldns_buffer_clear(buf); 3258 sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path); 3259 sldns_buffer_printf(buf, "Host: %s\r\n", host); 3260 sldns_buffer_printf(buf, "User-Agent: unbound/%s\r\n", 3261 PACKAGE_VERSION); 3262 /* We do not really do multiple queries per connection, 3263 * but this header setting is also not needed. 3264 * sldns_buffer_printf(buf, "Connection: close\r\n") */ 3265 sldns_buffer_printf(buf, "\r\n"); 3266 if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf)) 3267 return 0; /* somehow buffer too short, but it is about 60K 3268 and the request is only a couple bytes long. 
*/ 3269 sldns_buffer_flip(buf); 3270 return 1; 3271 } 3272 3273 struct comm_point* 3274 outnet_comm_point_for_http(struct outside_network* outnet, 3275 comm_point_callback_type* cb, void* cb_arg, 3276 struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout, 3277 int ssl, char* host, char* path) 3278 { 3279 /* cp calls cb with err=NETEVENT_DONE when transfer is done */ 3280 struct comm_point* cp; 3281 int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp); 3282 if(fd == -1) { 3283 return 0; 3284 } 3285 fd_set_nonblock(fd); 3286 if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) { 3287 /* outnet_tcp_connect has closed fd on error for us */ 3288 return 0; 3289 } 3290 cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg, 3291 outnet->udp_buff); 3292 if(!cp) { 3293 log_err("malloc failure"); 3294 close(fd); 3295 return 0; 3296 } 3297 cp->repinfo.addrlen = to_addrlen; 3298 memcpy(&cp->repinfo.addr, to_addr, to_addrlen); 3299 3300 /* setup for SSL (if needed) */ 3301 if(ssl) { 3302 if(!setup_comm_ssl(cp, outnet, fd, host)) { 3303 log_err("cannot setup https"); 3304 comm_point_delete(cp); 3305 return NULL; 3306 } 3307 } 3308 3309 /* set timeout on TCP connection */ 3310 comm_point_start_listening(cp, fd, timeout); 3311 3312 /* setup http request in cp->buffer */ 3313 if(!setup_http_request(cp->buffer, host, path)) { 3314 log_err("error setting up http request"); 3315 comm_point_delete(cp); 3316 return NULL; 3317 } 3318 return cp; 3319 } 3320 3321 /** get memory used by waiting tcp entry (in use or not) */ 3322 static size_t 3323 waiting_tcp_get_mem(struct waiting_tcp* w) 3324 { 3325 size_t s; 3326 if(!w) return 0; 3327 s = sizeof(*w) + w->pkt_len; 3328 if(w->timer) 3329 s += comm_timer_get_mem(w->timer); 3330 return s; 3331 } 3332 3333 /** get memory used by port if */ 3334 static size_t 3335 if_get_mem(struct port_if* pif) 3336 { 3337 size_t s; 3338 int i; 3339 s = sizeof(*pif) + 3340 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 3341 sizeof(int)*pif->avail_total + 3342 #endif 3343 sizeof(struct port_comm*)*pif->maxout; 3344 for(i=0; i<pif->inuse; i++) 3345 s += sizeof(*pif->out[i]) + 3346 comm_point_get_mem(pif->out[i]->cp); 3347 return s; 3348 } 3349 3350 /** get memory used by waiting udp */ 3351 static size_t 3352 waiting_udp_get_mem(struct pending* w) 3353 { 3354 size_t s; 3355 s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len; 3356 return s; 3357 } 3358 3359 size_t outnet_get_mem(struct outside_network* outnet) 3360 { 3361 size_t i; 3362 int k; 3363 struct waiting_tcp* w; 3364 struct pending* u; 3365 struct serviced_query* sq; 3366 struct service_callback* sb; 3367 struct port_comm* pc; 3368 size_t s = sizeof(*outnet) + sizeof(*outnet->base) + 3369 sizeof(*outnet->udp_buff) + 3370 sldns_buffer_capacity(outnet->udp_buff); 3371 /* second buffer is not ours */ 3372 for(pc = outnet->unused_fds; pc; pc = pc->next) { 3373 s += sizeof(*pc) + comm_point_get_mem(pc->cp); 3374 } 3375 for(k=0; k<outnet->num_ip4; k++) 3376 s += if_get_mem(&outnet->ip4_ifs[k]); 3377 for(k=0; k<outnet->num_ip6; k++) 3378 s += if_get_mem(&outnet->ip6_ifs[k]); 3379 for(u=outnet->udp_wait_first; u; u=u->next_waiting) 3380 s += waiting_udp_get_mem(u); 3381 3382 s += sizeof(struct pending_tcp*)*outnet->num_tcp; 3383 for(i=0; i<outnet->num_tcp; i++) { 3384 s += sizeof(struct pending_tcp); 3385 s += comm_point_get_mem(outnet->tcp_conns[i]->c); 3386 if(outnet->tcp_conns[i]->query) 3387 s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query); 3388 } 3389 
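	/* queries still waiting for a stream, the pending and serviced
	 * trees, and the callback entries of each serviced query are
	 * counted below */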
for(w=outnet->tcp_wait_first; w; w = w->next_waiting) 3390 s += waiting_tcp_get_mem(w); 3391 s += sizeof(*outnet->pending); 3392 s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) * 3393 outnet->pending->count; 3394 s += sizeof(*outnet->serviced); 3395 s += outnet->svcd_overhead; 3396 RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) { 3397 s += sizeof(*sq) + sq->qbuflen; 3398 for(sb = sq->cblist; sb; sb = sb->next) 3399 s += sizeof(*sb); 3400 } 3401 return s; 3402 } 3403 3404 size_t 3405 serviced_get_mem(struct serviced_query* sq) 3406 { 3407 struct service_callback* sb; 3408 size_t s; 3409 s = sizeof(*sq) + sq->qbuflen; 3410 for(sb = sq->cblist; sb; sb = sb->next) 3411 s += sizeof(*sb); 3412 if(sq->status == serviced_query_UDP_EDNS || 3413 sq->status == serviced_query_UDP || 3414 sq->status == serviced_query_UDP_EDNS_FRAG || 3415 sq->status == serviced_query_UDP_EDNS_fallback) { 3416 s += sizeof(struct pending); 3417 s += comm_timer_get_mem(NULL); 3418 } else { 3419 /* does not have size of the pkt pointer */ 3420 /* always has a timer except on malloc failures */ 3421 3422 /* these sizes are part of the main outside network mem */ 3423 /* 3424 s += sizeof(struct waiting_tcp); 3425 s += comm_timer_get_mem(NULL); 3426 */ 3427 } 3428 return s; 3429 } 3430 3431
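/*
 * Illustrative sketch (not part of the build): how a module would
 * typically use the serviced query interface defined above.  The names
 * my_module_callback and my_send_example are hypothetical placeholders,
 * not symbols that exist elsewhere in unbound; the calls to
 * outnet_serviced_query() and outnet_serviced_query_stop() follow the
 * definitions in this file.
 */
#if 0
/** hypothetical callback; runs when the serviced query gets an answer
 * or an error.  On NETEVENT_NOERROR the reply packet is in c->buffer. */
static int
my_module_callback(struct comm_point* c, void* arg, int error,
	struct comm_reply* rep)
{
	struct module_qstate* my_qstate = (struct module_qstate*)arg;
	(void)c; (void)rep; (void)my_qstate;
	if(error != NETEVENT_NOERROR) {
		/* timeout or network failure; the serviced query itself
		 * is deleted after the callbacks have run */
		return 0;
	}
	/* parse c->buffer and continue the module state machine here */
	return 0;
}

/** hypothetical caller that starts a lookup to one authoritative server */
static void
my_send_example(struct outside_network* outnet, struct module_env* my_env,
	struct module_qstate* my_qstate, struct query_info* qinfo,
	struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, sldns_buffer* scratch)
{
	/* flags 0: no RD bit, as for queries to authoritative servers;
	 * dnssec=EDNS_DO asks for DNSSEC records; plain UDP transport
	 * with TCP fallback, no TLS */
	struct serviced_query* sq = outnet_serviced_query(outnet, qinfo,
		0, EDNS_DO, /*want_dnssec*/0, /*nocaps*/0,
		/*tcp_upstream*/0, /*ssl_upstream*/0, /*tls_auth_name*/NULL,
		addr, addrlen, zone, zonelen, my_qstate,
		my_module_callback, my_qstate, scratch, my_env);
	if(!sq)
		return; /* out of memory or the first send failed */
	/* to cancel before the answer arrives:
	 * outnet_serviced_query_stop(sq, my_qstate); */
}
#endif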