/*
 * services/outside_network.c - implement sending of queries and wait answer.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file has functions to send queries to authoritative servers and
 * wait for the pending answer events.
 */
#include "config.h"
#include <ctype.h>
#ifdef HAVE_SYS_TYPES_H
#  include <sys/types.h>
#endif
#include <sys/time.h>
#include "services/outside_network.h"
#include "services/listen_dnsport.h"
#include "services/cache/infra.h"
#include "iterator/iterator.h"
#include "util/data/msgparse.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/data/dname.h"
#include "util/netevent.h"
#include "util/log.h"
#include "util/net_help.h"
#include "util/random.h"
#include "util/fptr_wlist.h"
#include "util/edns.h"
#include "sldns/sbuffer.h"
#include "dnstap/dnstap.h"
#ifdef HAVE_OPENSSL_SSL_H
#include <openssl/ssl.h>
#endif
#ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
#include <openssl/x509v3.h>
#endif

#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#include <fcntl.h>

/** number of times to retry making a random ID that is unique. */
#define MAX_ID_RETRY 1000
/** number of times to retry finding interface, port that can be opened. */
#define MAX_PORT_RETRY 10000
/** number of retries on outgoing UDP queries */
#define OUTBOUND_UDP_RETRY 1

/** initiate TCP transaction for serviced query */
static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
/** with a fd available, randomize and send UDP */
static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
	int timeout);

/** select a DNS ID for a TCP stream */
static uint16_t tcp_select_id(struct outside_network* outnet,
	struct reuse_tcp* reuse);

/** Perform serviced query UDP sending operation */
static int serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff);

/** Send serviced query over TCP; return false on initial failure */
static int serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff);

/** call the callbacks for a serviced query */
static void serviced_callbacks(struct serviced_query* sq, int error,
	struct comm_point* c, struct comm_reply* rep);

int
pending_cmp(const void* key1, const void* key2)
{
	struct pending *p1 = (struct pending*)key1;
	struct pending *p2 = (struct pending*)key2;
	if(p1->id < p2->id)
		return -1;
	if(p1->id > p2->id)
		return 1;
	log_assert(p1->id == p2->id);
	return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
}

int
serviced_cmp(const void* key1, const void* key2)
{
	struct serviced_query* q1 = (struct serviced_query*)key1;
	struct serviced_query* q2 = (struct serviced_query*)key2;
	int r;
	if(q1->qbuflen < q2->qbuflen)
		return -1;
	if(q1->qbuflen > q2->qbuflen)
		return 1;
	log_assert(q1->qbuflen == q2->qbuflen);
	log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
	/* alternate casing of qname is still the same query */
	if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
		return r;
	if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
		return r;
	if(q1->dnssec != q2->dnssec) {
		if(q1->dnssec < q2->dnssec)
			return -1;
		return 1;
	}
	if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
		return r;
	if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
		return r;
	return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
}

/** compare if the reuse element has the same address, port and same ssl-is
 * used-for-it characteristic */
static int
reuse_cmp_addrportssl(const void* key1, const void* key2)
{
	struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
	struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
	int r;
	/* compare address and port */
	r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
	if(r != 0)
		return r;

	/* compare if SSL-enabled */
	if(r1->is_ssl && !r2->is_ssl)
		return 1;
	if(!r1->is_ssl && r2->is_ssl)
		return -1;
	return 0;
}

int
reuse_cmp(const void* key1, const void* key2)
{
	int r;
	r = reuse_cmp_addrportssl(key1, key2);
	if(r != 0)
		return r;

	/* compare ptr value */
	if(key1 < key2) return -1;
	if(key1 > key2) return 1;
	return 0;
}

int reuse_id_cmp(const void* key1, const void* key2)
{
	struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
	struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
	if(w1->id < w2->id)
		return -1;
	if(w1->id > w2->id)
		return 1;
	return 0;
}

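/*
 * For reference, the serviced query key compared by serviced_cmp() above, as
 * the offsets used there imply: bytes 0..9 hold the DNS header without the
 * 2-byte message ID (the ID is chosen per transmission and is not part of
 * the key), the query name starts at byte 10 and is compared
 * case-insensitively with query_dname_compare(), and the last 4 bytes hold
 * the query type and class. Hence the asserted minimum length of 15 bytes:
 * 10 header bytes, 1 byte root name, 2 bytes type and 2 bytes class.
 */
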
/** delete waiting_tcp entry. Does not unlink from waiting list.
 * @param w: to delete.
 */
static void
waiting_tcp_delete(struct waiting_tcp* w)
{
	if(!w) return;
	if(w->timer)
		comm_timer_delete(w->timer);
	free(w);
}

/**
 * Pick random outgoing-interface of that family, and bind it.
 * port set to 0 so OS picks a port number for us.
 * if it is the ANY address, do not bind.
 * @param pend: pending tcp structure, for storing the local address choice.
 * @param w: tcp structure with destination address.
 * @param s: socket fd.
 * @return false on error, socket closed.
 */
static int
pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
{
	struct port_if* pi = NULL;
	int num;
	pend->pi = NULL;
#ifdef INET6
	if(addr_is_ip6(&w->addr, w->addrlen))
		num = w->outnet->num_ip6;
	else
#endif
		num = w->outnet->num_ip4;
	if(num == 0) {
		log_err("no TCP outgoing interfaces of family");
		log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
		sock_close(s);
		return 0;
	}
#ifdef INET6
	if(addr_is_ip6(&w->addr, w->addrlen))
		pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
	else
#endif
		pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
	log_assert(pi);
	pend->pi = pi;
	if(addr_is_any(&pi->addr, pi->addrlen)) {
		/* binding to the ANY interface is for listening sockets */
		return 1;
	}
	/* set port to 0 */
	if(addr_is_ip6(&pi->addr, pi->addrlen))
		((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
	else	((struct sockaddr_in*)&pi->addr)->sin_port = 0;
	if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
#ifndef USE_WINSOCK
#ifdef EADDRNOTAVAIL
		if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
#endif
#else /* USE_WINSOCK */
		if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
#endif
			log_err("outgoing tcp: bind: %s", sock_strerror(errno));
		sock_close(s);
		return 0;
	}
	log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
	return 1;
}

/** get TCP file descriptor for address, returns -1 on failure,
 * tcp_mss is 0 or maxseg size to set for TCP packets. */
int
outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
{
	int s;
	int af;
	char* err;
#if defined(SO_REUSEADDR) || defined(IP_BIND_ADDRESS_NO_PORT)
	int on = 1;
#endif
#ifdef INET6
	if(addr_is_ip6(addr, addrlen)){
		s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
		af = AF_INET6;
	} else {
#else
	{
#endif
		af = AF_INET;
		s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
	}
	if(s == -1) {
		log_err_addr("outgoing tcp: socket", sock_strerror(errno),
			addr, addrlen);
		return -1;
	}

#ifdef SO_REUSEADDR
	if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
		(socklen_t)sizeof(on)) < 0) {
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(.. SO_REUSEADDR ..) failed");
	}
#endif

	err = set_ip_dscp(s, af, dscp);
	if(err != NULL) {
		verbose(VERB_ALGO, "outgoing tcp:"
			" error setting IP DiffServ codepoint on socket");
	}

	if(tcp_mss > 0) {
#if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
		if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
			(void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
			verbose(VERB_ALGO, "outgoing tcp:"
failed"); 310 } 311 #else 312 verbose(VERB_ALGO, "outgoing tcp:" 313 " setsockopt(TCP_MAXSEG) unsupported"); 314 #endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */ 315 } 316 #ifdef IP_BIND_ADDRESS_NO_PORT 317 if(setsockopt(s, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, (void*)&on, 318 (socklen_t)sizeof(on)) < 0) { 319 verbose(VERB_ALGO, "outgoing tcp:" 320 " setsockopt(.. IP_BIND_ADDRESS_NO_PORT ..) failed"); 321 } 322 #endif /* IP_BIND_ADDRESS_NO_PORT */ 323 return s; 324 } 325 326 /** connect tcp connection to addr, 0 on failure */ 327 int 328 outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen) 329 { 330 if(connect(s, (struct sockaddr*)addr, addrlen) == -1) { 331 #ifndef USE_WINSOCK 332 #ifdef EINPROGRESS 333 if(errno != EINPROGRESS) { 334 #endif 335 if(tcp_connect_errno_needs_log( 336 (struct sockaddr*)addr, addrlen)) 337 log_err_addr("outgoing tcp: connect", 338 strerror(errno), addr, addrlen); 339 close(s); 340 return 0; 341 #ifdef EINPROGRESS 342 } 343 #endif 344 #else /* USE_WINSOCK */ 345 if(WSAGetLastError() != WSAEINPROGRESS && 346 WSAGetLastError() != WSAEWOULDBLOCK) { 347 closesocket(s); 348 return 0; 349 } 350 #endif 351 } 352 return 1; 353 } 354 355 /** log reuse item addr and ptr with message */ 356 static void 357 log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse) 358 { 359 uint16_t port; 360 char addrbuf[128]; 361 if(verbosity < v) return; 362 if(!reuse || !reuse->pending || !reuse->pending->c) 363 return; 364 addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf)); 365 port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port); 366 verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port, 367 reuse->pending->c->fd); 368 } 369 370 /** pop the first element from the writewait list */ 371 struct waiting_tcp* 372 reuse_write_wait_pop(struct reuse_tcp* reuse) 373 { 374 struct waiting_tcp* w = reuse->write_wait_first; 375 if(!w) 376 return NULL; 377 log_assert(w->write_wait_queued); 378 log_assert(!w->write_wait_prev); 379 reuse->write_wait_first = w->write_wait_next; 380 if(w->write_wait_next) 381 w->write_wait_next->write_wait_prev = NULL; 382 else reuse->write_wait_last = NULL; 383 w->write_wait_queued = 0; 384 w->write_wait_next = NULL; 385 w->write_wait_prev = NULL; 386 return w; 387 } 388 389 /** remove the element from the writewait list */ 390 void 391 reuse_write_wait_remove(struct reuse_tcp* reuse, struct waiting_tcp* w) 392 { 393 log_assert(w); 394 log_assert(w->write_wait_queued); 395 if(!w) 396 return; 397 if(!w->write_wait_queued) 398 return; 399 if(w->write_wait_prev) 400 w->write_wait_prev->write_wait_next = w->write_wait_next; 401 else reuse->write_wait_first = w->write_wait_next; 402 log_assert(!w->write_wait_prev || 403 w->write_wait_prev->write_wait_next != w->write_wait_prev); 404 if(w->write_wait_next) 405 w->write_wait_next->write_wait_prev = w->write_wait_prev; 406 else reuse->write_wait_last = w->write_wait_prev; 407 log_assert(!w->write_wait_next 408 || w->write_wait_next->write_wait_prev != w->write_wait_next); 409 w->write_wait_queued = 0; 410 w->write_wait_next = NULL; 411 w->write_wait_prev = NULL; 412 } 413 414 /** push the element after the last on the writewait list */ 415 void 416 reuse_write_wait_push_back(struct reuse_tcp* reuse, struct waiting_tcp* w) 417 { 418 if(!w) return; 419 log_assert(!w->write_wait_queued); 420 if(reuse->write_wait_last) { 421 reuse->write_wait_last->write_wait_next = w; 422 log_assert(reuse->write_wait_last->write_wait_next != 423 
/** insert element in tree by id */
void
reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
#ifdef UNBOUND_DEBUG
	rbnode_type* added;
#endif
	log_assert(w->id_node.key == NULL);
	w->id_node.key = w;
#ifdef UNBOUND_DEBUG
	added =
#else
	(void)
#endif
	rbtree_insert(&reuse->tree_by_id, &w->id_node);
	log_assert(added); /* should have been added */
}

/** find element in tree by id */
struct waiting_tcp*
reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
{
	struct waiting_tcp key_w;
	rbnode_type* n;
	memset(&key_w, 0, sizeof(key_w));
	key_w.id_node.key = &key_w;
	key_w.id = id;
	n = rbtree_search(&reuse->tree_by_id, &key_w);
	if(!n) return NULL;
	return (struct waiting_tcp*)n->key;
}

/** return ID value of rbnode in tree_by_id */
static uint16_t
tree_by_id_get_id(rbnode_type* node)
{
	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
	return w->id;
}

/** insert into reuse tcp tree and LRU, false on failure (duplicate) */
int
reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
	if(pend_tcp->reuse.item_on_lru_list) {
		if(!pend_tcp->reuse.node.key)
			log_err("internal error: reuse_tcp_insert: "
				"in lru list without key");
		return 1;
	}
	pend_tcp->reuse.node.key = &pend_tcp->reuse;
	pend_tcp->reuse.pending = pend_tcp;
	if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
		/* We are not in the LRU list but we are already in the
		 * tcp_reuse tree, strange.
		 * Continue to add ourselves to the LRU list. */
		log_err("internal error: reuse_tcp_insert: in lru list but "
			"not in the tree");
	}
	/* insert into LRU, first is newest */
	pend_tcp->reuse.lru_prev = NULL;
	if(outnet->tcp_reuse_first) {
		pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
		log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
		outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
		log_assert(outnet->tcp_reuse_first->lru_prev !=
			outnet->tcp_reuse_first);
	} else {
		pend_tcp->reuse.lru_next = NULL;
		outnet->tcp_reuse_last = &pend_tcp->reuse;
	}
	outnet->tcp_reuse_first = &pend_tcp->reuse;
	pend_tcp->reuse.item_on_lru_list = 1;
	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
	return 1;
}

/** find reuse tcp stream to destination for query, or NULL if none */
static struct reuse_tcp*
reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
	socklen_t addrlen, int use_ssl)
{
	struct waiting_tcp key_w;
	struct pending_tcp key_p;
	struct comm_point c;
	rbnode_type* result = NULL, *prev;
	verbose(VERB_CLIENT, "reuse_tcp_find");
	memset(&key_w, 0, sizeof(key_w));
	memset(&key_p, 0, sizeof(key_p));
	memset(&c, 0, sizeof(c));
	key_p.query = &key_w;
	key_p.c = &c;
	key_p.reuse.pending = &key_p;
	key_p.reuse.node.key = &key_p.reuse;
	if(use_ssl)
		key_p.reuse.is_ssl = 1;
	if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
		return NULL;
	memmove(&key_p.reuse.addr, addr, addrlen);
	key_p.reuse.addrlen = addrlen;

	verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
		(unsigned)outnet->tcp_reuse.count);
	if(outnet->tcp_reuse.root == NULL ||
		outnet->tcp_reuse.root == RBTREE_NULL)
		return NULL;
	if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
		&result)) {
		/* exact match */
		/* but the key is on stack, and ptr is compared, impossible */
		log_assert(&key_p.reuse != (struct reuse_tcp*)result);
		log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
	}

	/* It is possible that we search for something before the first element
	 * in the tree. Replace a null pointer with the first element.
	 */
	if (!result) {
		verbose(VERB_CLIENT, "reuse_tcp_find: taking first");
		result = rbtree_first(&outnet->tcp_reuse);
	}

	/* not found, return null */
	if(!result || result == RBTREE_NULL)
		return NULL;

	/* It is possible that we got the previous address, but that the
	 * address we are looking for is in the tree. If the address we got
	 * is less than the address we are looking for, then take the next
	 * entry.
	 */
	if (reuse_cmp_addrportssl(result->key, &key_p.reuse) < 0) {
		verbose(VERB_CLIENT, "reuse_tcp_find: key too low");
		result = rbtree_next(result);
	}

	verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
	/* inexact match, find one of possibly several connections to the
	 * same destination address, with the correct port, ssl, and
	 * also less than max number of open queries, or else, fail to open
	 * a new one */
	/* rewind to start of sequence of same address,port,ssl */
	prev = rbtree_previous(result);
	while(prev && prev != RBTREE_NULL &&
		reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
		result = prev;
		prev = rbtree_previous(result);
	}

	/* loop to find first one that has correct characteristics */
	while(result && result != RBTREE_NULL &&
		reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
		if(((struct reuse_tcp*)result)->tree_by_id.count <
			outnet->max_reuse_tcp_queries) {
			/* same address, port, ssl-yes-or-no, and has
			 * space for another query */
			return (struct reuse_tcp*)result;
		}
		result = rbtree_next(result);
	}
	return NULL;
}

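/*
 * Typical use of reuse_tcp_find(), mirroring use_free_buffer() further
 * below: when an open stream to the destination is found, the query is
 * queued on it instead of opening a new connection, roughly:
 *
 *	reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen, w->ssl_upstream);
 *	if(reuse) {
 *		reuse_tcp_lru_touch(outnet, reuse);
 *		reuse_tree_by_id_insert(reuse, w);
 *		if(reuse->pending->query)
 *			reuse_write_wait_push_back(reuse, w);
 *		else	... write it straight away ...
 *	}
 */
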
/** use the buffer to setup writing the query */
static void
outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
	struct waiting_tcp* w)
{
	struct timeval tv;
	verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
		"len %d timeout %d msec",
		(int)w->pkt_len, w->timeout);
	pend->c->tcp_write_pkt = w->pkt;
	pend->c->tcp_write_pkt_len = w->pkt_len;
	pend->c->tcp_write_and_read = 1;
	pend->c->tcp_write_byte_count = 0;
	pend->c->tcp_is_reading = 0;
	comm_point_start_listening(pend->c, s, -1);
	/* set timer on the waiting_tcp entry, this is the write timeout
	 * for the written packet. The timer on pend->c is the timer
	 * for when there is no written packet and we have readtimeouts */
#ifndef S_SPLINT_S
	tv.tv_sec = w->timeout/1000;
	tv.tv_usec = (w->timeout%1000)*1000;
#endif
	/* if the waiting_tcp was previously waiting for a buffer in the
	 * outside_network.tcpwaitlist, then the timer is reset now that
	 * we start writing it */
	comm_timer_set(w->timer, &tv);
}

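/*
 * Note on the timeout arithmetic above: w->timeout is in milliseconds, so
 * for example a timeout of 1500 becomes tv.tv_sec = 1, tv.tv_usec = 500000.
 */
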
/** use next free buffer to service a tcp query */
static int
outnet_tcp_take_into_use(struct waiting_tcp* w)
{
	struct pending_tcp* pend = w->outnet->tcp_free;
	int s;
	log_assert(pend);
	log_assert(w->pkt);
	log_assert(w->pkt_len > 0);
	log_assert(w->addrlen > 0);
	pend->c->tcp_do_toggle_rw = 0;
	pend->c->tcp_do_close = 0;

	/* Consistency check, if we have ssl_upstream but no sslctx, then
	 * log an error and return failure.
	 */
	if (w->ssl_upstream && !w->outnet->sslctx) {
		log_err("SSL upstream requested but no SSL context");
		return 0;
	}

	/* open socket */
	s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);

	if(s == -1)
		return 0;

	if(!pick_outgoing_tcp(pend, w, s))
		return 0;

	fd_set_nonblock(s);
#ifdef USE_OSX_MSG_FASTOPEN
	/* API for fast open is different here. We use a connectx() function and
	   then writes can happen as normal even using SSL. */
	/* connectx requires that the len be set in the sockaddr struct */
	struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
	addr_in->sin_len = w->addrlen;
	sa_endpoints_t endpoints;
	endpoints.sae_srcif = 0;
	endpoints.sae_srcaddr = NULL;
	endpoints.sae_srcaddrlen = 0;
	endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
	endpoints.sae_dstaddrlen = w->addrlen;
	if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
		CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
		NULL, 0, NULL, NULL) == -1) {
		/* if fails, failover to connect for OSX 10.10 */
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#else
		if(1) {
#endif
			if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#else /* USE_OSX_MSG_FASTOPEN*/
#ifdef USE_MSG_FASTOPEN
	pend->c->tcp_do_fastopen = 1;
	/* Only do TFO for plain TCP, in which case no connect() is required
	   here. Don't combine client TFO with SSL, since OpenSSL can't
	   currently support doing a handshake on an fd that isn't connected
	   yet. */
	if (w->outnet->sslctx && w->ssl_upstream) {
		if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#else /* USE_MSG_FASTOPEN*/
	if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#endif /* USE_MSG_FASTOPEN*/
#endif /* USE_OSX_MSG_FASTOPEN*/
#ifndef USE_WINSOCK
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#else
		if(1) {
#endif
			if(tcp_connect_errno_needs_log(
				(struct sockaddr*)&w->addr, w->addrlen))
				log_err_addr("outgoing tcp: connect",
					strerror(errno), &w->addr, w->addrlen);
			close(s);
#else /* USE_WINSOCK */
		if(WSAGetLastError() != WSAEINPROGRESS &&
			WSAGetLastError() != WSAEWOULDBLOCK) {
			closesocket(s);
#endif
			return 0;
		}
	}
#ifdef USE_MSG_FASTOPEN
	}
#endif /* USE_MSG_FASTOPEN */
#ifdef USE_OSX_MSG_FASTOPEN
		}
	}
#endif /* USE_OSX_MSG_FASTOPEN */
	if(w->outnet->sslctx && w->ssl_upstream) {
		pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
		if(!pend->c->ssl) {
			pend->c->fd = s;
			comm_point_close(pend->c);
			return 0;
		}
		verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
			(w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
#ifdef USE_WINSOCK
		comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
#endif
		pend->c->ssl_shake_state = comm_ssl_shake_write;
		if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
			w->outnet->tls_use_sni)) {
			pend->c->fd = s;
#ifdef HAVE_SSL
			SSL_free(pend->c->ssl);
#endif
			pend->c->ssl = NULL;
			comm_point_close(pend->c);
			return 0;
		}
	}
	w->next_waiting = (void*)pend;
	w->outnet->num_tcp_outgoing++;
	w->outnet->tcp_free = pend->next_free;
	pend->next_free = NULL;
	pend->query = w;
	pend->reuse.outnet = w->outnet;
	pend->c->repinfo.remote_addrlen = w->addrlen;
	pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
	pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
	pend->reuse.cp_more_read_again = 0;
	pend->reuse.cp_more_write_again = 0;
	memcpy(&pend->c->repinfo.remote_addr, &w->addr, w->addrlen);
	pend->reuse.pending = pend;

	/* Remove from tree in case the is_ssl will be different and causes the
	 * identity of the reuse_tcp to change; could result in nodes not being
	 * deleted from the tree (because the new identity does not match the
	 * previous node) but their ->key would be changed to NULL. */
	if(pend->reuse.node.key)
		reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);

	if(pend->c->ssl)
		pend->reuse.is_ssl = 1;
	else	pend->reuse.is_ssl = 0;
	/* insert in reuse by address tree if not already inserted there */
	(void)reuse_tcp_insert(w->outnet, pend);
	reuse_tree_by_id_insert(&pend->reuse, w);
	outnet_tcp_take_query_setup(s, pend, w);
	return 1;
}

/** Touch the lru of a reuse_tcp element, it is in use.
 * This moves it to the front of the list, where it is not likely to
 * be closed. Items at the back of the list are closed to make space. */
void
reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
{
	if(!reuse->item_on_lru_list) {
		log_err("internal error: we need to touch the lru_list but item not in list");
		return; /* not on the list, no lru to modify */
	}
	log_assert(reuse->lru_prev ||
		(!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
	if(!reuse->lru_prev)
		return; /* already first in the list */
	/* remove at current position */
	/* since it is not first, there is a previous element */
	reuse->lru_prev->lru_next = reuse->lru_next;
	log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
	if(reuse->lru_next)
		reuse->lru_next->lru_prev = reuse->lru_prev;
	else	outnet->tcp_reuse_last = reuse->lru_prev;
	log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
	/* insert at the front */
	reuse->lru_prev = NULL;
	reuse->lru_next = outnet->tcp_reuse_first;
	if(outnet->tcp_reuse_first) {
		outnet->tcp_reuse_first->lru_prev = reuse;
	}
	log_assert(reuse->lru_next != reuse);
	/* since it is not first, it is not the only element and
	 * lru_next is thus not NULL and thus reuse is now not the last in
	 * the list, so outnet->tcp_reuse_last does not need to be modified */
	outnet->tcp_reuse_first = reuse;
	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
}

/** Snip the last reuse_tcp element off of the LRU list */
struct reuse_tcp*
reuse_tcp_lru_snip(struct outside_network* outnet)
{
	struct reuse_tcp* reuse = outnet->tcp_reuse_last;
	if(!reuse) return NULL;
	/* snip off of LRU */
	log_assert(reuse->lru_next == NULL);
	if(reuse->lru_prev) {
		outnet->tcp_reuse_last = reuse->lru_prev;
		reuse->lru_prev->lru_next = NULL;
	} else {
		outnet->tcp_reuse_last = NULL;
		outnet->tcp_reuse_first = NULL;
	}
	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
	reuse->item_on_lru_list = 0;
	reuse->lru_next = NULL;
	reuse->lru_prev = NULL;
	return reuse;
}

/** remove waiting tcp from the outnet waiting list */
void
outnet_waiting_tcp_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
{
	struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
	w->on_tcp_waiting_list = 0;
	while(p) {
		if(p == w) {
			/* remove w */
			if(prev)
				prev->next_waiting = w->next_waiting;
			else	outnet->tcp_wait_first = w->next_waiting;
			if(outnet->tcp_wait_last == w)
				outnet->tcp_wait_last = prev;
			w->next_waiting = NULL;
			return;
		}
		prev = p;
		p = p->next_waiting;
	}
	/* outnet_waiting_tcp_list_remove is currently called only with items
	 * that are already in the waiting list. */
	log_assert(0);
}

/** pop the first waiting tcp from the outnet waiting list */
struct waiting_tcp*
outnet_waiting_tcp_list_pop(struct outside_network* outnet)
{
	struct waiting_tcp* w = outnet->tcp_wait_first;
	if(!outnet->tcp_wait_first) return NULL;
	log_assert(w->on_tcp_waiting_list);
	outnet->tcp_wait_first = w->next_waiting;
	if(outnet->tcp_wait_last == w)
		outnet->tcp_wait_last = NULL;
	w->on_tcp_waiting_list = 0;
	w->next_waiting = NULL;
	return w;
}

/** add waiting_tcp element to the outnet tcp waiting list */
void
outnet_waiting_tcp_list_add(struct outside_network* outnet,
	struct waiting_tcp* w, int set_timer)
{
	struct timeval tv;
	log_assert(!w->on_tcp_waiting_list);
	if(w->on_tcp_waiting_list)
		return;
	w->next_waiting = NULL;
	if(outnet->tcp_wait_last)
		outnet->tcp_wait_last->next_waiting = w;
	else	outnet->tcp_wait_first = w;
	outnet->tcp_wait_last = w;
	w->on_tcp_waiting_list = 1;
	if(set_timer) {
#ifndef S_SPLINT_S
		tv.tv_sec = w->timeout/1000;
		tv.tv_usec = (w->timeout%1000)*1000;
#endif
		comm_timer_set(w->timer, &tv);
	}
}

/** add waiting_tcp element as first to the outnet tcp waiting list */
void
outnet_waiting_tcp_list_add_first(struct outside_network* outnet,
	struct waiting_tcp* w, int reset_timer)
{
	struct timeval tv;
	log_assert(!w->on_tcp_waiting_list);
	if(w->on_tcp_waiting_list)
		return;
	w->next_waiting = outnet->tcp_wait_first;
	log_assert(w->next_waiting != w);
	if(!outnet->tcp_wait_last)
		outnet->tcp_wait_last = w;
	outnet->tcp_wait_first = w;
	w->on_tcp_waiting_list = 1;
	if(reset_timer) {
#ifndef S_SPLINT_S
		tv.tv_sec = w->timeout/1000;
		tv.tv_usec = (w->timeout%1000)*1000;
#endif
		comm_timer_set(w->timer, &tv);
	}
	log_assert(
		(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
}

/** call callback on waiting_tcp, if not NULL */
static void
waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
	struct comm_reply* reply_info)
{
	if(w && w->cb) {
		fptr_ok(fptr_whitelist_pending_tcp(w->cb));
		(void)(*w->cb)(c, w->cb_arg, error, reply_info);
	}
}

/** see if buffers can be used to service TCP queries */
static void
use_free_buffer(struct outside_network* outnet)
{
	struct waiting_tcp* w;
	while(outnet->tcp_wait_first && !outnet->want_to_quit) {
#ifdef USE_DNSTAP
		struct pending_tcp* pend_tcp = NULL;
#endif
		struct reuse_tcp* reuse = NULL;
		w = outnet_waiting_tcp_list_pop(outnet);
		log_assert(
			(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
		reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
			w->ssl_upstream);
		/* re-select an ID when moving to a new TCP buffer */
		w->id = tcp_select_id(outnet, reuse);
		LDNS_ID_SET(w->pkt, w->id);
		if(reuse) {
			log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
				"found reuse", reuse);
#ifdef USE_DNSTAP
			pend_tcp = reuse->pending;
#endif
			reuse_tcp_lru_touch(outnet, reuse);
			comm_timer_disable(w->timer);
			w->next_waiting = (void*)reuse->pending;
			reuse_tree_by_id_insert(reuse, w);
			if(reuse->pending->query) {
				/* on the write wait list */
				reuse_write_wait_push_back(reuse, w);
			} else {
				/* write straight away */
				/* stop the timer on read of the fd */
				comm_point_stop_listening(reuse->pending->c);
				reuse->pending->query = w;
				outnet_tcp_take_query_setup(
					reuse->pending->c->fd, reuse->pending,
					w);
			}
		} else if(outnet->tcp_free) {
			struct pending_tcp* pend = w->outnet->tcp_free;
			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
			pend->reuse.pending = pend;
			memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
			pend->reuse.addrlen = w->addrlen;
			if(!outnet_tcp_take_into_use(w)) {
				waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
					NULL);
				waiting_tcp_delete(w);
#ifdef USE_DNSTAP
				w = NULL;
#endif
			}
#ifdef USE_DNSTAP
			pend_tcp = pend;
#endif
		} else {
			/* no reuse and no free buffer, put back at the start */
			outnet_waiting_tcp_list_add_first(outnet, w, 0);
			break;
		}
#ifdef USE_DNSTAP
		if(outnet->dtenv && pend_tcp && w && w->sq &&
			(outnet->dtenv->log_resolver_query_messages ||
			outnet->dtenv->log_forwarder_query_messages)) {
			sldns_buffer tmp;
			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
			dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
				&pend_tcp->pi->addr, comm_tcp, NULL, w->sq->zone,
				w->sq->zonelen, &tmp);
		}
#endif
	}
}

/** delete element from tree by id */
static void
reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
#ifdef UNBOUND_DEBUG
	rbnode_type* rem;
#endif
	log_assert(w->id_node.key != NULL);
#ifdef UNBOUND_DEBUG
	rem =
#else
	(void)
#endif
	rbtree_delete(&reuse->tree_by_id, w);
	log_assert(rem); /* should have been there */
	w->id_node.key = NULL;
}

/** move writewait list to go for another connection. */
static void
reuse_move_writewait_away(struct outside_network* outnet,
	struct pending_tcp* pend)
{
	/* the writewait list has not been written yet, so if the
	 * stream was closed, they have not actually been failed, only
	 * the queries written. Other queries can get written to another
	 * stream. For upstreams that do not support multiple queries
	 * and answers, the stream can get closed, and then the queries
	 * can get written on a new socket */
	struct waiting_tcp* w;
	if(pend->query && pend->query->error_count == 0 &&
		pend->c->tcp_write_pkt == pend->query->pkt &&
		pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
		/* since the current query is not written, it can also
		 * move to a free buffer */
		if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
			LDNS_QDCOUNT(pend->query->pkt) > 0 &&
			dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
			char buf[LDNS_MAX_DOMAINLEN+1];
			dname_str(pend->query->pkt+12, buf);
			verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
				buf, (int)pend->c->tcp_write_byte_count);
		}
		pend->c->tcp_write_pkt = NULL;
		pend->c->tcp_write_pkt_len = 0;
		pend->c->tcp_write_and_read = 0;
		pend->reuse.cp_more_read_again = 0;
		pend->reuse.cp_more_write_again = 0;
		pend->c->tcp_is_reading = 1;
		w = pend->query;
		pend->query = NULL;
		/* increase error count, so that if the next socket fails too
		 * the server selection is run again with this query failed
		 * and it can select a different server (if possible), or
		 * fail the query */
		w->error_count ++;
		reuse_tree_by_id_delete(&pend->reuse, w);
		outnet_waiting_tcp_list_add(outnet, w, 1);
	}
	while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
		if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
			LDNS_QDCOUNT(w->pkt) > 0 &&
			dname_valid(w->pkt+12, w->pkt_len-12)) {
			char buf[LDNS_MAX_DOMAINLEN+1];
			dname_str(w->pkt+12, buf);
			verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
		}
		reuse_tree_by_id_delete(&pend->reuse, w);
		outnet_waiting_tcp_list_add(outnet, w, 1);
	}
}

/** remove reused element from tree and lru list */
void
reuse_tcp_remove_tree_list(struct outside_network* outnet,
	struct reuse_tcp* reuse)
{
	verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
	if(reuse->node.key) {
		/* delete it from reuse tree */
		if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
			/* should not be possible, it should be there */
			char buf[256];
			addr_to_str(&reuse->addr, reuse->addrlen, buf,
				sizeof(buf));
			log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
		}
		reuse->node.key = NULL;
		/* defend against loops on broken tree by zeroing the
		 * rbnode structure */
		memset(&reuse->node, 0, sizeof(reuse->node));
	}
	/* delete from reuse list */
	if(reuse->item_on_lru_list) {
		if(reuse->lru_prev) {
			/* assert that members of the lru list are waiting
			 * and thus have a pending pointer to the struct */
			log_assert(reuse->lru_prev->pending);
			reuse->lru_prev->lru_next = reuse->lru_next;
			log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
		} else {
			log_assert(!reuse->lru_next || reuse->lru_next->pending);
			outnet->tcp_reuse_first = reuse->lru_next;
			log_assert(!outnet->tcp_reuse_first ||
				(outnet->tcp_reuse_first !=
					outnet->tcp_reuse_first->lru_next &&
				outnet->tcp_reuse_first !=
					outnet->tcp_reuse_first->lru_prev));
		}
		if(reuse->lru_next) {
			/* assert that members of the lru list are waiting
			 * and thus have a pending pointer to the struct */
			log_assert(reuse->lru_next->pending);
			reuse->lru_next->lru_prev = reuse->lru_prev;
			log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
		} else {
			log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
			outnet->tcp_reuse_last = reuse->lru_prev;
			log_assert(!outnet->tcp_reuse_last ||
				(outnet->tcp_reuse_last !=
					outnet->tcp_reuse_last->lru_next &&
				outnet->tcp_reuse_last !=
					outnet->tcp_reuse_last->lru_prev));
		}
		log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
		reuse->item_on_lru_list = 0;
		reuse->lru_next = NULL;
		reuse->lru_prev = NULL;
	}
	reuse->pending = NULL;
}

/** helper function that deletes an element from the tree of readwait
 * elements in tcp reuse structure */
static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
{
	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
	waiting_tcp_delete(w);
}

/** delete readwait waiting_tcp elements, deletes the elements in the list */
void reuse_del_readwait(rbtree_type* tree_by_id)
{
	if(tree_by_id->root == NULL ||
		tree_by_id->root == RBTREE_NULL)
		return;
	traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
	rbtree_init(tree_by_id, reuse_id_cmp);
}

/** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
static void
decommission_pending_tcp(struct outside_network* outnet,
	struct pending_tcp* pend)
{
	verbose(VERB_CLIENT, "decommission_pending_tcp");
	/* A certain code path can lead here twice for the same pending_tcp
	 * creating a loop in the free pending_tcp list. */
	if(outnet->tcp_free != pend) {
		pend->next_free = outnet->tcp_free;
		outnet->tcp_free = pend;
	}
	if(pend->reuse.node.key) {
		/* needs unlink from the reuse tree to get deleted */
		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
	}
	/* free SSL structure after remove from outnet tcp reuse tree,
	 * because the c->ssl null or not is used for sorting in the tree */
	if(pend->c->ssl) {
#ifdef HAVE_SSL
		SSL_shutdown(pend->c->ssl);
		SSL_free(pend->c->ssl);
		pend->c->ssl = NULL;
#endif
	}
	comm_point_close(pend->c);
	pend->reuse.cp_more_read_again = 0;
	pend->reuse.cp_more_write_again = 0;
	/* unlink the query and writewait list, it is part of the tree
	 * nodes and is deleted */
	pend->query = NULL;
	pend->reuse.write_wait_first = NULL;
	pend->reuse.write_wait_last = NULL;
	reuse_del_readwait(&pend->reuse.tree_by_id);
}

/** perform failure callbacks for waiting queries in reuse read rbtree */
static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
{
	rbnode_type* node;
	if(tree_by_id->root == NULL ||
		tree_by_id->root == RBTREE_NULL)
		return;
	node = rbtree_first(tree_by_id);
	while(node && node != RBTREE_NULL) {
		struct waiting_tcp* w = (struct waiting_tcp*)node->key;
		waiting_tcp_callback(w, NULL, err, NULL);
		node = rbtree_next(node);
	}
}

/** mark the entry for being in the cb_and_decommission stage */
static void mark_for_cb_and_decommission(rbnode_type* node,
	void* ATTR_UNUSED(arg))
{
	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
	/* Mark the waiting_tcp to signal later code (serviced_delete) that
	 * this item is part of the backed up tree_by_id and will be deleted
	 * later. */
	w->in_cb_and_decommission = 1;
	/* Mark the serviced_query for deletion so that later code through
	 * callbacks (iter_clear .. outnet_serviced_query_stop) won't
	 * prematurely delete it. */
	if(w->cb)
		((struct serviced_query*)w->cb_arg)->to_be_deleted = 1;
}

/** perform callbacks for failure and also decommission pending tcp.
 * the callbacks remove references in sq->pending to the waiting_tcp
 * members of the tree_by_id in the pending tcp. The pending_tcp is
 * removed before the callbacks, so that the callbacks do not modify
 * the pending_tcp due to its reference in the outside_network reuse tree */
static void reuse_cb_and_decommission(struct outside_network* outnet,
	struct pending_tcp* pend, int error)
{
	rbtree_type store;
	store = pend->reuse.tree_by_id;
	pend->query = NULL;
	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
	pend->reuse.write_wait_first = NULL;
	pend->reuse.write_wait_last = NULL;
	decommission_pending_tcp(outnet, pend);
	if(store.root != NULL && store.root != RBTREE_NULL) {
		traverse_postorder(&store, &mark_for_cb_and_decommission, NULL);
	}
	reuse_cb_readwait_for_failure(&store, error);
	reuse_del_readwait(&store);
}

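/*
 * Note on reuse_cb_and_decommission() above: "store = pend->reuse.tree_by_id"
 * copies the rbtree header (root pointer and count) by value, after which
 * the live tree is re-initialized empty. The stored copy therefore still
 * refers to all outstanding waiting_tcp nodes, so the failure callbacks and
 * the final reuse_del_readwait() can run on it after the pending_tcp itself
 * has already been decommissioned.
 */
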
/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
static void
reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
}

/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
static void
reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
	sldns_buffer_clear(pend_tcp->c->buffer);
	pend_tcp->c->tcp_is_reading = 1;
	pend_tcp->c->tcp_byte_count = 0;
	comm_point_stop_listening(pend_tcp->c);
	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
}

int
outnet_tcp_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply *reply_info)
{
	struct pending_tcp* pend = (struct pending_tcp*)arg;
	struct outside_network* outnet = pend->reuse.outnet;
	struct waiting_tcp* w = NULL;
	log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
	verbose(VERB_ALGO, "outnettcp cb");
	if(error == NETEVENT_TIMEOUT) {
		if(pend->c->tcp_write_and_read) {
			verbose(VERB_QUERY, "outnettcp got tcp timeout "
				"for read, ignored because write underway");
			/* if we are writing, ignore readtimer, wait for write timer
			 * or write is done */
			return 0;
		} else {
			verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
				(pend->reuse.tree_by_id.count?"for reading pkt":
				"for keepalive for reuse"));
		}
		/* must be timeout for reading or keepalive reuse,
		 * close it. */
		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
	} else if(error == NETEVENT_PKT_WRITTEN) {
		/* the packet we want to write has been written. */
		verbose(VERB_ALGO, "outnet tcp pkt was written event");
		log_assert(c == pend->c);
		log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
		log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
		pend->c->tcp_write_pkt = NULL;
		pend->c->tcp_write_pkt_len = 0;
		/* the pend.query is already in tree_by_id */
		log_assert(pend->query->id_node.key);
		pend->query = NULL;
		/* setup to write next packet or setup read timeout */
		if(pend->reuse.write_wait_first) {
			verbose(VERB_ALGO, "outnet tcp setup next pkt");
			/* we can write it straight away perhaps, set flag
			 * because this callback is called after a tcp write
			 * succeeded and likely more buffer space is available
			 * and we can write some more. */
			pend->reuse.cp_more_write_again = 1;
			pend->query = reuse_write_wait_pop(&pend->reuse);
			comm_point_stop_listening(pend->c);
			outnet_tcp_take_query_setup(pend->c->fd, pend,
				pend->query);
		} else {
			verbose(VERB_ALGO, "outnet tcp writes done, wait");
			pend->c->tcp_write_and_read = 0;
			pend->reuse.cp_more_read_again = 0;
			pend->reuse.cp_more_write_again = 0;
			pend->c->tcp_is_reading = 1;
			comm_point_stop_listening(pend->c);
			reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
		}
		return 0;
	} else if(error != NETEVENT_NOERROR) {
		verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
		reuse_move_writewait_away(outnet, pend);
		/* pass error below and exit */
	} else {
		/* check ID */
		if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
			log_addr(VERB_QUERY,
				"outnettcp: bad ID in reply, too short, from:",
				&pend->reuse.addr, pend->reuse.addrlen);
			error = NETEVENT_CLOSED;
		} else {
			uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
				c->buffer));
			/* find the query the reply is for */
			w = reuse_tcp_by_id_find(&pend->reuse, id);
			/* Make sure that the reply we got is at least for a
			 * sent query with the same ID; the waiting_tcp that
			 * gets a reply is assumed to not be waiting to be
			 * sent. */
			if(w && (w->on_tcp_waiting_list || w->write_wait_queued))
				w = NULL;
		}
	}
	if(error == NETEVENT_NOERROR && !w) {
		/* no struct waiting found in tree, no reply to call */
		log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
			&pend->reuse.addr, pend->reuse.addrlen);
		error = NETEVENT_CLOSED;
	}
	if(error == NETEVENT_NOERROR) {
		/* add to reuse tree so it can be reused, if not a failure.
		 * This is possible if the state machine wants to make a tcp
		 * query again to the same destination. */
		if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
			(void)reuse_tcp_insert(outnet, pend);
		}
	}
	if(w) {
		log_assert(!w->on_tcp_waiting_list);
		log_assert(!w->write_wait_queued);
		reuse_tree_by_id_delete(&pend->reuse, w);
		verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
			error, (int)sldns_buffer_limit(c->buffer));
		waiting_tcp_callback(w, c, error, reply_info);
		waiting_tcp_delete(w);
	}
	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
	if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
		verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
		/* it is in the reuse_tcp tree, with other queries, or
		 * on the empty list. do not decommission it */
		/* if there are more outstanding queries, we could try to
		 * read again, to see if it is on the input,
		 * because this callback is called after a successful read
		 * and there could be more bytes to read on the input */
		if(pend->reuse.tree_by_id.count != 0)
			pend->reuse.cp_more_read_again = 1;
		reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
		return 0;
	}
	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
	/* no queries on it, no space to keep it. or timeout or closed due
	 * to error. Close it */
	reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
		NETEVENT_TIMEOUT:NETEVENT_CLOSED));
	use_free_buffer(outnet);
	return 0;
}

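/*
 * Summary of outnet_tcp_cb() above: on NETEVENT_PKT_WRITTEN the next queued
 * query is written, or the stream switches to reading with the reuse
 * timeout; on a good reply the matching waiting_tcp gets its callback and
 * the stream is kept for reuse when there is room in the reuse tree; on
 * error the not yet written queries are moved back to the wait list by
 * reuse_move_writewait_away(), and on error or timeout the queries that
 * remain attached to the stream receive failure callbacks while the
 * pending_tcp is decommissioned, after which use_free_buffer() hands the
 * freed slot to any queries on the tcp wait list.
 */
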
/** lower use count on pc, see if it can be closed */
static void
portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
{
	struct port_if* pif;
	pc->num_outstanding--;
	if(pc->num_outstanding > 0) {
		return;
	}
	/* close it and replace in unused list */
	verbose(VERB_ALGO, "close of port %d", pc->number);
	comm_point_close(pc->cp);
	pif = pc->pif;
	log_assert(pif->inuse > 0);
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
	pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
#endif
	pif->inuse--;
	/* move the last in-use port_comm into the freed slot, so the in-use
	 * entries stay contiguous at the start of the out array */
	pif->out[pc->index] = pif->out[pif->inuse];
	pif->out[pc->index]->index = pc->index;
	pc->next = outnet->unused_fds;
	outnet->unused_fds = pc;
}

/** try to send waiting UDP queries */
static void
outnet_send_wait_udp(struct outside_network* outnet)
{
	struct pending* pend;
	/* process waiting queries */
	while(outnet->udp_wait_first && outnet->unused_fds
		&& !outnet->want_to_quit) {
		pend = outnet->udp_wait_first;
		outnet->udp_wait_first = pend->next_waiting;
		if(!pend->next_waiting) outnet->udp_wait_last = NULL;
		sldns_buffer_clear(outnet->udp_buff);
		sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
		sldns_buffer_flip(outnet->udp_buff);
		free(pend->pkt); /* freeing now makes get_mem correct */
		pend->pkt = NULL;
		pend->pkt_len = 0;
		log_assert(!pend->sq->busy);
		pend->sq->busy = 1;
		if(!randomize_and_send_udp(pend, outnet->udp_buff,
			pend->timeout)) {
			/* callback error on pending */
			if(pend->cb) {
				fptr_ok(fptr_whitelist_pending_udp(pend->cb));
				(void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
					NETEVENT_CLOSED, NULL);
			}
			pending_delete(outnet, pend);
		} else {
			pend->sq->busy = 0;
		}
	}
}

int
outnet_udp_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply *reply_info)
{
	struct outside_network* outnet = (struct outside_network*)arg;
	struct pending key;
	struct pending* p;
	verbose(VERB_ALGO, "answer cb");

	if(error != NETEVENT_NOERROR) {
		verbose(VERB_QUERY, "outnetudp got udp error %d", error);
		return 0;
	}
	if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
		verbose(VERB_QUERY, "outnetudp udp too short");
		return 0;
	}
	log_assert(reply_info);

	/* setup lookup key */
	key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
	memcpy(&key.addr, &reply_info->remote_addr, reply_info->remote_addrlen);
	key.addrlen = reply_info->remote_addrlen;
	verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
	log_addr(VERB_ALGO, "Incoming reply addr =",
		&reply_info->remote_addr, reply_info->remote_addrlen);

	/* find it, see if this thing is a valid query response */
	verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
	p = (struct pending*)rbtree_search(outnet->pending, &key);
	if(!p) {
		verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
		log_buf(VERB_ALGO, "dropped message", c->buffer);
		outnet->unwanted_replies++;
		if(outnet->unwanted_threshold && ++outnet->unwanted_total
			>= outnet->unwanted_threshold) {
			log_warn("unwanted reply total reached threshold (%u)"
attack." 1502 " defensive action: clearing the cache", 1503 (unsigned)outnet->unwanted_threshold); 1504 fptr_ok(fptr_whitelist_alloc_cleanup( 1505 outnet->unwanted_action)); 1506 (*outnet->unwanted_action)(outnet->unwanted_param); 1507 outnet->unwanted_total = 0; 1508 } 1509 return 0; 1510 } 1511 1512 verbose(VERB_ALGO, "received udp reply."); 1513 log_buf(VERB_ALGO, "udp message", c->buffer); 1514 if(p->pc->cp != c) { 1515 verbose(VERB_QUERY, "received reply id,addr on wrong port. " 1516 "dropped."); 1517 outnet->unwanted_replies++; 1518 if(outnet->unwanted_threshold && ++outnet->unwanted_total 1519 >= outnet->unwanted_threshold) { 1520 log_warn("unwanted reply total reached threshold (%u)" 1521 " you may be under attack." 1522 " defensive action: clearing the cache", 1523 (unsigned)outnet->unwanted_threshold); 1524 fptr_ok(fptr_whitelist_alloc_cleanup( 1525 outnet->unwanted_action)); 1526 (*outnet->unwanted_action)(outnet->unwanted_param); 1527 outnet->unwanted_total = 0; 1528 } 1529 return 0; 1530 } 1531 comm_timer_disable(p->timer); 1532 verbose(VERB_ALGO, "outnet handle udp reply"); 1533 /* delete from tree first in case callback creates a retry */ 1534 (void)rbtree_delete(outnet->pending, p->node.key); 1535 if(p->cb) { 1536 fptr_ok(fptr_whitelist_pending_udp(p->cb)); 1537 (void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info); 1538 } 1539 portcomm_loweruse(outnet, p->pc); 1540 pending_delete(NULL, p); 1541 outnet_send_wait_udp(outnet); 1542 return 0; 1543 } 1544 1545 /** calculate number of ip4 and ip6 interfaces*/ 1546 static void 1547 calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6, 1548 int* num_ip4, int* num_ip6) 1549 { 1550 int i; 1551 *num_ip4 = 0; 1552 *num_ip6 = 0; 1553 if(num_ifs <= 0) { 1554 if(do_ip4) 1555 *num_ip4 = 1; 1556 if(do_ip6) 1557 *num_ip6 = 1; 1558 return; 1559 } 1560 for(i=0; i<num_ifs; i++) 1561 { 1562 if(str_is_ip6(ifs[i])) { 1563 if(do_ip6) 1564 (*num_ip6)++; 1565 } else { 1566 if(do_ip4) 1567 (*num_ip4)++; 1568 } 1569 } 1570 } 1571 1572 void 1573 pending_udp_timer_delay_cb(void* arg) 1574 { 1575 struct pending* p = (struct pending*)arg; 1576 struct outside_network* outnet = p->outnet; 1577 verbose(VERB_ALGO, "timeout udp with delay"); 1578 portcomm_loweruse(outnet, p->pc); 1579 pending_delete(outnet, p); 1580 outnet_send_wait_udp(outnet); 1581 } 1582 1583 void 1584 pending_udp_timer_cb(void *arg) 1585 { 1586 struct pending* p = (struct pending*)arg; 1587 struct outside_network* outnet = p->outnet; 1588 /* it timed out */ 1589 verbose(VERB_ALGO, "timeout udp"); 1590 if(p->cb) { 1591 fptr_ok(fptr_whitelist_pending_udp(p->cb)); 1592 (void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL); 1593 } 1594 /* if delayclose, keep port open for a longer time. 
	 * But if the udpwaitlist exists, then we are struggling to
	 * keep up with demand for sockets, so do not wait, but service
	 * the customer (customer service more important than portICMPs) */
	if(outnet->delayclose && !outnet->udp_wait_first) {
		p->cb = NULL;
		p->timer->callback = &pending_udp_timer_delay_cb;
		comm_timer_set(p->timer, &outnet->delay_tv);
		return;
	}
	portcomm_loweruse(outnet, p->pc);
	pending_delete(outnet, p);
	outnet_send_wait_udp(outnet);
}

/** create pending_tcp buffers */
static int
create_pending_tcp(struct outside_network* outnet, size_t bufsize)
{
	size_t i;
	if(outnet->num_tcp == 0)
		return 1; /* no tcp needed, nothing to do */
	if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
		outnet->num_tcp, sizeof(struct pending_tcp*))))
		return 0;
	for(i=0; i<outnet->num_tcp; i++) {
		if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
			sizeof(struct pending_tcp))))
			return 0;
		outnet->tcp_conns[i]->next_free = outnet->tcp_free;
		outnet->tcp_free = outnet->tcp_conns[i];
		outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
			outnet->base, bufsize, outnet_tcp_cb,
			outnet->tcp_conns[i]);
		if(!outnet->tcp_conns[i]->c)
			return 0;
	}
	return 1;
}

/** setup an outgoing interface, ready address */
static int setup_if(struct port_if* pif, const char* addrstr,
	int* avail, int numavail, size_t numfd)
{
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
	pif->avail_total = numavail;
	pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
	if(!pif->avail_ports)
		return 0;
#endif
	if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
	   !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
		&pif->addr, &pif->addrlen, &pif->pfxlen))
		return 0;
	pif->maxout = (int)numfd;
	pif->inuse = 0;
	pif->out = (struct port_comm**)calloc(numfd,
		sizeof(struct port_comm*));
	if(!pif->out)
		return 0;
	return 1;
}

struct outside_network*
outside_network_create(struct comm_base *base, size_t bufsize,
	size_t num_ports, char** ifs, int num_ifs, int do_ip4,
	int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
	struct ub_randstate* rnd, int use_caps_for_id, int* availports,
	int numavailports, size_t unwanted_threshold, int tcp_mss,
	void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
	void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
	int udp_connect, int max_reuse_tcp_queries, int tcp_reuse_timeout,
	int tcp_auth_query_timeout)
{
	struct outside_network* outnet = (struct outside_network*)
		calloc(1, sizeof(struct outside_network));
	size_t k;
	if(!outnet) {
		log_err("malloc failed");
		return NULL;
	}
	comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
	outnet->base = base;
	outnet->num_tcp = num_tcp;
	outnet->max_reuse_tcp_queries = max_reuse_tcp_queries;
	outnet->tcp_reuse_timeout = tcp_reuse_timeout;
	outnet->tcp_auth_query_timeout = tcp_auth_query_timeout;
	outnet->num_tcp_outgoing = 0;
	outnet->num_udp_outgoing = 0;
	outnet->infra = infra;
	outnet->rnd = rnd;
	outnet->sslctx = sslctx;
	outnet->tls_use_sni = tls_use_sni;
#ifdef USE_DNSTAP
	outnet->dtenv = dtenv;
#else
	(void)dtenv;
#endif
	outnet->svcd_overhead = 0;
1693 outnet->want_to_quit = 0; 1694 outnet->unwanted_threshold = unwanted_threshold; 1695 outnet->unwanted_action = unwanted_action; 1696 outnet->unwanted_param = unwanted_param; 1697 outnet->use_caps_for_id = use_caps_for_id; 1698 outnet->do_udp = do_udp; 1699 outnet->tcp_mss = tcp_mss; 1700 outnet->ip_dscp = dscp; 1701 #ifndef S_SPLINT_S 1702 if(delayclose) { 1703 outnet->delayclose = 1; 1704 outnet->delay_tv.tv_sec = delayclose/1000; 1705 outnet->delay_tv.tv_usec = (delayclose%1000)*1000; 1706 } 1707 #endif 1708 if(udp_connect) { 1709 outnet->udp_connect = 1; 1710 } 1711 if(numavailports == 0 || num_ports == 0) { 1712 log_err("no outgoing ports available"); 1713 outside_network_delete(outnet); 1714 return NULL; 1715 } 1716 #ifndef INET6 1717 do_ip6 = 0; 1718 #endif 1719 calc_num46(ifs, num_ifs, do_ip4, do_ip6, 1720 &outnet->num_ip4, &outnet->num_ip6); 1721 if(outnet->num_ip4 != 0) { 1722 if(!(outnet->ip4_ifs = (struct port_if*)calloc( 1723 (size_t)outnet->num_ip4, sizeof(struct port_if)))) { 1724 log_err("malloc failed"); 1725 outside_network_delete(outnet); 1726 return NULL; 1727 } 1728 } 1729 if(outnet->num_ip6 != 0) { 1730 if(!(outnet->ip6_ifs = (struct port_if*)calloc( 1731 (size_t)outnet->num_ip6, sizeof(struct port_if)))) { 1732 log_err("malloc failed"); 1733 outside_network_delete(outnet); 1734 return NULL; 1735 } 1736 } 1737 if( !(outnet->udp_buff = sldns_buffer_new(bufsize)) || 1738 !(outnet->pending = rbtree_create(pending_cmp)) || 1739 !(outnet->serviced = rbtree_create(serviced_cmp)) || 1740 !create_pending_tcp(outnet, bufsize)) { 1741 log_err("malloc failed"); 1742 outside_network_delete(outnet); 1743 return NULL; 1744 } 1745 rbtree_init(&outnet->tcp_reuse, reuse_cmp); 1746 outnet->tcp_reuse_max = num_tcp; 1747 1748 /* allocate commpoints */ 1749 for(k=0; k<num_ports; k++) { 1750 struct port_comm* pc; 1751 pc = (struct port_comm*)calloc(1, sizeof(*pc)); 1752 if(!pc) { 1753 log_err("malloc failed"); 1754 outside_network_delete(outnet); 1755 return NULL; 1756 } 1757 pc->cp = comm_point_create_udp(outnet->base, -1, 1758 outnet->udp_buff, 0, outnet_udp_cb, outnet, NULL); 1759 if(!pc->cp) { 1760 log_err("malloc failed"); 1761 free(pc); 1762 outside_network_delete(outnet); 1763 return NULL; 1764 } 1765 pc->next = outnet->unused_fds; 1766 outnet->unused_fds = pc; 1767 } 1768 1769 /* allocate interfaces */ 1770 if(num_ifs == 0) { 1771 if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0", 1772 availports, numavailports, num_ports)) { 1773 log_err("malloc failed"); 1774 outside_network_delete(outnet); 1775 return NULL; 1776 } 1777 if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::", 1778 availports, numavailports, num_ports)) { 1779 log_err("malloc failed"); 1780 outside_network_delete(outnet); 1781 return NULL; 1782 } 1783 } else { 1784 size_t done_4 = 0, done_6 = 0; 1785 int i; 1786 for(i=0; i<num_ifs; i++) { 1787 if(str_is_ip6(ifs[i]) && do_ip6) { 1788 if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i], 1789 availports, numavailports, num_ports)){ 1790 log_err("malloc failed"); 1791 outside_network_delete(outnet); 1792 return NULL; 1793 } 1794 done_6++; 1795 } 1796 if(!str_is_ip6(ifs[i]) && do_ip4) { 1797 if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i], 1798 availports, numavailports, num_ports)){ 1799 log_err("malloc failed"); 1800 outside_network_delete(outnet); 1801 return NULL; 1802 } 1803 done_4++; 1804 } 1805 } 1806 } 1807 return outnet; 1808 } 1809 1810 /** helper pending delete */ 1811 static void 1812 pending_node_del(rbnode_type* node, void* arg) 1813 { 1814 struct pending* pend 
= (struct pending*)node; 1815 struct outside_network* outnet = (struct outside_network*)arg; 1816 pending_delete(outnet, pend); 1817 } 1818 1819 /** helper serviced delete */ 1820 static void 1821 serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg)) 1822 { 1823 struct serviced_query* sq = (struct serviced_query*)node; 1824 alloc_reg_release(sq->alloc, sq->region); 1825 if(sq->timer) 1826 comm_timer_delete(sq->timer); 1827 free(sq); 1828 } 1829 1830 void 1831 outside_network_quit_prepare(struct outside_network* outnet) 1832 { 1833 if(!outnet) 1834 return; 1835 /* prevent queued items from being sent */ 1836 outnet->want_to_quit = 1; 1837 } 1838 1839 void 1840 outside_network_delete(struct outside_network* outnet) 1841 { 1842 if(!outnet) 1843 return; 1844 outnet->want_to_quit = 1; 1845 /* check every element, since we can be called on malloc error */ 1846 if(outnet->pending) { 1847 /* free pending elements, but do no unlink from tree. */ 1848 traverse_postorder(outnet->pending, pending_node_del, NULL); 1849 free(outnet->pending); 1850 } 1851 if(outnet->serviced) { 1852 traverse_postorder(outnet->serviced, serviced_node_del, NULL); 1853 free(outnet->serviced); 1854 } 1855 if(outnet->udp_buff) 1856 sldns_buffer_free(outnet->udp_buff); 1857 if(outnet->unused_fds) { 1858 struct port_comm* p = outnet->unused_fds, *np; 1859 while(p) { 1860 np = p->next; 1861 comm_point_delete(p->cp); 1862 free(p); 1863 p = np; 1864 } 1865 outnet->unused_fds = NULL; 1866 } 1867 if(outnet->ip4_ifs) { 1868 int i, k; 1869 for(i=0; i<outnet->num_ip4; i++) { 1870 for(k=0; k<outnet->ip4_ifs[i].inuse; k++) { 1871 struct port_comm* pc = outnet->ip4_ifs[i]. 1872 out[k]; 1873 comm_point_delete(pc->cp); 1874 free(pc); 1875 } 1876 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1877 free(outnet->ip4_ifs[i].avail_ports); 1878 #endif 1879 free(outnet->ip4_ifs[i].out); 1880 } 1881 free(outnet->ip4_ifs); 1882 } 1883 if(outnet->ip6_ifs) { 1884 int i, k; 1885 for(i=0; i<outnet->num_ip6; i++) { 1886 for(k=0; k<outnet->ip6_ifs[i].inuse; k++) { 1887 struct port_comm* pc = outnet->ip6_ifs[i]. 
1888 out[k]; 1889 comm_point_delete(pc->cp); 1890 free(pc); 1891 } 1892 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1893 free(outnet->ip6_ifs[i].avail_ports); 1894 #endif 1895 free(outnet->ip6_ifs[i].out); 1896 } 1897 free(outnet->ip6_ifs); 1898 } 1899 if(outnet->tcp_conns) { 1900 size_t i; 1901 for(i=0; i<outnet->num_tcp; i++) 1902 if(outnet->tcp_conns[i]) { 1903 struct pending_tcp* pend; 1904 pend = outnet->tcp_conns[i]; 1905 if(pend->reuse.item_on_lru_list) { 1906 /* delete waiting_tcp elements that 1907 * the tcp conn is working on */ 1908 decommission_pending_tcp(outnet, pend); 1909 } 1910 comm_point_delete(outnet->tcp_conns[i]->c); 1911 free(outnet->tcp_conns[i]); 1912 outnet->tcp_conns[i] = NULL; 1913 } 1914 free(outnet->tcp_conns); 1915 outnet->tcp_conns = NULL; 1916 } 1917 if(outnet->tcp_wait_first) { 1918 struct waiting_tcp* p = outnet->tcp_wait_first, *np; 1919 while(p) { 1920 np = p->next_waiting; 1921 waiting_tcp_delete(p); 1922 p = np; 1923 } 1924 } 1925 /* was allocated in struct pending that was deleted above */ 1926 rbtree_init(&outnet->tcp_reuse, reuse_cmp); 1927 outnet->tcp_reuse_first = NULL; 1928 outnet->tcp_reuse_last = NULL; 1929 if(outnet->udp_wait_first) { 1930 struct pending* p = outnet->udp_wait_first, *np; 1931 while(p) { 1932 np = p->next_waiting; 1933 pending_delete(NULL, p); 1934 p = np; 1935 } 1936 } 1937 free(outnet); 1938 } 1939 1940 void 1941 pending_delete(struct outside_network* outnet, struct pending* p) 1942 { 1943 if(!p) 1944 return; 1945 if(outnet && outnet->udp_wait_first && 1946 (p->next_waiting || p == outnet->udp_wait_last) ) { 1947 /* delete from waiting list, if it is in the waiting list */ 1948 struct pending* prev = NULL, *x = outnet->udp_wait_first; 1949 while(x && x != p) { 1950 prev = x; 1951 x = x->next_waiting; 1952 } 1953 if(x) { 1954 log_assert(x == p); 1955 if(prev) 1956 prev->next_waiting = p->next_waiting; 1957 else outnet->udp_wait_first = p->next_waiting; 1958 if(outnet->udp_wait_last == p) 1959 outnet->udp_wait_last = prev; 1960 } 1961 } 1962 if(outnet) { 1963 (void)rbtree_delete(outnet->pending, p->node.key); 1964 } 1965 if(p->timer) 1966 comm_timer_delete(p->timer); 1967 free(p->pkt); 1968 free(p); 1969 } 1970 1971 static void 1972 sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd) 1973 { 1974 int i, last; 1975 if(!(pfxlen > 0 && pfxlen < 128)) 1976 return; 1977 for(i = 0; i < (128 - pfxlen) / 8; i++) { 1978 sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256); 1979 } 1980 last = pfxlen & 7; 1981 if(last != 0) { 1982 sa->sin6_addr.s6_addr[15-i] |= 1983 ((0xFF >> last) & ub_random_max(rnd, 256)); 1984 } 1985 } 1986 1987 /** 1988 * Try to open a UDP socket for outgoing communication. 1989 * Sets sockets options as needed. 1990 * @param addr: socket address. 1991 * @param addrlen: length of address. 1992 * @param pfxlen: length of network prefix (for address randomisation). 1993 * @param port: port override for addr. 1994 * @param inuse: if -1 is returned, this bool means the port was in use. 1995 * @param rnd: random state (for address randomisation). 1996 * @param dscp: DSCP to use. 
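 * Note (sketch of behaviour from the code below): for IPv6 with a nonzero
 * pfxlen the host bits of the address are randomised (sai6_putrandom) and
 * the socket is created with the freebind option, so the randomised source
 * address can be bound even if it is not configured on an interface.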
1997 * @return fd or -1 1998 */ 1999 static int 2000 udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen, 2001 int port, int* inuse, struct ub_randstate* rnd, int dscp) 2002 { 2003 int fd, noproto; 2004 if(addr_is_ip6(addr, addrlen)) { 2005 int freebind = 0; 2006 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr; 2007 sa.sin6_port = (in_port_t)htons((uint16_t)port); 2008 sa.sin6_flowinfo = 0; 2009 sa.sin6_scope_id = 0; 2010 if(pfxlen != 0) { 2011 freebind = 1; 2012 sai6_putrandom(&sa, pfxlen, rnd); 2013 } 2014 fd = create_udp_sock(AF_INET6, SOCK_DGRAM, 2015 (struct sockaddr*)&sa, addrlen, 1, inuse, &noproto, 2016 0, 0, 0, NULL, 0, freebind, 0, dscp); 2017 } else { 2018 struct sockaddr_in* sa = (struct sockaddr_in*)addr; 2019 sa->sin_port = (in_port_t)htons((uint16_t)port); 2020 fd = create_udp_sock(AF_INET, SOCK_DGRAM, 2021 (struct sockaddr*)addr, addrlen, 1, inuse, &noproto, 2022 0, 0, 0, NULL, 0, 0, 0, dscp); 2023 } 2024 return fd; 2025 } 2026 2027 /** Select random ID */ 2028 static int 2029 select_id(struct outside_network* outnet, struct pending* pend, 2030 sldns_buffer* packet) 2031 { 2032 int id_tries = 0; 2033 pend->id = GET_RANDOM_ID(outnet->rnd); 2034 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id); 2035 2036 /* insert in tree */ 2037 pend->node.key = pend; 2038 while(!rbtree_insert(outnet->pending, &pend->node)) { 2039 /* change ID to avoid collision */ 2040 pend->id = GET_RANDOM_ID(outnet->rnd); 2041 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id); 2042 id_tries++; 2043 if(id_tries == MAX_ID_RETRY) { 2044 pend->id=99999; /* non existent ID */ 2045 log_err("failed to generate unique ID, drop msg"); 2046 return 0; 2047 } 2048 } 2049 verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id); 2050 return 1; 2051 } 2052 2053 /** return true is UDP connect error needs to be logged */ 2054 static int udp_connect_needs_log(int err, struct sockaddr_storage* addr, 2055 socklen_t addrlen) 2056 { 2057 switch(err) { 2058 case ECONNREFUSED: 2059 # ifdef ENETUNREACH 2060 case ENETUNREACH: 2061 # endif 2062 # ifdef EHOSTDOWN 2063 case EHOSTDOWN: 2064 # endif 2065 # ifdef EHOSTUNREACH 2066 case EHOSTUNREACH: 2067 # endif 2068 # ifdef ENETDOWN 2069 case ENETDOWN: 2070 # endif 2071 # ifdef EADDRNOTAVAIL 2072 case EADDRNOTAVAIL: 2073 # endif 2074 case EPERM: 2075 case EACCES: 2076 if(verbosity >= VERB_ALGO) 2077 return 1; 2078 return 0; 2079 case EINVAL: 2080 /* Stop 'Invalid argument for fe80::/10' addresses appearing 2081 * in the logs, at low verbosity. They cannot be sent to. 
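	 * (link-local destinations typically need a scope id to be usable;
	 * without one the send fails with EINVAL, which is expected and
	 * therefore only worth logging at high verbosity)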
*/ 2082 if(addr_is_ip6linklocal(addr, addrlen)) { 2083 if(verbosity >= VERB_ALGO) 2084 return 1; 2085 return 0; 2086 } 2087 break; 2088 default: 2089 break; 2090 } 2091 return 1; 2092 } 2093 2094 2095 /** Select random interface and port */ 2096 static int 2097 select_ifport(struct outside_network* outnet, struct pending* pend, 2098 int num_if, struct port_if* ifs) 2099 { 2100 int my_if, my_port, fd, portno, inuse, tries=0; 2101 struct port_if* pif; 2102 /* randomly select interface and port */ 2103 if(num_if == 0) { 2104 verbose(VERB_QUERY, "Need to send query but have no " 2105 "outgoing interfaces of that family"); 2106 return 0; 2107 } 2108 log_assert(outnet->unused_fds); 2109 tries = 0; 2110 while(1) { 2111 my_if = ub_random_max(outnet->rnd, num_if); 2112 pif = &ifs[my_if]; 2113 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 2114 if(outnet->udp_connect) { 2115 /* if we connect() we cannot reuse fds for a port */ 2116 if(pif->inuse >= pif->avail_total) { 2117 tries++; 2118 if(tries < MAX_PORT_RETRY) 2119 continue; 2120 log_err("failed to find an open port, drop msg"); 2121 return 0; 2122 } 2123 my_port = pif->inuse + ub_random_max(outnet->rnd, 2124 pif->avail_total - pif->inuse); 2125 } else { 2126 my_port = ub_random_max(outnet->rnd, pif->avail_total); 2127 if(my_port < pif->inuse) { 2128 /* port already open */ 2129 pend->pc = pif->out[my_port]; 2130 verbose(VERB_ALGO, "using UDP if=%d port=%d", 2131 my_if, pend->pc->number); 2132 break; 2133 } 2134 } 2135 /* try to open new port, if fails, loop to try again */ 2136 log_assert(pif->inuse < pif->maxout); 2137 portno = pif->avail_ports[my_port - pif->inuse]; 2138 #else 2139 my_port = portno = 0; 2140 #endif 2141 fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen, 2142 portno, &inuse, outnet->rnd, outnet->ip_dscp); 2143 if(fd == -1 && !inuse) { 2144 /* nonrecoverable error making socket */ 2145 return 0; 2146 } 2147 if(fd != -1) { 2148 verbose(VERB_ALGO, "opened UDP if=%d port=%d", 2149 my_if, portno); 2150 if(outnet->udp_connect) { 2151 /* connect() to the destination */ 2152 if(connect(fd, (struct sockaddr*)&pend->addr, 2153 pend->addrlen) < 0) { 2154 if(udp_connect_needs_log(errno, 2155 &pend->addr, pend->addrlen)) { 2156 log_err_addr("udp connect failed", 2157 strerror(errno), &pend->addr, 2158 pend->addrlen); 2159 } 2160 sock_close(fd); 2161 return 0; 2162 } 2163 } 2164 /* grab fd */ 2165 pend->pc = outnet->unused_fds; 2166 outnet->unused_fds = pend->pc->next; 2167 2168 /* setup portcomm */ 2169 pend->pc->next = NULL; 2170 pend->pc->number = portno; 2171 pend->pc->pif = pif; 2172 pend->pc->index = pif->inuse; 2173 pend->pc->num_outstanding = 0; 2174 comm_point_start_listening(pend->pc->cp, fd, -1); 2175 2176 /* grab port in interface */ 2177 pif->out[pif->inuse] = pend->pc; 2178 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 2179 pif->avail_ports[my_port - pif->inuse] = 2180 pif->avail_ports[pif->avail_total-pif->inuse-1]; 2181 #endif 2182 pif->inuse++; 2183 break; 2184 } 2185 /* failed, already in use */ 2186 verbose(VERB_QUERY, "port %d in use, trying another", portno); 2187 tries++; 2188 if(tries == MAX_PORT_RETRY) { 2189 log_err("failed to find an open port, drop msg"); 2190 return 0; 2191 } 2192 } 2193 log_assert(pend->pc); 2194 pend->pc->num_outstanding++; 2195 2196 return 1; 2197 } 2198 2199 static int 2200 randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout) 2201 { 2202 struct timeval tv; 2203 struct outside_network* outnet = pend->sq->outnet; 2204 2205 /* select id */ 2206 if(!select_id(outnet, 
pend, packet)) { 2207 return 0; 2208 } 2209 2210 /* select src_if, port */ 2211 if(addr_is_ip6(&pend->addr, pend->addrlen)) { 2212 if(!select_ifport(outnet, pend, 2213 outnet->num_ip6, outnet->ip6_ifs)) 2214 return 0; 2215 } else { 2216 if(!select_ifport(outnet, pend, 2217 outnet->num_ip4, outnet->ip4_ifs)) 2218 return 0; 2219 } 2220 log_assert(pend->pc && pend->pc->cp); 2221 2222 /* send it over the commlink */ 2223 if(!comm_point_send_udp_msg(pend->pc->cp, packet, 2224 (struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) { 2225 portcomm_loweruse(outnet, pend->pc); 2226 return 0; 2227 } 2228 outnet->num_udp_outgoing++; 2229 2230 /* system calls to set timeout after sending UDP to make roundtrip 2231 smaller. */ 2232 #ifndef S_SPLINT_S 2233 tv.tv_sec = timeout/1000; 2234 tv.tv_usec = (timeout%1000)*1000; 2235 #endif 2236 comm_timer_set(pend->timer, &tv); 2237 2238 #ifdef USE_DNSTAP 2239 /* 2240 * sending src (local service)/dst (upstream) addresses over DNSTAP 2241 * There are no chances to get the src (local service) addr if unbound 2242 * is not configured with specific outgoing IP-addresses. So we will 2243 * pass 0.0.0.0 (::) to argument for 2244 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls. 2245 */ 2246 if(outnet->dtenv && 2247 (outnet->dtenv->log_resolver_query_messages || 2248 outnet->dtenv->log_forwarder_query_messages)) { 2249 log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen); 2250 log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen); 2251 dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp, NULL, 2252 pend->sq->zone, pend->sq->zonelen, packet); 2253 } 2254 #endif 2255 return 1; 2256 } 2257 2258 struct pending* 2259 pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet, 2260 int timeout, comm_point_callback_type* cb, void* cb_arg) 2261 { 2262 struct pending* pend = (struct pending*)calloc(1, sizeof(*pend)); 2263 if(!pend) return NULL; 2264 pend->outnet = sq->outnet; 2265 pend->sq = sq; 2266 pend->addrlen = sq->addrlen; 2267 memmove(&pend->addr, &sq->addr, sq->addrlen); 2268 pend->cb = cb; 2269 pend->cb_arg = cb_arg; 2270 pend->node.key = pend; 2271 pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb, 2272 pend); 2273 if(!pend->timer) { 2274 free(pend); 2275 return NULL; 2276 } 2277 2278 if(sq->outnet->unused_fds == NULL) { 2279 /* no unused fd, cannot create a new port (randomly) */ 2280 verbose(VERB_ALGO, "no fds available, udp query waiting"); 2281 pend->timeout = timeout; 2282 pend->pkt_len = sldns_buffer_limit(packet); 2283 pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet), 2284 pend->pkt_len); 2285 if(!pend->pkt) { 2286 comm_timer_delete(pend->timer); 2287 free(pend); 2288 return NULL; 2289 } 2290 /* put at end of waiting list */ 2291 if(sq->outnet->udp_wait_last) 2292 sq->outnet->udp_wait_last->next_waiting = pend; 2293 else 2294 sq->outnet->udp_wait_first = pend; 2295 sq->outnet->udp_wait_last = pend; 2296 return pend; 2297 } 2298 log_assert(!sq->busy); 2299 sq->busy = 1; 2300 if(!randomize_and_send_udp(pend, packet, timeout)) { 2301 pending_delete(sq->outnet, pend); 2302 return NULL; 2303 } 2304 sq->busy = 0; 2305 return pend; 2306 } 2307 2308 void 2309 outnet_tcptimer(void* arg) 2310 { 2311 struct waiting_tcp* w = (struct waiting_tcp*)arg; 2312 struct outside_network* outnet = w->outnet; 2313 verbose(VERB_CLIENT, "outnet_tcptimer"); 2314 if(w->on_tcp_waiting_list) { 2315 /* it is on the waiting list */ 2316 
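		/* no tcp buffer became available before the timer fired;
		 * report the timeout to the callback and free the entry */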
outnet_waiting_tcp_list_remove(outnet, w); 2317 waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL); 2318 waiting_tcp_delete(w); 2319 } else { 2320 /* it was in use */ 2321 struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting; 2322 reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT); 2323 } 2324 use_free_buffer(outnet); 2325 } 2326 2327 /** close the oldest reuse_tcp connection to make a fd and struct pend 2328 * available for a new stream connection */ 2329 static void 2330 reuse_tcp_close_oldest(struct outside_network* outnet) 2331 { 2332 struct reuse_tcp* reuse; 2333 verbose(VERB_CLIENT, "reuse_tcp_close_oldest"); 2334 reuse = reuse_tcp_lru_snip(outnet); 2335 if(!reuse) return; 2336 /* free up */ 2337 reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED); 2338 } 2339 2340 static uint16_t 2341 tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse) 2342 { 2343 if(reuse) 2344 return reuse_tcp_select_id(reuse, outnet); 2345 return GET_RANDOM_ID(outnet->rnd); 2346 } 2347 2348 /** find spare ID value for reuse tcp stream. That is random and also does 2349 * not collide with an existing query ID that is in use or waiting */ 2350 uint16_t 2351 reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet) 2352 { 2353 uint16_t id = 0, curid, nextid; 2354 const int try_random = 2000; 2355 int i; 2356 unsigned select, count, space; 2357 rbnode_type* node; 2358 2359 /* make really sure the tree is not empty */ 2360 if(reuse->tree_by_id.count == 0) { 2361 id = GET_RANDOM_ID(outnet->rnd); 2362 return id; 2363 } 2364 2365 /* try to find random empty spots by picking them */ 2366 for(i = 0; i<try_random; i++) { 2367 id = GET_RANDOM_ID(outnet->rnd); 2368 if(!reuse_tcp_by_id_find(reuse, id)) { 2369 return id; 2370 } 2371 } 2372 2373 /* equally pick a random unused element from the tree that is 2374 * not in use. Pick a the n-th index of an unused number, 2375 * then loop over the empty spaces in the tree and find it */ 2376 log_assert(reuse->tree_by_id.count < 0xffff); 2377 select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count); 2378 /* select value now in 0 .. 
num free - 1 */ 2379 2380 count = 0; /* number of free spaces passed by */ 2381 node = rbtree_first(&reuse->tree_by_id); 2382 log_assert(node && node != RBTREE_NULL); /* tree not empty */ 2383 /* see if select is before first node */ 2384 if(select < (unsigned)tree_by_id_get_id(node)) 2385 return select; 2386 count += tree_by_id_get_id(node); 2387 /* perhaps select is between nodes */ 2388 while(node && node != RBTREE_NULL) { 2389 rbnode_type* next = rbtree_next(node); 2390 if(next && next != RBTREE_NULL) { 2391 curid = tree_by_id_get_id(node); 2392 nextid = tree_by_id_get_id(next); 2393 log_assert(curid < nextid); 2394 if(curid != 0xffff && curid + 1 < nextid) { 2395 /* space between nodes */ 2396 space = nextid - curid - 1; 2397 log_assert(select >= count); 2398 if(select < count + space) { 2399 /* here it is */ 2400 return curid + 1 + (select - count); 2401 } 2402 count += space; 2403 } 2404 } 2405 node = next; 2406 } 2407 2408 /* select is after the last node */ 2409 /* count is the number of free positions before the nodes in the 2410 * tree */ 2411 node = rbtree_last(&reuse->tree_by_id); 2412 log_assert(node && node != RBTREE_NULL); /* tree not empty */ 2413 curid = tree_by_id_get_id(node); 2414 log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff); 2415 return curid + 1 + (select - count); 2416 } 2417 2418 struct waiting_tcp* 2419 pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet, 2420 int timeout, comm_point_callback_type* callback, void* callback_arg) 2421 { 2422 struct pending_tcp* pend = sq->outnet->tcp_free; 2423 struct reuse_tcp* reuse = NULL; 2424 struct waiting_tcp* w; 2425 2426 verbose(VERB_CLIENT, "pending_tcp_query"); 2427 if(sldns_buffer_limit(packet) < sizeof(uint16_t)) { 2428 verbose(VERB_ALGO, "pending tcp query with too short buffer < 2"); 2429 return NULL; 2430 } 2431 2432 /* find out if a reused stream to the target exists */ 2433 /* if so, take it into use */ 2434 reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen, 2435 sq->ssl_upstream); 2436 if(reuse) { 2437 log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse); 2438 log_assert(reuse->pending); 2439 pend = reuse->pending; 2440 reuse_tcp_lru_touch(sq->outnet, reuse); 2441 } 2442 2443 log_assert(!reuse || (reuse && pend)); 2444 /* if !pend but we have reuse streams, close a reuse stream 2445 * to be able to open a new one to this target, no use waiting 2446 * to reuse a file descriptor while another query needs to use 2447 * that buffer and file descriptor now. 
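	 * For that, the oldest reuse stream is closed (reuse_tcp_close_oldest
	 * below) so that a pending_tcp entry becomes available on tcp_free.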
*/ 2448 if(!pend) { 2449 reuse_tcp_close_oldest(sq->outnet); 2450 pend = sq->outnet->tcp_free; 2451 log_assert(!reuse || (pend == reuse->pending)); 2452 } 2453 2454 /* allocate space to store query */ 2455 w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp) 2456 + sldns_buffer_limit(packet)); 2457 if(!w) { 2458 return NULL; 2459 } 2460 if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) { 2461 free(w); 2462 return NULL; 2463 } 2464 w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp); 2465 w->pkt_len = sldns_buffer_limit(packet); 2466 memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len); 2467 w->id = tcp_select_id(sq->outnet, reuse); 2468 LDNS_ID_SET(w->pkt, w->id); 2469 memcpy(&w->addr, &sq->addr, sq->addrlen); 2470 w->addrlen = sq->addrlen; 2471 w->outnet = sq->outnet; 2472 w->on_tcp_waiting_list = 0; 2473 w->next_waiting = NULL; 2474 w->cb = callback; 2475 w->cb_arg = callback_arg; 2476 w->ssl_upstream = sq->ssl_upstream; 2477 w->tls_auth_name = sq->tls_auth_name; 2478 w->timeout = timeout; 2479 w->id_node.key = NULL; 2480 w->write_wait_prev = NULL; 2481 w->write_wait_next = NULL; 2482 w->write_wait_queued = 0; 2483 w->error_count = 0; 2484 #ifdef USE_DNSTAP 2485 w->sq = NULL; 2486 #endif 2487 w->in_cb_and_decommission = 0; 2488 if(pend) { 2489 /* we have a buffer available right now */ 2490 if(reuse) { 2491 log_assert(reuse == &pend->reuse); 2492 /* reuse existing fd, write query and continue */ 2493 /* store query in tree by id */ 2494 verbose(VERB_CLIENT, "pending_tcp_query: reuse, store"); 2495 w->next_waiting = (void*)pend; 2496 reuse_tree_by_id_insert(&pend->reuse, w); 2497 /* can we write right now? */ 2498 if(pend->query == NULL) { 2499 /* write straight away */ 2500 /* stop the timer on read of the fd */ 2501 comm_point_stop_listening(pend->c); 2502 pend->query = w; 2503 outnet_tcp_take_query_setup(pend->c->fd, pend, 2504 w); 2505 } else { 2506 /* put it in the waiting list for 2507 * this stream */ 2508 reuse_write_wait_push_back(&pend->reuse, w); 2509 } 2510 } else { 2511 /* create new fd and connect to addr, setup to 2512 * write query */ 2513 verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect"); 2514 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp); 2515 pend->reuse.pending = pend; 2516 memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen); 2517 pend->reuse.addrlen = sq->addrlen; 2518 if(!outnet_tcp_take_into_use(w)) { 2519 waiting_tcp_delete(w); 2520 return NULL; 2521 } 2522 } 2523 #ifdef USE_DNSTAP 2524 if(sq->outnet->dtenv && 2525 (sq->outnet->dtenv->log_resolver_query_messages || 2526 sq->outnet->dtenv->log_forwarder_query_messages)) { 2527 /* use w->pkt, because it has the ID value */ 2528 sldns_buffer tmp; 2529 sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len); 2530 dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr, 2531 &pend->pi->addr, comm_tcp, NULL, sq->zone, 2532 sq->zonelen, &tmp); 2533 } 2534 #endif 2535 } else { 2536 /* queue up */ 2537 /* waiting for a buffer on the outside network buffer wait 2538 * list */ 2539 verbose(VERB_CLIENT, "pending_tcp_query: queue to wait"); 2540 #ifdef USE_DNSTAP 2541 w->sq = sq; 2542 #endif 2543 outnet_waiting_tcp_list_add(sq->outnet, w, 1); 2544 } 2545 return w; 2546 } 2547 2548 /** create query for serviced queries */ 2549 static void 2550 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen, 2551 uint16_t qtype, uint16_t qclass, uint16_t flags) 2552 { 2553 sldns_buffer_clear(buff); 2554 /* skip id */ 2555 sldns_buffer_write_u16(buff, flags); 2556 
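	/* note that the 2-octet ID is not part of this buffer; serviced_encode()
	 * later prepends an ID placeholder, so the stored query starts at the
	 * flags field and the qname begins at offset 10 */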
sldns_buffer_write_u16(buff, 1); /* qdcount */ 2557 sldns_buffer_write_u16(buff, 0); /* ancount */ 2558 sldns_buffer_write_u16(buff, 0); /* nscount */ 2559 sldns_buffer_write_u16(buff, 0); /* arcount */ 2560 sldns_buffer_write(buff, qname, qnamelen); 2561 sldns_buffer_write_u16(buff, qtype); 2562 sldns_buffer_write_u16(buff, qclass); 2563 sldns_buffer_flip(buff); 2564 } 2565 2566 /** lookup serviced query in serviced query rbtree */ 2567 static struct serviced_query* 2568 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec, 2569 struct sockaddr_storage* addr, socklen_t addrlen, 2570 struct edns_option* opt_list) 2571 { 2572 struct serviced_query key; 2573 key.node.key = &key; 2574 key.qbuf = sldns_buffer_begin(buff); 2575 key.qbuflen = sldns_buffer_limit(buff); 2576 key.dnssec = dnssec; 2577 memcpy(&key.addr, addr, addrlen); 2578 key.addrlen = addrlen; 2579 key.outnet = outnet; 2580 key.opt_list = opt_list; 2581 return (struct serviced_query*)rbtree_search(outnet->serviced, &key); 2582 } 2583 2584 void 2585 serviced_timer_cb(void* arg) 2586 { 2587 struct serviced_query* sq = (struct serviced_query*)arg; 2588 struct outside_network* outnet = sq->outnet; 2589 verbose(VERB_ALGO, "serviced send timer"); 2590 /* By the time this cb is called, if we don't have any registered 2591 * callbacks for this serviced_query anymore; do not send. */ 2592 if(!sq->cblist) 2593 goto delete; 2594 /* perform first network action */ 2595 if(outnet->do_udp && !(sq->tcp_upstream || sq->ssl_upstream)) { 2596 if(!serviced_udp_send(sq, outnet->udp_buff)) 2597 goto delete; 2598 } else { 2599 if(!serviced_tcp_send(sq, outnet->udp_buff)) 2600 goto delete; 2601 } 2602 /* Maybe by this time we don't have callbacks attached anymore. Don't 2603 * proactively try to delete; let it run and maybe another callback 2604 * will get attached by the time we get an answer. 
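	 * If the answer then arrives while the callback list is still empty,
	 * serviced_callbacks() runs an empty list and deletes this entry.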
*/ 2605 return; 2606 delete: 2607 serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL); 2608 } 2609 2610 /** Create new serviced entry */ 2611 static struct serviced_query* 2612 serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec, 2613 int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream, 2614 char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen, 2615 uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list, 2616 size_t pad_queries_block_size, struct alloc_cache* alloc, 2617 struct regional* region) 2618 { 2619 struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq)); 2620 struct timeval t; 2621 #ifdef UNBOUND_DEBUG 2622 rbnode_type* ins; 2623 #endif 2624 if(!sq) { 2625 alloc_reg_release(alloc, region); 2626 return NULL; 2627 } 2628 sq->node.key = sq; 2629 sq->alloc = alloc; 2630 sq->region = region; 2631 sq->qbuf = regional_alloc_init(region, sldns_buffer_begin(buff), 2632 sldns_buffer_limit(buff)); 2633 if(!sq->qbuf) { 2634 alloc_reg_release(alloc, region); 2635 free(sq); 2636 return NULL; 2637 } 2638 sq->qbuflen = sldns_buffer_limit(buff); 2639 sq->zone = regional_alloc_init(region, zone, zonelen); 2640 if(!sq->zone) { 2641 alloc_reg_release(alloc, region); 2642 free(sq); 2643 return NULL; 2644 } 2645 sq->zonelen = zonelen; 2646 sq->qtype = qtype; 2647 sq->dnssec = dnssec; 2648 sq->want_dnssec = want_dnssec; 2649 sq->nocaps = nocaps; 2650 sq->tcp_upstream = tcp_upstream; 2651 sq->ssl_upstream = ssl_upstream; 2652 if(tls_auth_name) { 2653 sq->tls_auth_name = regional_strdup(region, tls_auth_name); 2654 if(!sq->tls_auth_name) { 2655 alloc_reg_release(alloc, region); 2656 free(sq); 2657 return NULL; 2658 } 2659 } else { 2660 sq->tls_auth_name = NULL; 2661 } 2662 memcpy(&sq->addr, addr, addrlen); 2663 sq->addrlen = addrlen; 2664 sq->opt_list = opt_list; 2665 sq->busy = 0; 2666 sq->timer = comm_timer_create(outnet->base, serviced_timer_cb, sq); 2667 if(!sq->timer) { 2668 alloc_reg_release(alloc, region); 2669 free(sq); 2670 return NULL; 2671 } 2672 memset(&t, 0, sizeof(t)); 2673 comm_timer_set(sq->timer, &t); 2674 sq->outnet = outnet; 2675 sq->cblist = NULL; 2676 sq->pending = NULL; 2677 sq->status = serviced_initial; 2678 sq->retry = 0; 2679 sq->to_be_deleted = 0; 2680 sq->padding_block_size = pad_queries_block_size; 2681 #ifdef UNBOUND_DEBUG 2682 ins = 2683 #else 2684 (void) 2685 #endif 2686 rbtree_insert(outnet->serviced, &sq->node); 2687 log_assert(ins != NULL); /* must not be already present */ 2688 return sq; 2689 } 2690 2691 /** reuse tcp stream, remove serviced query from stream, 2692 * return true if the stream is kept, false if it is to be closed */ 2693 static int 2694 reuse_tcp_remove_serviced_keep(struct waiting_tcp* w, 2695 struct serviced_query* sq) 2696 { 2697 struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting; 2698 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep"); 2699 /* remove the callback. let query continue to write to not cancel 2700 * the stream itself. 
also keep it as an entry in the tree_by_id, 2701 * in case the answer returns (that we no longer want), but we cannot 2702 * pick the same ID number meanwhile */ 2703 w->cb = NULL; 2704 /* see if can be entered in reuse tree 2705 * for that the FD has to be non-1 */ 2706 if(pend_tcp->c->fd == -1) { 2707 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd"); 2708 return 0; 2709 } 2710 /* if in tree and used by other queries */ 2711 if(pend_tcp->reuse.node.key) { 2712 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries"); 2713 /* do not reset the keepalive timer, for that 2714 * we'd need traffic, and this is where the serviced is 2715 * removed due to state machine internal reasons, 2716 * eg. iterator no longer interested in this query */ 2717 return 1; 2718 } 2719 /* if still open and want to keep it open */ 2720 if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count < 2721 sq->outnet->tcp_reuse_max) { 2722 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open"); 2723 /* set a keepalive timer on it */ 2724 if(!reuse_tcp_insert(sq->outnet, pend_tcp)) { 2725 return 0; 2726 } 2727 reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout); 2728 return 1; 2729 } 2730 return 0; 2731 } 2732 2733 /** cleanup serviced query entry */ 2734 static void 2735 serviced_delete(struct serviced_query* sq) 2736 { 2737 verbose(VERB_CLIENT, "serviced_delete"); 2738 if(sq->pending) { 2739 /* clear up the pending query */ 2740 if(sq->status == serviced_query_UDP_EDNS || 2741 sq->status == serviced_query_UDP || 2742 sq->status == serviced_query_UDP_EDNS_FRAG || 2743 sq->status == serviced_query_UDP_EDNS_fallback) { 2744 struct pending* p = (struct pending*)sq->pending; 2745 verbose(VERB_CLIENT, "serviced_delete: UDP"); 2746 if(p->pc) 2747 portcomm_loweruse(sq->outnet, p->pc); 2748 pending_delete(sq->outnet, p); 2749 /* this call can cause reentrant calls back into the 2750 * mesh */ 2751 outnet_send_wait_udp(sq->outnet); 2752 } else { 2753 struct waiting_tcp* w = (struct waiting_tcp*) 2754 sq->pending; 2755 verbose(VERB_CLIENT, "serviced_delete: TCP"); 2756 log_assert(!(w->write_wait_queued && w->on_tcp_waiting_list)); 2757 /* if on stream-write-waiting list then 2758 * remove from waiting list and waiting_tcp_delete */ 2759 if(w->write_wait_queued) { 2760 struct pending_tcp* pend = 2761 (struct pending_tcp*)w->next_waiting; 2762 verbose(VERB_CLIENT, "serviced_delete: writewait"); 2763 if(!w->in_cb_and_decommission) 2764 reuse_tree_by_id_delete(&pend->reuse, w); 2765 reuse_write_wait_remove(&pend->reuse, w); 2766 if(!w->in_cb_and_decommission) 2767 waiting_tcp_delete(w); 2768 } else if(!w->on_tcp_waiting_list) { 2769 struct pending_tcp* pend = 2770 (struct pending_tcp*)w->next_waiting; 2771 verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep"); 2772 /* w needs to stay on tree_by_id to not assign 2773 * the same ID; remove the callback since its 2774 * serviced_query will be gone. 
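	 * reuse_tcp_remove_serviced_keep() then decides whether the open
	 * stream can be kept for reuse; if not it is decommissioned below.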
*/ 2775 w->cb = NULL; 2776 if(!reuse_tcp_remove_serviced_keep(w, sq)) { 2777 if(!w->in_cb_and_decommission) 2778 reuse_cb_and_decommission(sq->outnet, 2779 pend, NETEVENT_CLOSED); 2780 use_free_buffer(sq->outnet); 2781 } 2782 sq->pending = NULL; 2783 } else { 2784 verbose(VERB_CLIENT, "serviced_delete: tcpwait"); 2785 outnet_waiting_tcp_list_remove(sq->outnet, w); 2786 if(!w->in_cb_and_decommission) 2787 waiting_tcp_delete(w); 2788 } 2789 } 2790 } 2791 /* does not delete from tree, caller has to do that */ 2792 serviced_node_del(&sq->node, NULL); 2793 } 2794 2795 /** perturb a dname capitalization randomly */ 2796 static void 2797 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len) 2798 { 2799 uint8_t lablen; 2800 uint8_t* d = qbuf + 10; 2801 long int random = 0; 2802 int bits = 0; 2803 log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */); 2804 (void)len; 2805 lablen = *d++; 2806 while(lablen) { 2807 while(lablen--) { 2808 /* only perturb A-Z, a-z */ 2809 if(isalpha((unsigned char)*d)) { 2810 /* get a random bit */ 2811 if(bits == 0) { 2812 random = ub_random(rnd); 2813 bits = 30; 2814 } 2815 if(random & 0x1) { 2816 *d = (uint8_t)toupper((unsigned char)*d); 2817 } else { 2818 *d = (uint8_t)tolower((unsigned char)*d); 2819 } 2820 random >>= 1; 2821 bits--; 2822 } 2823 d++; 2824 } 2825 lablen = *d++; 2826 } 2827 if(verbosity >= VERB_ALGO) { 2828 char buf[LDNS_MAX_DOMAINLEN+1]; 2829 dname_str(qbuf+10, buf); 2830 verbose(VERB_ALGO, "qname perturbed to %s", buf); 2831 } 2832 } 2833 2834 static uint16_t 2835 serviced_query_udp_size(struct serviced_query* sq, enum serviced_query_status status) { 2836 uint16_t udp_size; 2837 if(status == serviced_query_UDP_EDNS_FRAG) { 2838 if(addr_is_ip6(&sq->addr, sq->addrlen)) { 2839 if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE) 2840 udp_size = EDNS_FRAG_SIZE_IP6; 2841 else udp_size = EDNS_ADVERTISED_SIZE; 2842 } else { 2843 if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE) 2844 udp_size = EDNS_FRAG_SIZE_IP4; 2845 else udp_size = EDNS_ADVERTISED_SIZE; 2846 } 2847 } else { 2848 udp_size = EDNS_ADVERTISED_SIZE; 2849 } 2850 return udp_size; 2851 } 2852 2853 /** put serviced query into a buffer */ 2854 static void 2855 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns) 2856 { 2857 /* if we are using 0x20 bits for ID randomness, perturb them */ 2858 if(sq->outnet->use_caps_for_id && !sq->nocaps) { 2859 serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen); 2860 } 2861 /* generate query */ 2862 sldns_buffer_clear(buff); 2863 sldns_buffer_write_u16(buff, 0); /* id placeholder */ 2864 sldns_buffer_write(buff, sq->qbuf, sq->qbuflen); 2865 sldns_buffer_flip(buff); 2866 if(with_edns) { 2867 /* add edns section */ 2868 struct edns_data edns; 2869 struct edns_option padding_option; 2870 edns.edns_present = 1; 2871 edns.ext_rcode = 0; 2872 edns.edns_version = EDNS_ADVERTISED_VERSION; 2873 edns.opt_list_in = NULL; 2874 edns.opt_list_out = sq->opt_list; 2875 edns.opt_list_inplace_cb_out = NULL; 2876 edns.udp_size = serviced_query_udp_size(sq, sq->status); 2877 edns.bits = 0; 2878 if(sq->dnssec & EDNS_DO) 2879 edns.bits = EDNS_DO; 2880 if(sq->dnssec & BIT_CD) 2881 LDNS_CD_SET(sldns_buffer_begin(buff)); 2882 if (sq->ssl_upstream && sq->padding_block_size) { 2883 padding_option.opt_code = LDNS_EDNS_PADDING; 2884 padding_option.opt_len = 0; 2885 padding_option.opt_data = NULL; 2886 padding_option.next = edns.opt_list_out; 2887 edns.opt_list_out = &padding_option; 2888 edns.padding_block_size = 
sq->padding_block_size; 2889 } 2890 attach_edns_record(buff, &edns); 2891 } 2892 } 2893 2894 /** 2895 * Perform serviced query UDP sending operation. 2896 * Sends UDP with EDNS, unless infra host marked non EDNS. 2897 * @param sq: query to send. 2898 * @param buff: buffer scratch space. 2899 * @return 0 on error. 2900 */ 2901 static int 2902 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff) 2903 { 2904 int rtt, vs; 2905 uint8_t edns_lame_known; 2906 time_t now = *sq->outnet->now_secs; 2907 2908 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone, 2909 sq->zonelen, now, &vs, &edns_lame_known, &rtt)) 2910 return 0; 2911 sq->last_rtt = rtt; 2912 verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs); 2913 if(sq->status == serviced_initial) { 2914 if(vs != -1) { 2915 sq->status = serviced_query_UDP_EDNS; 2916 } else { 2917 sq->status = serviced_query_UDP; 2918 } 2919 } 2920 serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) || 2921 (sq->status == serviced_query_UDP_EDNS_FRAG)); 2922 sq->last_sent_time = *sq->outnet->now_tv; 2923 sq->edns_lame_known = (int)edns_lame_known; 2924 verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt); 2925 sq->pending = pending_udp_query(sq, buff, rtt, 2926 serviced_udp_callback, sq); 2927 if(!sq->pending) 2928 return 0; 2929 return 1; 2930 } 2931 2932 /** check that perturbed qname is identical */ 2933 static int 2934 serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen) 2935 { 2936 uint8_t* d1 = sldns_buffer_begin(pkt)+12; 2937 uint8_t* d2 = qbuf+10; 2938 uint8_t len1, len2; 2939 int count = 0; 2940 if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */ 2941 return 0; 2942 log_assert(qbuflen >= 15 /* 10 header, root, type, class */); 2943 len1 = *d1++; 2944 len2 = *d2++; 2945 while(len1 != 0 || len2 != 0) { 2946 if(LABEL_IS_PTR(len1)) { 2947 /* check if we can read *d1 with compression ptr rest */ 2948 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2949 return 0; 2950 d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1); 2951 /* check if we can read the destination *d1 */ 2952 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2953 return 0; 2954 len1 = *d1++; 2955 if(count++ > MAX_COMPRESS_PTRS) 2956 return 0; 2957 continue; 2958 } 2959 if(d2 > qbuf+qbuflen) 2960 return 0; 2961 if(len1 != len2) 2962 return 0; 2963 if(len1 > LDNS_MAX_LABELLEN) 2964 return 0; 2965 /* check len1 + 1(next length) are okay to read */ 2966 if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2967 return 0; 2968 log_assert(len1 <= LDNS_MAX_LABELLEN); 2969 log_assert(len2 <= LDNS_MAX_LABELLEN); 2970 log_assert(len1 == len2 && len1 != 0); 2971 /* compare the labels - bitwise identical */ 2972 if(memcmp(d1, d2, len1) != 0) 2973 return 0; 2974 d1 += len1; 2975 d2 += len2; 2976 len1 = *d1++; 2977 len2 = *d2++; 2978 } 2979 return 1; 2980 } 2981 2982 /** call the callbacks for a serviced query */ 2983 static void 2984 serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c, 2985 struct comm_reply* rep) 2986 { 2987 struct service_callback* p; 2988 int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/ 2989 uint8_t *backup_p = NULL; 2990 size_t backlen = 0; 2991 #ifdef UNBOUND_DEBUG 2992 rbnode_type* rem = 2993 #else 2994 (void) 2995 #endif 2996 /* remove from tree, and schedule for deletion, so that callbacks 2997 * can safely deregister themselves and even create new serviced 2998 * queries that are identical to this one. 
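	 * The entry itself is freed by serviced_delete() at the end of this
	 * function, after all callbacks have run.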
*/ 2999 rbtree_delete(sq->outnet->serviced, sq); 3000 log_assert(rem); /* should have been present */ 3001 sq->to_be_deleted = 1; 3002 verbose(VERB_ALGO, "svcd callbacks start"); 3003 if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c && 3004 !sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) { 3005 /* for type PTR do not check perturbed name in answer, 3006 * compatibility with cisco dns guard boxes that mess up 3007 * reverse queries 0x20 contents */ 3008 /* noerror and nxdomain must have a qname in reply */ 3009 if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 && 3010 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 3011 == LDNS_RCODE_NOERROR || 3012 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 3013 == LDNS_RCODE_NXDOMAIN)) { 3014 verbose(VERB_DETAIL, "no qname in reply to check 0x20ID"); 3015 log_addr(VERB_DETAIL, "from server", 3016 &sq->addr, sq->addrlen); 3017 log_buf(VERB_DETAIL, "for packet", c->buffer); 3018 error = NETEVENT_CLOSED; 3019 c = NULL; 3020 } else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 && 3021 !serviced_check_qname(c->buffer, sq->qbuf, 3022 sq->qbuflen)) { 3023 verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname"); 3024 log_addr(VERB_DETAIL, "from server", 3025 &sq->addr, sq->addrlen); 3026 log_buf(VERB_DETAIL, "for packet", c->buffer); 3027 error = NETEVENT_CAPSFAIL; 3028 /* and cleanup too */ 3029 pkt_dname_tolower(c->buffer, 3030 sldns_buffer_at(c->buffer, 12)); 3031 } else { 3032 verbose(VERB_ALGO, "good 0x20-ID in reply qname"); 3033 /* cleanup caps, prettier cache contents. */ 3034 pkt_dname_tolower(c->buffer, 3035 sldns_buffer_at(c->buffer, 12)); 3036 } 3037 } 3038 if(dobackup && c) { 3039 /* make a backup of the query, since the querystate processing 3040 * may send outgoing queries that overwrite the buffer. 3041 * use secondary buffer to store the query. 
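		 * The backup is only made when more than one callback is
		 * registered (dobackup); a single callback can consume the
		 * buffer directly.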
3042 * This is a data copy, but faster than packet to server */ 3043 backlen = sldns_buffer_limit(c->buffer); 3044 backup_p = regional_alloc_init(sq->region, 3045 sldns_buffer_begin(c->buffer), backlen); 3046 if(!backup_p) { 3047 log_err("malloc failure in serviced query callbacks"); 3048 error = NETEVENT_CLOSED; 3049 c = NULL; 3050 } 3051 sq->outnet->svcd_overhead = backlen; 3052 } 3053 /* test the actual sq->cblist, because the next elem could be deleted*/ 3054 while((p=sq->cblist) != NULL) { 3055 sq->cblist = p->next; /* remove this element */ 3056 if(dobackup && c) { 3057 sldns_buffer_clear(c->buffer); 3058 sldns_buffer_write(c->buffer, backup_p, backlen); 3059 sldns_buffer_flip(c->buffer); 3060 } 3061 fptr_ok(fptr_whitelist_serviced_query(p->cb)); 3062 (void)(*p->cb)(c, p->cb_arg, error, rep); 3063 } 3064 if(backup_p) { 3065 sq->outnet->svcd_overhead = 0; 3066 } 3067 verbose(VERB_ALGO, "svcd callbacks end"); 3068 log_assert(sq->cblist == NULL); 3069 serviced_delete(sq); 3070 } 3071 3072 int 3073 serviced_tcp_callback(struct comm_point* c, void* arg, int error, 3074 struct comm_reply* rep) 3075 { 3076 struct serviced_query* sq = (struct serviced_query*)arg; 3077 struct comm_reply r2; 3078 #ifdef USE_DNSTAP 3079 struct waiting_tcp* w = (struct waiting_tcp*)sq->pending; 3080 struct pending_tcp* pend_tcp = NULL; 3081 struct port_if* pi = NULL; 3082 if(w && !w->on_tcp_waiting_list && w->next_waiting) { 3083 pend_tcp = (struct pending_tcp*)w->next_waiting; 3084 pi = pend_tcp->pi; 3085 } 3086 #endif 3087 sq->pending = NULL; /* removed after this callback */ 3088 if(error != NETEVENT_NOERROR) 3089 log_addr(VERB_QUERY, "tcp error for address", 3090 &sq->addr, sq->addrlen); 3091 if(error==NETEVENT_NOERROR) 3092 infra_update_tcp_works(sq->outnet->infra, &sq->addr, 3093 sq->addrlen, sq->zone, sq->zonelen); 3094 #ifdef USE_DNSTAP 3095 /* 3096 * sending src (local service)/dst (upstream) addresses over DNSTAP 3097 */ 3098 if(error==NETEVENT_NOERROR && pi && sq->outnet->dtenv && 3099 (sq->outnet->dtenv->log_resolver_response_messages || 3100 sq->outnet->dtenv->log_forwarder_response_messages)) { 3101 log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen); 3102 log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen); 3103 dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr, 3104 &pi->addr, c->type, c->ssl, sq->zone, sq->zonelen, sq->qbuf, 3105 sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv, 3106 c->buffer); 3107 } 3108 #endif 3109 if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS && 3110 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 3111 LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin( 3112 c->buffer)) == LDNS_RCODE_NOTIMPL) ) { 3113 /* attempt to fallback to nonEDNS */ 3114 sq->status = serviced_query_TCP_EDNS_fallback; 3115 serviced_tcp_initiate(sq, c->buffer); 3116 return 0; 3117 } else if(error==NETEVENT_NOERROR && 3118 sq->status == serviced_query_TCP_EDNS_fallback && 3119 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 3120 LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE( 3121 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN 3122 || LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 3123 == LDNS_RCODE_YXDOMAIN)) { 3124 /* the fallback produced a result that looks promising, note 3125 * that this server should be approached without EDNS */ 3126 /* only store noEDNS in cache if domain is noDNSSEC */ 3127 if(!sq->want_dnssec) 3128 if(!infra_edns_update(sq->outnet->infra, &sq->addr, 3129 sq->addrlen, sq->zone, sq->zonelen, -1, 3130 
*sq->outnet->now_secs)) 3131 log_err("Out of memory caching no edns for host"); 3132 sq->status = serviced_query_TCP; 3133 } 3134 if(sq->tcp_upstream || sq->ssl_upstream) { 3135 struct timeval now = *sq->outnet->now_tv; 3136 if(error!=NETEVENT_NOERROR) { 3137 if(!infra_rtt_update(sq->outnet->infra, &sq->addr, 3138 sq->addrlen, sq->zone, sq->zonelen, sq->qtype, 3139 -1, sq->last_rtt, (time_t)now.tv_sec)) 3140 log_err("out of memory in TCP exponential backoff."); 3141 } else if(now.tv_sec > sq->last_sent_time.tv_sec || 3142 (now.tv_sec == sq->last_sent_time.tv_sec && 3143 now.tv_usec > sq->last_sent_time.tv_usec)) { 3144 /* convert from microseconds to milliseconds */ 3145 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000 3146 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000; 3147 verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime); 3148 log_assert(roundtime >= 0); 3149 /* only store if less then AUTH_TIMEOUT seconds, it could be 3150 * huge due to system-hibernated and we woke up */ 3151 if(roundtime < 60000) { 3152 if(!infra_rtt_update(sq->outnet->infra, &sq->addr, 3153 sq->addrlen, sq->zone, sq->zonelen, sq->qtype, 3154 roundtime, sq->last_rtt, (time_t)now.tv_sec)) 3155 log_err("out of memory noting rtt."); 3156 } 3157 } 3158 } 3159 /* insert address into reply info */ 3160 if(!rep) { 3161 /* create one if there isn't (on errors) */ 3162 rep = &r2; 3163 r2.c = c; 3164 } 3165 memcpy(&rep->remote_addr, &sq->addr, sq->addrlen); 3166 rep->remote_addrlen = sq->addrlen; 3167 serviced_callbacks(sq, error, c, rep); 3168 return 0; 3169 } 3170 3171 static void 3172 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff) 3173 { 3174 verbose(VERB_ALGO, "initiate TCP query %s", 3175 sq->status==serviced_query_TCP_EDNS?"EDNS":""); 3176 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS); 3177 sq->last_sent_time = *sq->outnet->now_tv; 3178 log_assert(!sq->busy); 3179 sq->busy = 1; 3180 sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout, 3181 serviced_tcp_callback, sq); 3182 sq->busy = 0; 3183 if(!sq->pending) { 3184 /* delete from tree so that a retry by above layer does not 3185 * clash with this entry */ 3186 verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query"); 3187 serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL); 3188 } 3189 } 3190 3191 /** Send serviced query over TCP return false on initial failure */ 3192 static int 3193 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff) 3194 { 3195 int vs, rtt, timeout; 3196 uint8_t edns_lame_known; 3197 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone, 3198 sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known, 3199 &rtt)) 3200 return 0; 3201 sq->last_rtt = rtt; 3202 if(vs != -1) 3203 sq->status = serviced_query_TCP_EDNS; 3204 else sq->status = serviced_query_TCP; 3205 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS); 3206 sq->last_sent_time = *sq->outnet->now_tv; 3207 if(sq->tcp_upstream || sq->ssl_upstream) { 3208 timeout = rtt; 3209 if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout) 3210 timeout = sq->outnet->tcp_auth_query_timeout; 3211 } else { 3212 timeout = sq->outnet->tcp_auth_query_timeout; 3213 } 3214 log_assert(!sq->busy); 3215 sq->busy = 1; 3216 sq->pending = pending_tcp_query(sq, buff, timeout, 3217 serviced_tcp_callback, sq); 3218 sq->busy = 0; 3219 return sq->pending != NULL; 3220 } 3221 3222 /* see if packet is edns malformed; got zeroes at start. 
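 * Heuristic used below: header present, NOERROR rcode, qdcount 1, at least
 * one answer RR, and the first answer RR starts with a zero (root) owner
 * name and type 0.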
3223 * This is from servers that return malformed packets to EDNS0 queries, 3224 * but they return good packets for nonEDNS0 queries. 3225 * We try to detect their output; without resorting to a full parse or 3226 * check for too many bytes after the end of the packet. */ 3227 static int 3228 packet_edns_malformed(struct sldns_buffer* buf, int qtype) 3229 { 3230 size_t len; 3231 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE) 3232 return 1; /* malformed */ 3233 /* they have NOERROR rcode, 1 answer. */ 3234 if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR) 3235 return 0; 3236 /* one query (to skip) and answer records */ 3237 if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 || 3238 LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0) 3239 return 0; 3240 /* skip qname */ 3241 len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE), 3242 sldns_buffer_limit(buf)-LDNS_HEADER_SIZE); 3243 if(len == 0) 3244 return 0; 3245 if(len == 1 && qtype == 0) 3246 return 0; /* we asked for '.' and type 0 */ 3247 /* and then 4 bytes (type and class of query) */ 3248 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3) 3249 return 0; 3250 3251 /* and start with 11 zeroes as the answer RR */ 3252 /* so check the qtype of the answer record, qname=0, type=0 */ 3253 if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 && 3254 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 && 3255 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0) 3256 return 1; 3257 return 0; 3258 } 3259 3260 int 3261 serviced_udp_callback(struct comm_point* c, void* arg, int error, 3262 struct comm_reply* rep) 3263 { 3264 struct serviced_query* sq = (struct serviced_query*)arg; 3265 struct outside_network* outnet = sq->outnet; 3266 struct timeval now = *sq->outnet->now_tv; 3267 #ifdef USE_DNSTAP 3268 struct pending* p = (struct pending*)sq->pending; 3269 #endif 3270 3271 sq->pending = NULL; /* removed after callback */ 3272 if(error == NETEVENT_TIMEOUT) { 3273 if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000 && 3274 (serviced_query_udp_size(sq, serviced_query_UDP_EDNS_FRAG) < serviced_query_udp_size(sq, serviced_query_UDP_EDNS))) { 3275 /* fallback to 1480/1280 */ 3276 sq->status = serviced_query_UDP_EDNS_FRAG; 3277 log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10, 3278 &sq->addr, sq->addrlen); 3279 if(!serviced_udp_send(sq, c->buffer)) { 3280 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 3281 } 3282 return 0; 3283 } 3284 if(sq->status == serviced_query_UDP_EDNS_FRAG) { 3285 /* fragmentation size did not fix it */ 3286 sq->status = serviced_query_UDP_EDNS; 3287 } 3288 sq->retry++; 3289 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen, 3290 sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt, 3291 (time_t)now.tv_sec)) 3292 log_err("out of memory in UDP exponential backoff"); 3293 if(sq->retry < OUTBOUND_UDP_RETRY) { 3294 log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10, 3295 &sq->addr, sq->addrlen); 3296 if(!serviced_udp_send(sq, c->buffer)) { 3297 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 3298 } 3299 return 0; 3300 } 3301 } 3302 if(error != NETEVENT_NOERROR) { 3303 /* udp returns error (due to no ID or interface available) */ 3304 serviced_callbacks(sq, error, c, rep); 3305 return 0; 3306 } 3307 #ifdef USE_DNSTAP 3308 /* 3309 * sending src (local service)/dst (upstream) addresses over DNSTAP 3310 */ 3311 if(error == NETEVENT_NOERROR && outnet->dtenv && p->pc && 3312 (outnet->dtenv->log_resolver_response_messages || 3313 outnet->dtenv->log_forwarder_response_messages)) { 3314 
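		/* p->pc->pif is the local outgoing interface used for this
		 * query; its address is logged as the local side */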
log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen); 3315 log_addr(VERB_ALGO, "to local addr", &p->pc->pif->addr, 3316 p->pc->pif->addrlen); 3317 dt_msg_send_outside_response(outnet->dtenv, &sq->addr, 3318 &p->pc->pif->addr, c->type, c->ssl, sq->zone, sq->zonelen, 3319 sq->qbuf, sq->qbuflen, &sq->last_sent_time, 3320 sq->outnet->now_tv, c->buffer); 3321 } 3322 #endif 3323 if( (sq->status == serviced_query_UDP_EDNS 3324 ||sq->status == serviced_query_UDP_EDNS_FRAG) 3325 && (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 3326 == LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE( 3327 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL 3328 || packet_edns_malformed(c->buffer, sq->qtype) 3329 )) { 3330 /* try to get an answer by falling back without EDNS */ 3331 verbose(VERB_ALGO, "serviced query: attempt without EDNS"); 3332 sq->status = serviced_query_UDP_EDNS_fallback; 3333 sq->retry = 0; 3334 if(!serviced_udp_send(sq, c->buffer)) { 3335 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 3336 } 3337 return 0; 3338 } else if(sq->status == serviced_query_UDP_EDNS && 3339 !sq->edns_lame_known) { 3340 /* now we know that edns queries received answers store that */ 3341 log_addr(VERB_ALGO, "serviced query: EDNS works for", 3342 &sq->addr, sq->addrlen); 3343 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen, 3344 sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) { 3345 log_err("Out of memory caching edns works"); 3346 } 3347 sq->edns_lame_known = 1; 3348 } else if(sq->status == serviced_query_UDP_EDNS_fallback && 3349 !sq->edns_lame_known && (LDNS_RCODE_WIRE( 3350 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR || 3351 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 3352 LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin( 3353 c->buffer)) == LDNS_RCODE_YXDOMAIN)) { 3354 /* the fallback produced a result that looks promising, note 3355 * that this server should be approached without EDNS */ 3356 /* only store noEDNS in cache if domain is noDNSSEC */ 3357 if(!sq->want_dnssec) { 3358 log_addr(VERB_ALGO, "serviced query: EDNS fails for", 3359 &sq->addr, sq->addrlen); 3360 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen, 3361 sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) { 3362 log_err("Out of memory caching no edns for host"); 3363 } 3364 } else { 3365 log_addr(VERB_ALGO, "serviced query: EDNS fails, but " 3366 "not stored because need DNSSEC for", &sq->addr, 3367 sq->addrlen); 3368 } 3369 sq->status = serviced_query_UDP; 3370 } 3371 if(now.tv_sec > sq->last_sent_time.tv_sec || 3372 (now.tv_sec == sq->last_sent_time.tv_sec && 3373 now.tv_usec > sq->last_sent_time.tv_usec)) { 3374 /* convert from microseconds to milliseconds */ 3375 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000 3376 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000; 3377 verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime); 3378 log_assert(roundtime >= 0); 3379 /* in case the system hibernated, do not enter a huge value, 3380 * above this value gives trouble with server selection */ 3381 if(roundtime < 60000) { 3382 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen, 3383 sq->zone, sq->zonelen, sq->qtype, roundtime, 3384 sq->last_rtt, (time_t)now.tv_sec)) 3385 log_err("out of memory noting rtt."); 3386 } 3387 } 3388 /* perform TC flag check and TCP fallback after updating our 3389 * cache entries for EDNS status and RTT times */ 3390 if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) { 3391 /* fallback to TCP */ 3392 /* this discards partial UDP 
struct serviced_query*
outnet_serviced_query(struct outside_network* outnet,
	struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
	int nocaps, int check_ratelimit, int tcp_upstream, int ssl_upstream,
	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, struct module_qstate* qstate,
	comm_point_callback_type* callback, void* callback_arg,
	sldns_buffer* buff, struct module_env* env, int* was_ratelimited)
{
	struct serviced_query* sq;
	struct service_callback* cb;
	struct edns_string_addr* client_string_addr;
	struct regional* region;
	struct edns_option* backed_up_opt_list = qstate->edns_opts_back_out;
	struct edns_option* per_upstream_opt_list = NULL;
	time_t timenow = 0;

	/* If we have an already populated EDNS option list, make a copy since
	 * we may now add upstream specific EDNS options. */
	/* Use a region that could be attached to a serviced_query, if it needs
	 * to be created. If an existing one is found then this region will be
	 * destroyed here. */
	region = alloc_reg_obtain(env->alloc);
	if(!region) return NULL;
	if(qstate->edns_opts_back_out) {
		per_upstream_opt_list = edns_opt_copy_region(
			qstate->edns_opts_back_out, region);
		if(!per_upstream_opt_list) {
			alloc_reg_release(env->alloc, region);
			return NULL;
		}
		qstate->edns_opts_back_out = per_upstream_opt_list;
	}

	if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone,
		zonelen, qstate, region)) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	/* Restore the option list; we can explicitly use the copied one from
	 * now on. */
	per_upstream_opt_list = qstate->edns_opts_back_out;
	qstate->edns_opts_back_out = backed_up_opt_list;

	if((client_string_addr = edns_string_addr_lookup(
		&env->edns_strings->client_strings, addr, addrlen))) {
		edns_opt_list_append(&per_upstream_opt_list,
			env->edns_strings->client_string_opcode,
			client_string_addr->string_len,
			client_string_addr->string, region);
	}

	serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
		qinfo->qclass, flags);
	sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
		per_upstream_opt_list);
	if(!sq) {
		/* Check ratelimit only for new serviced_query */
		if(check_ratelimit) {
			timenow = *env->now;
			if(!infra_ratelimit_inc(env->infra_cache, zone,
				zonelen, timenow, env->cfg->ratelimit_backoff,
				&qstate->qinfo,
				qstate->mesh_info->reply_list
				?&qstate->mesh_info->reply_list->query_reply
				:NULL)) {
				/* Can we pass through with slip factor? */
				if(env->cfg->ratelimit_factor == 0 ||
					ub_random_max(env->rnd,
					env->cfg->ratelimit_factor) != 1) {
					*was_ratelimited = 1;
					alloc_reg_release(env->alloc, region);
					return NULL;
				}
				log_nametypeclass(VERB_ALGO,
					"ratelimit allowed through for "
					"delegation point", zone,
					LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN);
			}
		}
		/* make new serviced query entry */
		sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
			tcp_upstream, ssl_upstream, tls_auth_name, addr,
			addrlen, zone, zonelen, (int)qinfo->qtype,
			per_upstream_opt_list,
			( ssl_upstream && env->cfg->pad_queries
			? env->cfg->pad_queries_block_size : 0 ),
			env->alloc, region);
		if(!sq) {
			if(check_ratelimit) {
				infra_ratelimit_dec(env->infra_cache,
					zone, zonelen, timenow);
			}
			return NULL;
		}
		if(!(cb = (struct service_callback*)regional_alloc(
			sq->region, sizeof(*cb)))) {
			if(check_ratelimit) {
				infra_ratelimit_dec(env->infra_cache,
					zone, zonelen, timenow);
			}
			(void)rbtree_delete(outnet->serviced, sq);
			serviced_node_del(&sq->node, NULL);
			return NULL;
		}
		/* No network action at this point; it will be invoked with the
		 * serviced_query timer instead to run outside of the mesh. */
	} else {
		/* We don't need this region anymore. */
		alloc_reg_release(env->alloc, region);
		/* duplicate entries are included in the callback list, because
		 * there is a counterpart registration by our caller that needs
		 * to be doubly-removed (with callbacks perhaps). */
		if(!(cb = (struct service_callback*)regional_alloc(
			sq->region, sizeof(*cb)))) {
			return NULL;
		}
	}
	/* add callback to list of callbacks */
	cb->cb = callback;
	cb->cb_arg = callback_arg;
	cb->next = sq->cblist;
	sq->cblist = cb;
	return sq;
}
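
/* Hedged usage sketch (not compiled): how a caller might register a callback
 * for an upstream query with outnet_serviced_query() above.  The helper name,
 * the callback names and the literal flag values are illustrative
 * assumptions; real callers pass their own query state and flags. */
#if 0
static void
example_send_upstream(struct outside_network* outnet, struct module_env* env,
	struct module_qstate* qstate, struct query_info* qinfo,
	struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, sldns_buffer* scratch,
	comm_point_callback_type* my_callback, void* my_arg)
{
	int was_ratelimited = 0;
	struct serviced_query* sq = outnet_serviced_query(outnet, qinfo,
		0 /* flags */, 1 /* dnssec */, 1 /* want_dnssec */,
		0 /* nocaps */, 1 /* check_ratelimit */, 0 /* tcp_upstream */,
		0 /* ssl_upstream */, NULL /* tls_auth_name */, addr, addrlen,
		zone, zonelen, qstate, my_callback, my_arg, scratch, env,
		&was_ratelimited);
	if(!sq) {
		/* either out of memory or the ratelimit refused the query */
		if(was_ratelimited)
			verbose(VERB_ALGO, "upstream query was ratelimited");
		return;
	}
	/* later, if the caller loses interest:
	 * outnet_serviced_query_stop(sq, my_arg); */
}
#endif
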
/** remove callback from list */
static void
callback_list_remove(struct serviced_query* sq, void* cb_arg)
{
	struct service_callback** pp = &sq->cblist;
	while(*pp) {
		if((*pp)->cb_arg == cb_arg) {
			struct service_callback* del = *pp;
			*pp = del->next;
			return;
		}
		pp = &(*pp)->next;
	}
}

void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
{
	if(!sq)
		return;
	callback_list_remove(sq, cb_arg);
	/* if callbacks() routine scheduled deletion, let it do that */
	if(!sq->cblist && !sq->busy && !sq->to_be_deleted) {
		(void)rbtree_delete(sq->outnet->serviced, sq);
		serviced_delete(sq);
	}
}

/** create fd to send to this destination */
static int
fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
	socklen_t to_addrlen)
{
	struct sockaddr_storage* addr;
	socklen_t addrlen;
	int i, try, pnum, dscp;
	struct port_if* pif;

	/* create fd */
	dscp = outnet->ip_dscp;
	for(try = 0; try<1000; try++) {
		int port = 0;
		int freebind = 0;
		int noproto = 0;
		int inuse = 0;
		int fd = -1;

		/* select interface */
		if(addr_is_ip6(to_addr, to_addrlen)) {
			if(outnet->num_ip6 == 0) {
				char to[64];
				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
				verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
				return -1;
			}
			i = ub_random_max(outnet->rnd, outnet->num_ip6);
			pif = &outnet->ip6_ifs[i];
		} else {
			if(outnet->num_ip4 == 0) {
				char to[64];
				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
				verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
				return -1;
			}
			i = ub_random_max(outnet->rnd, outnet->num_ip4);
			pif = &outnet->ip4_ifs[i];
		}
		addr = &pif->addr;
		addrlen = pif->addrlen;
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
		pnum = ub_random_max(outnet->rnd, pif->avail_total);
		if(pnum < pif->inuse) {
			/* port already open */
			port = pif->out[pnum]->number;
		} else {
			/* unused ports in start part of array */
			port = pif->avail_ports[pnum - pif->inuse];
		}
#else
		pnum = port = 0;
#endif
		if(addr_is_ip6(to_addr, to_addrlen)) {
			struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
			sa.sin6_port = (in_port_t)htons((uint16_t)port);
			fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
				(struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
				0, 0, 0, NULL, 0, freebind, 0, dscp);
		} else {
			struct sockaddr_in* sa = (struct sockaddr_in*)addr;
			sa->sin_port = (in_port_t)htons((uint16_t)port);
			fd = create_udp_sock(AF_INET, SOCK_DGRAM,
				(struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
				0, 0, 0, NULL, 0, freebind, 0, dscp);
		}
		if(fd != -1) {
			return fd;
		}
		if(!inuse) {
			return -1;
		}
	}
	/* too many tries */
	log_err("cannot send probe, ports are in use");
	return -1;
}
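
/* Sketch (not compiled) of the port pick in fd_for_dest() above: with a
 * hypothetical avail_total of 6 and inuse of 2, a random pnum in [0..5]
 * maps either to one of the two already-open ports (pnum 0..1, via
 * pif->out[]) or to one of the four free port numbers (pnum 2..5, via
 * pif->avail_ports[pnum - inuse]). */
#if 0
static int
example_pick_port(struct port_if* pif, struct ub_randstate* rnd)
{
	int pnum = ub_random_max(rnd, pif->avail_total);
	if(pnum < pif->inuse)
		return pif->out[pnum]->number;      /* reuse an open port */
	return pif->avail_ports[pnum - pif->inuse]; /* pick a free port */
}
#endif
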
struct comm_point*
outnet_comm_point_for_udp(struct outside_network* outnet,
	comm_point_callback_type* cb, void* cb_arg,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen)
{
	struct comm_point* cp;
	int fd = fd_for_dest(outnet, to_addr, to_addrlen);
	if(fd == -1) {
		return NULL;
	}
	cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff, 0,
		cb, cb_arg, NULL);
	if(!cp) {
		log_err("malloc failure");
		close(fd);
		return NULL;
	}
	return cp;
}

/** setup SSL for comm point */
static int
setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
	int fd, char* host)
{
	cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
	if(!cp->ssl) {
		log_err("cannot create SSL object");
		return 0;
	}
#ifdef USE_WINSOCK
	comm_point_tcp_win_bio_cb(cp, cp->ssl);
#endif
	cp->ssl_shake_state = comm_ssl_shake_write;
	/* https verification */
#ifdef HAVE_SSL
	if(outnet->tls_use_sni) {
		(void)SSL_set_tlsext_host_name(cp->ssl, host);
	}
#endif
#ifdef HAVE_SSL_SET1_HOST
	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
		/* because we set SSL_VERIFY_PEER, in netevent in
		 * ssl_handshake, it'll check if the certificate
		 * verification has succeeded */
		/* SSL_VERIFY_PEER is set on the sslctx */
		/* and the certificates to verify with are loaded into
		 * it with SSL_load_verify_locations or
		 * SSL_CTX_set_default_verify_paths */
		/* setting the hostname makes openssl verify the
		 * host name in the x509 certificate in the
		 * SSL connection */
		if(!SSL_set1_host(cp->ssl, host)) {
			log_err("SSL_set1_host failed");
			return 0;
		}
	}
#elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
	/* openssl 1.0.2 has this function that can be used for
	 * set1_host like verification */
	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
		X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl);
#  ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
		X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
#  endif
		if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
			log_err("X509_VERIFY_PARAM_set1_host failed");
			return 0;
		}
	}
#else
	(void)host;
#endif /* HAVE_SSL_SET1_HOST */
	return 1;
}
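
/* Hedged usage sketch (not compiled): how a caller might obtain a UDP comm
 * point towards one destination with outnet_comm_point_for_udp() above.
 * The callback name and helper are hypothetical; replies are delivered to
 * the callback and the caller deletes the comm point when finished. */
#if 0
static void
example_udp_probe(struct outside_network* outnet,
	struct sockaddr_storage* dest, socklen_t destlen,
	comm_point_callback_type* my_probe_callback, void* my_arg)
{
	struct comm_point* cp = outnet_comm_point_for_udp(outnet,
		my_probe_callback, my_arg, dest, destlen);
	if(!cp) {
		verbose(VERB_ALGO, "could not create UDP comm point");
		return;
	}
	/* ... the caller writes its packet and sends it; answers arrive
	 * via my_probe_callback.  When done: */
	comm_point_delete(cp);
}
#endif
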
struct comm_point*
outnet_comm_point_for_tcp(struct outside_network* outnet,
	comm_point_callback_type* cb, void* cb_arg,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen,
	sldns_buffer* query, int timeout, int ssl, char* host)
{
	struct comm_point* cp;
	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
	if(fd == -1) {
		return 0;
	}
	fd_set_nonblock(fd);
	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
		/* outnet_tcp_connect has closed fd on error for us */
		return 0;
	}
	cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
	if(!cp) {
		log_err("malloc failure");
		close(fd);
		return 0;
	}
	cp->repinfo.remote_addrlen = to_addrlen;
	memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);

	/* setup for SSL (if needed) */
	if(ssl) {
		if(!setup_comm_ssl(cp, outnet, fd, host)) {
			log_err("cannot setup XoT");
			comm_point_delete(cp);
			return NULL;
		}
	}

	/* set timeout on TCP connection */
	comm_point_start_listening(cp, fd, timeout);
	/* copy scratch buffer to cp->buffer */
	sldns_buffer_copy(cp->buffer, query);
	return cp;
}

/** setup the User-Agent HTTP header based on http-user-agent configuration */
static void
setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
{
	if(cfg->hide_http_user_agent) return;
	if(cfg->http_user_agent==NULL || cfg->http_user_agent[0] == 0) {
		sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME,
			PACKAGE_VERSION);
	} else {
		sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
	}
}

/** setup http request headers in buffer for sending query to destination */
static int
setup_http_request(sldns_buffer* buf, char* host, char* path,
	struct config_file* cfg)
{
	sldns_buffer_clear(buf);
	sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
	sldns_buffer_printf(buf, "Host: %s\r\n", host);
	setup_http_user_agent(buf, cfg);
	/* We do not really do multiple queries per connection,
	 * but this header setting is also not needed.
	 * sldns_buffer_printf(buf, "Connection: close\r\n") */
	sldns_buffer_printf(buf, "\r\n");
	if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
		return 0; /* somehow buffer too short, but it is about 60K
			and the request is only a couple bytes long. */
	sldns_buffer_flip(buf);
	return 1;
}
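
/* For illustration: with a hypothetical host of "downloads.example.net" and
 * path of "list/file.txt", and the default User-Agent, setup_http_request()
 * above produces a request along the lines of:
 *
 *   GET /list/file.txt HTTP/1.1\r\n
 *   Host: downloads.example.net\r\n
 *   User-Agent: unbound/<version>\r\n
 *   \r\n
 *
 * The User-Agent value comes from PACKAGE_NAME/PACKAGE_VERSION; if
 * http-user-agent is configured that string is used instead, and with
 * hide-http-user-agent the header is omitted entirely. */
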
struct comm_point*
outnet_comm_point_for_http(struct outside_network* outnet,
	comm_point_callback_type* cb, void* cb_arg,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
	int ssl, char* host, char* path, struct config_file* cfg)
{
	/* cp calls cb with err=NETEVENT_DONE when transfer is done */
	struct comm_point* cp;
	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
	if(fd == -1) {
		return 0;
	}
	fd_set_nonblock(fd);
	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
		/* outnet_tcp_connect has closed fd on error for us */
		return 0;
	}
	cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
		outnet->udp_buff);
	if(!cp) {
		log_err("malloc failure");
		close(fd);
		return 0;
	}
	cp->repinfo.remote_addrlen = to_addrlen;
	memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);

	/* setup for SSL (if needed) */
	if(ssl) {
		if(!setup_comm_ssl(cp, outnet, fd, host)) {
			log_err("cannot setup https");
			comm_point_delete(cp);
			return NULL;
		}
	}

	/* set timeout on TCP connection */
	comm_point_start_listening(cp, fd, timeout);

	/* setup http request in cp->buffer */
	if(!setup_http_request(cp->buffer, host, path, cfg)) {
		log_err("error setting up http request");
		comm_point_delete(cp);
		return NULL;
	}
	return cp;
}

/** get memory used by waiting tcp entry (in use or not) */
static size_t
waiting_tcp_get_mem(struct waiting_tcp* w)
{
	size_t s;
	if(!w) return 0;
	s = sizeof(*w) + w->pkt_len;
	if(w->timer)
		s += comm_timer_get_mem(w->timer);
	return s;
}

/** get memory used by a port interface (port_if) */
static size_t
if_get_mem(struct port_if* pif)
{
	size_t s;
	int i;
	s = sizeof(*pif) +
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
		sizeof(int)*pif->avail_total +
#endif
		sizeof(struct port_comm*)*pif->maxout;
	for(i=0; i<pif->inuse; i++)
		s += sizeof(*pif->out[i]) +
			comm_point_get_mem(pif->out[i]->cp);
	return s;
}

/** get memory used by waiting udp */
static size_t
waiting_udp_get_mem(struct pending* w)
{
	size_t s;
	s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
	return s;
}
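
/* Hedged usage sketch (not compiled): fetching a file over HTTP(S) with the
 * comm point above.  The helper name, host, path and callback names are
 * hypothetical; the callback receives the data and finally NETEVENT_DONE. */
#if 0
static struct comm_point*
example_https_fetch(struct outside_network* outnet, struct config_file* cfg,
	struct sockaddr_storage* dest, socklen_t destlen,
	comm_point_callback_type* my_http_callback, void* my_arg)
{
	/* 30000 msec timeout; ssl=1 so the connection is verified for
	 * "downloads.example.net" by setup_comm_ssl */
	return outnet_comm_point_for_http(outnet, my_http_callback, my_arg,
		dest, destlen, 30000, 1, "downloads.example.net",
		"list/zonefile.txt", cfg);
}
#endif
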
size_t outnet_get_mem(struct outside_network* outnet)
{
	size_t i;
	int k;
	struct waiting_tcp* w;
	struct pending* u;
	struct serviced_query* sq;
	struct service_callback* sb;
	struct port_comm* pc;
	size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
		sizeof(*outnet->udp_buff) +
		sldns_buffer_capacity(outnet->udp_buff);
	/* second buffer is not ours */
	for(pc = outnet->unused_fds; pc; pc = pc->next) {
		s += sizeof(*pc) + comm_point_get_mem(pc->cp);
	}
	for(k=0; k<outnet->num_ip4; k++)
		s += if_get_mem(&outnet->ip4_ifs[k]);
	for(k=0; k<outnet->num_ip6; k++)
		s += if_get_mem(&outnet->ip6_ifs[k]);
	for(u=outnet->udp_wait_first; u; u=u->next_waiting)
		s += waiting_udp_get_mem(u);

	s += sizeof(struct pending_tcp*)*outnet->num_tcp;
	for(i=0; i<outnet->num_tcp; i++) {
		s += sizeof(struct pending_tcp);
		s += comm_point_get_mem(outnet->tcp_conns[i]->c);
		if(outnet->tcp_conns[i]->query)
			s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
	}
	for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
		s += waiting_tcp_get_mem(w);
	s += sizeof(*outnet->pending);
	s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
		outnet->pending->count;
	s += sizeof(*outnet->serviced);
	s += outnet->svcd_overhead;
	RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
		s += sizeof(*sq) + sq->qbuflen;
		for(sb = sq->cblist; sb; sb = sb->next)
			s += sizeof(*sb);
	}
	return s;
}

size_t
serviced_get_mem(struct serviced_query* sq)
{
	struct service_callback* sb;
	size_t s;
	s = sizeof(*sq) + sq->qbuflen;
	for(sb = sq->cblist; sb; sb = sb->next)
		s += sizeof(*sb);
	if(sq->status == serviced_query_UDP_EDNS ||
		sq->status == serviced_query_UDP ||
		sq->status == serviced_query_UDP_EDNS_FRAG ||
		sq->status == serviced_query_UDP_EDNS_fallback) {
		s += sizeof(struct pending);
		s += comm_timer_get_mem(NULL);
	} else {
		/* does not have size of the pkt pointer */
		/* always has a timer except on malloc failures */

		/* these sizes are part of the main outside network mem */
		/*
		s += sizeof(struct waiting_tcp);
		s += comm_timer_get_mem(NULL);
		*/
	}
	return s;
}
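
/* Hedged sketch (not compiled): the memory accounting entry points above can
 * be summed by a caller that reports memory use; the helper name and the log
 * message are hypothetical. */
#if 0
static size_t
example_report_outnet_mem(struct outside_network* outnet)
{
	size_t total = outnet_get_mem(outnet);
	verbose(VERB_ALGO, "outside network uses %u bytes",
		(unsigned)total);
	return total;
}
#endif
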