/*
 * services/outside_network.c - implement sending of queries and wait answer.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file has functions to send queries to authoritative servers and
 * wait for the pending answer events.
 */
#include "config.h"
#include <ctype.h>
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#include <sys/time.h>
#include "services/outside_network.h"
#include "services/listen_dnsport.h"
#include "services/cache/infra.h"
#include "iterator/iterator.h"
#include "util/data/msgparse.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/data/dname.h"
#include "util/netevent.h"
#include "util/log.h"
#include "util/net_help.h"
#include "util/random.h"
#include "util/fptr_wlist.h"
#include "util/edns.h"
#include "sldns/sbuffer.h"
#include "dnstap/dnstap.h"
#ifdef HAVE_OPENSSL_SSL_H
#include <openssl/ssl.h>
#endif
#ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
#include <openssl/x509v3.h>
#endif

#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#include <fcntl.h>

/** number of times to retry making a random ID that is unique. */
#define MAX_ID_RETRY 1000
/** number of times to retry finding interface, port that can be opened. */
#define MAX_PORT_RETRY 10000
/** number of retries on outgoing UDP queries */
#define OUTBOUND_UDP_RETRY 1

/** initiate TCP transaction for serviced query */
static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
/** with a fd available, randomize and send UDP */
static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
	int timeout);

/** select a DNS ID for a TCP stream */
static uint16_t tcp_select_id(struct outside_network* outnet,
	struct reuse_tcp* reuse);

/** Perform serviced query UDP sending operation */
static int serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff);

/** Send serviced query over TCP; return false on initial failure */
static int serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff);

/** call the callbacks for a serviced query */
static void serviced_callbacks(struct serviced_query* sq, int error,
	struct comm_point* c, struct comm_reply* rep);

int
pending_cmp(const void* key1, const void* key2)
{
	struct pending *p1 = (struct pending*)key1;
	struct pending *p2 = (struct pending*)key2;
	if(p1->id < p2->id)
		return -1;
	if(p1->id > p2->id)
		return 1;
	log_assert(p1->id == p2->id);
	return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
}

int
serviced_cmp(const void* key1, const void* key2)
{
	struct serviced_query* q1 = (struct serviced_query*)key1;
	struct serviced_query* q2 = (struct serviced_query*)key2;
	int r;
	if(q1->qbuflen < q2->qbuflen)
		return -1;
	if(q1->qbuflen > q2->qbuflen)
		return 1;
	log_assert(q1->qbuflen == q2->qbuflen);
	log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
	/* alternate casing of qname is still the same query */
	if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
		return r;
	if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
		return r;
	if(q1->dnssec != q2->dnssec) {
		if(q1->dnssec < q2->dnssec)
			return -1;
		return 1;
	}
	if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
		return r;
	if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
		return r;
	return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
}
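
/*
 * For illustration, serviced_cmp() above relies on the wire layout of
 * serviced_query.qbuf, which apparently stores the query message without the
 * 2-byte message ID (IDs are picked per transmission, see tcp_select_id
 * above):
 *     [0..9]                  flags and section counts (10 bytes)
 *     [10..qbuflen-5]         query name in wire format
 *     [qbuflen-4..qbuflen-1]  qtype and qclass (2 bytes each)
 * hence the minimum length of 15 for a query for the root name. Assuming the
 * sldns_read_uint16() helper from sldns/sbuffer.h, a reader of this layout
 * could, as a sketch, fetch the type and class as:
 *     uint16_t qtype  = sldns_read_uint16(sq->qbuf + sq->qbuflen - 4);
 *     uint16_t qclass = sldns_read_uint16(sq->qbuf + sq->qbuflen - 2);
 */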

/** compare if the reuse element has the same address, port and same ssl-is
 * used-for-it characteristic */
static int
reuse_cmp_addrportssl(const void* key1, const void* key2)
{
	struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
	struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
	int r;
	/* compare address and port */
	r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
	if(r != 0)
		return r;

	/* compare if SSL-enabled */
	if(r1->is_ssl && !r2->is_ssl)
		return 1;
	if(!r1->is_ssl && r2->is_ssl)
		return -1;
	return 0;
}

int
reuse_cmp(const void* key1, const void* key2)
{
	int r;
	r = reuse_cmp_addrportssl(key1, key2);
	if(r != 0)
		return r;

	/* compare ptr value */
	if(key1 < key2) return -1;
	if(key1 > key2) return 1;
	return 0;
}

int reuse_id_cmp(const void* key1, const void* key2)
{
	struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
	struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
	if(w1->id < w2->id)
		return -1;
	if(w1->id > w2->id)
		return 1;
	return 0;
}

/** delete waiting_tcp entry. Does not unlink from waiting list.
 * @param w: to delete.
 */
static void
waiting_tcp_delete(struct waiting_tcp* w)
{
	if(!w) return;
	if(w->timer)
		comm_timer_delete(w->timer);
	free(w);
}

/**
 * Pick random outgoing-interface of that family, and bind it.
 * port set to 0 so OS picks a port number for us.
 * if it is the ANY address, do not bind.
 * @param pend: pending tcp structure, for storing the local address choice.
 * @param w: tcp structure with destination address.
 * @param s: socket fd.
 * @return false on error, socket closed.
 */
static int
pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
{
	struct port_if* pi = NULL;
	int num;
	pend->pi = NULL;
#ifdef INET6
	if(addr_is_ip6(&w->addr, w->addrlen))
		num = w->outnet->num_ip6;
	else
#endif
		num = w->outnet->num_ip4;
	if(num == 0) {
		log_err("no TCP outgoing interfaces of family");
		log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
		sock_close(s);
		return 0;
	}
#ifdef INET6
	if(addr_is_ip6(&w->addr, w->addrlen))
		pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
	else
#endif
		pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
	log_assert(pi);
	pend->pi = pi;
	if(addr_is_any(&pi->addr, pi->addrlen)) {
		/* binding to the ANY interface is for listening sockets */
		return 1;
	}
	/* set port to 0 */
	if(addr_is_ip6(&pi->addr, pi->addrlen))
		((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
	else	((struct sockaddr_in*)&pi->addr)->sin_port = 0;
	if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
#ifndef USE_WINSOCK
#ifdef EADDRNOTAVAIL
		if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
#endif
#else /* USE_WINSOCK */
		if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
#endif
			log_err("outgoing tcp: bind: %s", sock_strerror(errno));
		sock_close(s);
		return 0;
	}
	log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
	return 1;
}

/** get TCP file descriptor for address, returns -1 on failure,
 * tcp_mss is 0 or maxseg size to set for TCP packets. */
int
outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
{
	int s;
	int af;
	char* err;
#if defined(SO_REUSEADDR) || defined(IP_BIND_ADDRESS_NO_PORT)
	int on = 1;
#endif
#ifdef INET6
	if(addr_is_ip6(addr, addrlen)){
		s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
		af = AF_INET6;
	} else {
#else
	{
#endif
		af = AF_INET;
		s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
	}
	if(s == -1) {
		log_err_addr("outgoing tcp: socket", sock_strerror(errno),
			addr, addrlen);
		return -1;
	}

#ifdef SO_REUSEADDR
	if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
		(socklen_t)sizeof(on)) < 0) {
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(.. SO_REUSEADDR ..) failed");
	}
#endif

	err = set_ip_dscp(s, af, dscp);
	if(err != NULL) {
		verbose(VERB_ALGO, "outgoing tcp:"
			" error setting IP DiffServ codepoint on socket");
	}

	if(tcp_mss > 0) {
#if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
		if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
			(void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
			verbose(VERB_ALGO, "outgoing tcp:"
				" setsockopt(.. TCP_MAXSEG ..) failed");
		}
#else
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(TCP_MAXSEG) unsupported");
#endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
	}
#ifdef IP_BIND_ADDRESS_NO_PORT
	if(setsockopt(s, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, (void*)&on,
		(socklen_t)sizeof(on)) < 0) {
		verbose(VERB_ALGO, "outgoing tcp:"
			" setsockopt(.. IP_BIND_ADDRESS_NO_PORT ..) failed");
	}
#endif /* IP_BIND_ADDRESS_NO_PORT */
	return s;
}

/** connect tcp connection to addr, 0 on failure */
int
outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
{
	if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
#ifndef USE_WINSOCK
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#endif
			if(tcp_connect_errno_needs_log(
				(struct sockaddr*)addr, addrlen))
				log_err_addr("outgoing tcp: connect",
					strerror(errno), addr, addrlen);
			close(s);
			return 0;
#ifdef EINPROGRESS
		}
#endif
#else /* USE_WINSOCK */
		if(WSAGetLastError() != WSAEINPROGRESS &&
			WSAGetLastError() != WSAEWOULDBLOCK) {
			closesocket(s);
			return 0;
		}
#endif
	}
	return 1;
}
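
/*
 * For illustration, a caller can pair the two functions above roughly as
 * outnet_tcp_take_into_use() further below does (a minimal sketch, assuming
 * a nonblocking connect is wanted):
 *     int fd = outnet_get_tcp_fd(&addr, addrlen, outnet->tcp_mss,
 *         outnet->ip_dscp);
 *     if(fd == -1) return 0;
 *     fd_set_nonblock(fd);
 *     if(!outnet_tcp_connect(fd, &addr, addrlen))
 *         return 0; // hard failure, fd has already been closed
 *     // EINPROGRESS/EWOULDBLOCK count as success; the event loop reports
 *     // completion of the nonblocking connect later.
 */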

/** log reuse item addr and ptr with message */
static void
log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
{
	uint16_t port;
	char addrbuf[128];
	if(verbosity < v) return;
	if(!reuse || !reuse->pending || !reuse->pending->c)
		return;
	addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
	port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
	verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
		reuse->pending->c->fd);
}

/** pop the first element from the writewait list */
struct waiting_tcp*
reuse_write_wait_pop(struct reuse_tcp* reuse)
{
	struct waiting_tcp* w = reuse->write_wait_first;
	if(!w)
		return NULL;
	log_assert(w->write_wait_queued);
	log_assert(!w->write_wait_prev);
	reuse->write_wait_first = w->write_wait_next;
	if(w->write_wait_next)
		w->write_wait_next->write_wait_prev = NULL;
	else	reuse->write_wait_last = NULL;
	w->write_wait_queued = 0;
	w->write_wait_next = NULL;
	w->write_wait_prev = NULL;
	return w;
}

/** remove the element from the writewait list */
void
reuse_write_wait_remove(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
	log_assert(w);
	log_assert(w->write_wait_queued);
	if(!w)
		return;
	if(!w->write_wait_queued)
		return;
	if(w->write_wait_prev)
		w->write_wait_prev->write_wait_next = w->write_wait_next;
	else	reuse->write_wait_first = w->write_wait_next;
	log_assert(!w->write_wait_prev ||
		w->write_wait_prev->write_wait_next != w->write_wait_prev);
	if(w->write_wait_next)
		w->write_wait_next->write_wait_prev = w->write_wait_prev;
	else	reuse->write_wait_last = w->write_wait_prev;
	log_assert(!w->write_wait_next
		|| w->write_wait_next->write_wait_prev != w->write_wait_next);
	w->write_wait_queued = 0;
	w->write_wait_next = NULL;
	w->write_wait_prev = NULL;
}

/** push the element after the last on the writewait list */
void
reuse_write_wait_push_back(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
	if(!w) return;
	log_assert(!w->write_wait_queued);
	if(reuse->write_wait_last) {
		reuse->write_wait_last->write_wait_next = w;
		log_assert(reuse->write_wait_last->write_wait_next !=
			reuse->write_wait_last);
		w->write_wait_prev = reuse->write_wait_last;
	} else {
		reuse->write_wait_first = w;
		w->write_wait_prev = NULL;
	}
	w->write_wait_next = NULL;
	reuse->write_wait_last = w;
	w->write_wait_queued = 1;
}

/** insert element in tree by id */
void
reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
#ifdef UNBOUND_DEBUG
	rbnode_type* added;
#endif
	log_assert(w->id_node.key == NULL);
	w->id_node.key = w;
#ifdef UNBOUND_DEBUG
	added =
#else
	(void)
#endif
	rbtree_insert(&reuse->tree_by_id, &w->id_node);
	log_assert(added); /* should have been added */
}

/** find element in tree by id */
struct waiting_tcp*
reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
{
	struct waiting_tcp key_w;
	rbnode_type* n;
	memset(&key_w, 0, sizeof(key_w));
	key_w.id_node.key = &key_w;
	key_w.id = id;
	n = rbtree_search(&reuse->tree_by_id, &key_w);
	if(!n) return NULL;
	return (struct waiting_tcp*)n->key;
}

/** return ID value of rbnode in tree_by_id */
static uint16_t
tree_by_id_get_id(rbnode_type* node)
{
	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
	return w->id;
}

/** insert into reuse tcp tree and LRU, false on failure (duplicate) */
int
reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
	if(pend_tcp->reuse.item_on_lru_list) {
		if(!pend_tcp->reuse.node.key)
			log_err("internal error: reuse_tcp_insert: "
				"in lru list without key");
		return 1;
	}
	pend_tcp->reuse.node.key = &pend_tcp->reuse;
	pend_tcp->reuse.pending = pend_tcp;
	if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
		/* We are not in the LRU list but we are already in the
		 * tcp_reuse tree, strange.
		 * Continue to add ourselves to the LRU list. */
		log_err("internal error: reuse_tcp_insert: in lru list but "
			"not in the tree");
	}
	/* insert into LRU, first is newest */
	pend_tcp->reuse.lru_prev = NULL;
	if(outnet->tcp_reuse_first) {
		pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
		log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
		outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
		log_assert(outnet->tcp_reuse_first->lru_prev !=
			outnet->tcp_reuse_first);
	} else {
		pend_tcp->reuse.lru_next = NULL;
		outnet->tcp_reuse_last = &pend_tcp->reuse;
	}
	outnet->tcp_reuse_first = &pend_tcp->reuse;
	pend_tcp->reuse.item_on_lru_list = 1;
	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
	return 1;
}

/** find reuse tcp stream to destination for query, or NULL if none */
static struct reuse_tcp*
reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
	socklen_t addrlen, int use_ssl)
{
	struct waiting_tcp key_w;
	struct pending_tcp key_p;
	struct comm_point c;
	rbnode_type* result = NULL, *prev;
	verbose(VERB_CLIENT, "reuse_tcp_find");
	memset(&key_w, 0, sizeof(key_w));
	memset(&key_p, 0, sizeof(key_p));
	memset(&c, 0, sizeof(c));
	key_p.query = &key_w;
	key_p.c = &c;
	key_p.reuse.pending = &key_p;
	key_p.reuse.node.key = &key_p.reuse;
	if(use_ssl)
		key_p.reuse.is_ssl = 1;
	if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
		return NULL;
	memmove(&key_p.reuse.addr, addr, addrlen);
	key_p.reuse.addrlen = addrlen;

	verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
		(unsigned)outnet->tcp_reuse.count);
	if(outnet->tcp_reuse.root == NULL ||
		outnet->tcp_reuse.root == RBTREE_NULL)
		return NULL;
	if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
		&result)) {
		/* exact match */
		/* but the key is on stack, and ptr is compared, impossible */
		log_assert(&key_p.reuse != (struct reuse_tcp*)result);
		log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
	}
	/* not found, return null */
	if(!result || result == RBTREE_NULL)
		return NULL;
	verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
	/* inexact match, find one of possibly several connections to the
	 * same destination address, with the correct port, ssl, and
	 * also less than max number of open queries, or else, fail to open
	 * a new one */
	/* rewind to start of sequence of same address,port,ssl */
	prev = rbtree_previous(result);
	while(prev && prev != RBTREE_NULL &&
		reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
		result = prev;
		prev = rbtree_previous(result);
	}

	/* loop to find first one that has correct characteristics */
	while(result && result != RBTREE_NULL &&
		reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
		if(((struct reuse_tcp*)result)->tree_by_id.count <
			outnet->max_reuse_tcp_queries) {
			/* same address, port, ssl-yes-or-no, and has
			 * space for another query */
			return (struct reuse_tcp*)result;
		}
		result = rbtree_next(result);
	}
	return NULL;
}
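
/*
 * For illustration, the search above works because reuse_cmp() orders the
 * tcp_reuse tree first by (address, port, is_ssl) and only then by pointer
 * value, so all open streams to one destination form one consecutive run in
 * the tree. A sketch of the walk, assuming max_reuse_tcp_queries is 20 and
 * three streams to the queried destination carry 20, 7 and 20 outstanding
 * queries: rbtree_find_less_equal() lands somewhere in that run, the
 * rbtree_previous() loop rewinds to the first stream (full, 20 in flight),
 * and the forward scan returns the second stream (7 in flight, has room).
 * If every stream in the run is full, NULL is returned and the caller either
 * opens a new connection or queues the query (see use_free_buffer() below).
 */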

/** use the buffer to setup writing the query */
static void
outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
	struct waiting_tcp* w)
{
	struct timeval tv;
	verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
		"len %d timeout %d msec",
		(int)w->pkt_len, w->timeout);
	pend->c->tcp_write_pkt = w->pkt;
	pend->c->tcp_write_pkt_len = w->pkt_len;
	pend->c->tcp_write_and_read = 1;
	pend->c->tcp_write_byte_count = 0;
	pend->c->tcp_is_reading = 0;
	comm_point_start_listening(pend->c, s, -1);
	/* set timer on the waiting_tcp entry, this is the write timeout
	 * for the written packet.  The timer on pend->c is the timer
	 * for when there is no written packet and we have readtimeouts */
#ifndef S_SPLINT_S
	tv.tv_sec = w->timeout/1000;
	tv.tv_usec = (w->timeout%1000)*1000;
#endif
	/* if the waiting_tcp was previously waiting for a buffer in the
	 * outside_network.tcpwaitlist, then the timer is reset now that
	 * we start writing it */
	comm_timer_set(w->timer, &tv);
}

/** use next free buffer to service a tcp query */
static int
outnet_tcp_take_into_use(struct waiting_tcp* w)
{
	struct pending_tcp* pend = w->outnet->tcp_free;
	int s;
	log_assert(pend);
	log_assert(w->pkt);
	log_assert(w->pkt_len > 0);
	log_assert(w->addrlen > 0);
	pend->c->tcp_do_toggle_rw = 0;
	pend->c->tcp_do_close = 0;
	/* open socket */
	s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);

	if(s == -1)
		return 0;

	if(!pick_outgoing_tcp(pend, w, s))
		return 0;

	fd_set_nonblock(s);
#ifdef USE_OSX_MSG_FASTOPEN
	/* API for fast open is different here. We use a connectx() function and
	   then writes can happen as normal even using SSL.*/
	/* connectx requires that the len be set in the sockaddr struct*/
	struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
	addr_in->sin_len = w->addrlen;
	sa_endpoints_t endpoints;
	endpoints.sae_srcif = 0;
	endpoints.sae_srcaddr = NULL;
	endpoints.sae_srcaddrlen = 0;
	endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
	endpoints.sae_dstaddrlen = w->addrlen;
	if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
		CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
		NULL, 0, NULL, NULL) == -1) {
		/* if fails, failover to connect for OSX 10.10 */
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#else
		if(1) {
#endif
			if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#else /* USE_OSX_MSG_FASTOPEN*/
#ifdef USE_MSG_FASTOPEN
	pend->c->tcp_do_fastopen = 1;
	/* Only do TFO for TCP in which case no connect() is required here.
	   Don't combine client TFO with SSL, since OpenSSL can't
	   currently support doing a handshake on fd that already isn't connected*/
	if (w->outnet->sslctx && w->ssl_upstream) {
		if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#else /* USE_MSG_FASTOPEN*/
	if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
#endif /* USE_MSG_FASTOPEN*/
#endif /* USE_OSX_MSG_FASTOPEN*/
#ifndef USE_WINSOCK
#ifdef EINPROGRESS
		if(errno != EINPROGRESS) {
#else
		if(1) {
#endif
			if(tcp_connect_errno_needs_log(
				(struct sockaddr*)&w->addr, w->addrlen))
				log_err_addr("outgoing tcp: connect",
					strerror(errno), &w->addr, w->addrlen);
			close(s);
#else /* USE_WINSOCK */
		if(WSAGetLastError() != WSAEINPROGRESS &&
			WSAGetLastError() != WSAEWOULDBLOCK) {
			closesocket(s);
#endif
			return 0;
		}
	}
#ifdef USE_MSG_FASTOPEN
	}
#endif /* USE_MSG_FASTOPEN */
#ifdef USE_OSX_MSG_FASTOPEN
	}
	}
#endif /* USE_OSX_MSG_FASTOPEN */
	if(w->outnet->sslctx && w->ssl_upstream) {
		pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
		if(!pend->c->ssl) {
			pend->c->fd = s;
			comm_point_close(pend->c);
			return 0;
		}
		verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
			(w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
#ifdef USE_WINSOCK
		comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
#endif
		pend->c->ssl_shake_state = comm_ssl_shake_write;
		if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
			w->outnet->tls_use_sni)) {
			pend->c->fd = s;
#ifdef HAVE_SSL
			SSL_free(pend->c->ssl);
#endif
			pend->c->ssl = NULL;
			comm_point_close(pend->c);
			return 0;
		}
	}
	w->next_waiting = (void*)pend;
	w->outnet->num_tcp_outgoing++;
	w->outnet->tcp_free = pend->next_free;
	pend->next_free = NULL;
	pend->query = w;
	pend->reuse.outnet = w->outnet;
	pend->c->repinfo.remote_addrlen = w->addrlen;
	pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
	pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
	pend->reuse.cp_more_read_again = 0;
	pend->reuse.cp_more_write_again = 0;
	memcpy(&pend->c->repinfo.remote_addr, &w->addr, w->addrlen);
	pend->reuse.pending = pend;

	/* Remove from tree in case the is_ssl will be different and causes the
	 * identity of the reuse_tcp to change; could result in nodes not being
	 * deleted from the tree (because the new identity does not match the
	 * previous node) but their ->key would be changed to NULL. */
	if(pend->reuse.node.key)
		reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);

	if(pend->c->ssl)
		pend->reuse.is_ssl = 1;
	else	pend->reuse.is_ssl = 0;
	/* insert in reuse by address tree if not already inserted there */
	(void)reuse_tcp_insert(w->outnet, pend);
	reuse_tree_by_id_insert(&pend->reuse, w);
	outnet_tcp_take_query_setup(s, pend, w);
	return 1;
}
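
/*
 * For illustration, stripped of the TCP Fast Open and Windows variants, the
 * connect logic above reduces to roughly this sketch:
 *     fd_set_nonblock(s);
 *     if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1
 *         && errno != EINPROGRESS) {
 *         // log if interesting, then give up
 *         close(s);
 *         return 0;
 *     }
 * With USE_MSG_FASTOPEN the connect() is skipped entirely (the kernel sends
 * the SYN together with the first write), except when TLS is used, because
 * OpenSSL cannot start a handshake on a socket that is not yet connected.
 * With USE_OSX_MSG_FASTOPEN the connectx() call plays the same role, with a
 * fallback to plain connect() for older OSX versions.
 */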

/** Touch the lru of a reuse_tcp element, it is in use.
 * This moves it to the front of the list, where it is not likely to
 * be closed.  Items at the back of the list are closed to make space. */
void
reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
{
	if(!reuse->item_on_lru_list) {
		log_err("internal error: we need to touch the lru_list but item not in list");
		return; /* not on the list, no lru to modify */
	}
	log_assert(reuse->lru_prev ||
		(!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
	if(!reuse->lru_prev)
		return; /* already first in the list */
	/* remove at current position */
	/* since it is not first, there is a previous element */
	reuse->lru_prev->lru_next = reuse->lru_next;
	log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
	if(reuse->lru_next)
		reuse->lru_next->lru_prev = reuse->lru_prev;
	else	outnet->tcp_reuse_last = reuse->lru_prev;
	log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
	log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
		outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
	/* insert at the front */
	reuse->lru_prev = NULL;
	reuse->lru_next = outnet->tcp_reuse_first;
	if(outnet->tcp_reuse_first) {
		outnet->tcp_reuse_first->lru_prev = reuse;
	}
	log_assert(reuse->lru_next != reuse);
	/* since it is not first, it is not the only element and
	 * lru_next is thus not NULL and thus reuse is now not the last in
	 * the list, so outnet->tcp_reuse_last does not need to be modified */
	outnet->tcp_reuse_first = reuse;
	log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
		outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
}

/** Snip the last reuse_tcp element off of the LRU list */
struct reuse_tcp*
reuse_tcp_lru_snip(struct outside_network* outnet)
{
	struct reuse_tcp* reuse = outnet->tcp_reuse_last;
	if(!reuse) return NULL;
	/* snip off of LRU */
	log_assert(reuse->lru_next == NULL);
	if(reuse->lru_prev) {
		outnet->tcp_reuse_last = reuse->lru_prev;
		reuse->lru_prev->lru_next = NULL;
	} else {
		outnet->tcp_reuse_last = NULL;
		outnet->tcp_reuse_first = NULL;
	}
	log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
	reuse->item_on_lru_list = 0;
	reuse->lru_next = NULL;
	reuse->lru_prev = NULL;
	return reuse;
}
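
/*
 * For illustration, the LRU list keeps the most recently used stream at
 * tcp_reuse_first and the best eviction candidate at tcp_reuse_last. With
 * streams A-B-C (A newest), touching C gives C-A-B, and a subsequent snip
 * removes B. Note that reuse_tcp_lru_snip() only unlinks from the LRU list;
 * the snipped element keeps its entry in the tcp_reuse tree, so a caller
 * presumably also removes it from the tree (see reuse_tcp_remove_tree_list()
 * below) before closing the stream.
 */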

/** remove waiting tcp from the outnet waiting list */
void
outnet_waiting_tcp_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
{
	struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
	w->on_tcp_waiting_list = 0;
	while(p) {
		if(p == w) {
			/* remove w */
			if(prev)
				prev->next_waiting = w->next_waiting;
			else	outnet->tcp_wait_first = w->next_waiting;
			if(outnet->tcp_wait_last == w)
				outnet->tcp_wait_last = prev;
			w->next_waiting = NULL;
			return;
		}
		prev = p;
		p = p->next_waiting;
	}
	/* outnet_waiting_tcp_list_remove is currently called only with items
	 * that are already in the waiting list. */
	log_assert(0);
}

/** pop the first waiting tcp from the outnet waiting list */
struct waiting_tcp*
outnet_waiting_tcp_list_pop(struct outside_network* outnet)
{
	struct waiting_tcp* w = outnet->tcp_wait_first;
	if(!outnet->tcp_wait_first) return NULL;
	log_assert(w->on_tcp_waiting_list);
	outnet->tcp_wait_first = w->next_waiting;
	if(outnet->tcp_wait_last == w)
		outnet->tcp_wait_last = NULL;
	w->on_tcp_waiting_list = 0;
	w->next_waiting = NULL;
	return w;
}

/** add waiting_tcp element to the outnet tcp waiting list */
void
outnet_waiting_tcp_list_add(struct outside_network* outnet,
	struct waiting_tcp* w, int set_timer)
{
	struct timeval tv;
	log_assert(!w->on_tcp_waiting_list);
	if(w->on_tcp_waiting_list)
		return;
	w->next_waiting = NULL;
	if(outnet->tcp_wait_last)
		outnet->tcp_wait_last->next_waiting = w;
	else	outnet->tcp_wait_first = w;
	outnet->tcp_wait_last = w;
	w->on_tcp_waiting_list = 1;
	if(set_timer) {
#ifndef S_SPLINT_S
		tv.tv_sec = w->timeout/1000;
		tv.tv_usec = (w->timeout%1000)*1000;
#endif
		comm_timer_set(w->timer, &tv);
	}
}

/** add waiting_tcp element as first to the outnet tcp waiting list */
void
outnet_waiting_tcp_list_add_first(struct outside_network* outnet,
	struct waiting_tcp* w, int reset_timer)
{
	struct timeval tv;
	log_assert(!w->on_tcp_waiting_list);
	if(w->on_tcp_waiting_list)
		return;
	w->next_waiting = outnet->tcp_wait_first;
	log_assert(w->next_waiting != w);
	if(!outnet->tcp_wait_last)
		outnet->tcp_wait_last = w;
	outnet->tcp_wait_first = w;
	w->on_tcp_waiting_list = 1;
	if(reset_timer) {
#ifndef S_SPLINT_S
		tv.tv_sec = w->timeout/1000;
		tv.tv_usec = (w->timeout%1000)*1000;
#endif
		comm_timer_set(w->timer, &tv);
	}
	log_assert(
		(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
		(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
}

/** call callback on waiting_tcp, if not NULL */
static void
waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
	struct comm_reply* reply_info)
{
	if(w && w->cb) {
		fptr_ok(fptr_whitelist_pending_tcp(w->cb));
		(void)(*w->cb)(c, w->cb_arg, error, reply_info);
	}
}
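
/*
 * For illustration, the tcp wait list above is a singly linked FIFO through
 * waiting_tcp.next_waiting with head (tcp_wait_first) and tail
 * (tcp_wait_last) pointers: _add appends in O(1), _pop takes the head in
 * O(1), and only _remove has to walk the list. The set_timer/reset_timer
 * argument arms the per-query timer while the query sits in the queue,
 * presumably so that a query that never obtains a stream still times out on
 * its own.
 */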

/** see if buffers can be used to service TCP queries */
static void
use_free_buffer(struct outside_network* outnet)
{
	struct waiting_tcp* w;
	while(outnet->tcp_wait_first && !outnet->want_to_quit) {
#ifdef USE_DNSTAP
		struct pending_tcp* pend_tcp = NULL;
#endif
		struct reuse_tcp* reuse = NULL;
		w = outnet_waiting_tcp_list_pop(outnet);
		log_assert(
			(!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
		reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
			w->ssl_upstream);
		/* re-select an ID when moving to a new TCP buffer */
		w->id = tcp_select_id(outnet, reuse);
		LDNS_ID_SET(w->pkt, w->id);
		if(reuse) {
			log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
				"found reuse", reuse);
#ifdef USE_DNSTAP
			pend_tcp = reuse->pending;
#endif
			reuse_tcp_lru_touch(outnet, reuse);
			comm_timer_disable(w->timer);
			w->next_waiting = (void*)reuse->pending;
			reuse_tree_by_id_insert(reuse, w);
			if(reuse->pending->query) {
				/* on the write wait list */
				reuse_write_wait_push_back(reuse, w);
			} else {
				/* write straight away */
				/* stop the timer on read of the fd */
				comm_point_stop_listening(reuse->pending->c);
				reuse->pending->query = w;
				outnet_tcp_take_query_setup(
					reuse->pending->c->fd, reuse->pending,
					w);
			}
		} else if(outnet->tcp_free) {
			struct pending_tcp* pend = w->outnet->tcp_free;
			rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
			pend->reuse.pending = pend;
			memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
			pend->reuse.addrlen = w->addrlen;
			if(!outnet_tcp_take_into_use(w)) {
				waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
					NULL);
				waiting_tcp_delete(w);
#ifdef USE_DNSTAP
				w = NULL;
#endif
			}
#ifdef USE_DNSTAP
			pend_tcp = pend;
#endif
		} else {
			/* no reuse and no free buffer, put back at the start */
			outnet_waiting_tcp_list_add_first(outnet, w, 0);
			break;
		}
#ifdef USE_DNSTAP
		if(outnet->dtenv && pend_tcp && w && w->sq &&
			(outnet->dtenv->log_resolver_query_messages ||
			outnet->dtenv->log_forwarder_query_messages)) {
			sldns_buffer tmp;
			sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
			dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
				&pend_tcp->pi->addr, comm_tcp, w->sq->zone,
				w->sq->zonelen, &tmp);
		}
#endif
	}
}
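
/*
 * For illustration, the dispatch in use_free_buffer() boils down to the
 * following steps per waiting query (a sketch of the code above):
 *     pick a fresh message ID for the query;
 *     if an existing stream to the destination has room:
 *         register the query in that stream's tree_by_id and either write it
 *         now or append it to the stream's write_wait list;
 *     else if a free pending_tcp buffer exists:
 *         open a new connection with outnet_tcp_take_into_use();
 *     else:
 *         push the query back to the head of the wait list and stop.
 */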

/** delete element from tree by id */
static void
reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
{
#ifdef UNBOUND_DEBUG
	rbnode_type* rem;
#endif
	log_assert(w->id_node.key != NULL);
#ifdef UNBOUND_DEBUG
	rem =
#else
	(void)
#endif
	rbtree_delete(&reuse->tree_by_id, w);
	log_assert(rem); /* should have been there */
	w->id_node.key = NULL;
}

/** move writewait list to go for another connection. */
static void
reuse_move_writewait_away(struct outside_network* outnet,
	struct pending_tcp* pend)
{
	/* the writewait list has not been written yet, so if the
	 * stream was closed, they have not actually been failed, only
	 * the queries written.  Other queries can get written to another
	 * stream.  For upstreams that do not support multiple queries
	 * and answers, the stream can get closed, and then the queries
	 * can get written on a new socket */
	struct waiting_tcp* w;
	if(pend->query && pend->query->error_count == 0 &&
		pend->c->tcp_write_pkt == pend->query->pkt &&
		pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
		/* since the current query is not written, it can also
		 * move to a free buffer */
		if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
			LDNS_QDCOUNT(pend->query->pkt) > 0 &&
			dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
			char buf[LDNS_MAX_DOMAINLEN+1];
			dname_str(pend->query->pkt+12, buf);
			verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
				buf, (int)pend->c->tcp_write_byte_count);
		}
		pend->c->tcp_write_pkt = NULL;
		pend->c->tcp_write_pkt_len = 0;
		pend->c->tcp_write_and_read = 0;
		pend->reuse.cp_more_read_again = 0;
		pend->reuse.cp_more_write_again = 0;
		pend->c->tcp_is_reading = 1;
		w = pend->query;
		pend->query = NULL;
		/* increase error count, so that if the next socket fails too
		 * the server selection is run again with this query failed
		 * and it can select a different server (if possible), or
		 * fail the query */
		w->error_count ++;
		reuse_tree_by_id_delete(&pend->reuse, w);
		outnet_waiting_tcp_list_add(outnet, w, 1);
	}
	while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
		if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
			LDNS_QDCOUNT(w->pkt) > 0 &&
			dname_valid(w->pkt+12, w->pkt_len-12)) {
			char buf[LDNS_MAX_DOMAINLEN+1];
			dname_str(w->pkt+12, buf);
			verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
		}
		reuse_tree_by_id_delete(&pend->reuse, w);
		outnet_waiting_tcp_list_add(outnet, w, 1);
	}
}

/** remove reused element from tree and lru list */
void
reuse_tcp_remove_tree_list(struct outside_network* outnet,
	struct reuse_tcp* reuse)
{
	verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
	if(reuse->node.key) {
		/* delete it from reuse tree */
		if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
			/* should not be possible, it should be there */
			char buf[256];
			addr_to_str(&reuse->addr, reuse->addrlen, buf,
				sizeof(buf));
			log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
		}
		reuse->node.key = NULL;
		/* defend against loops on broken tree by zeroing the
		 * rbnode structure */
		memset(&reuse->node, 0, sizeof(reuse->node));
	}
	/* delete from reuse list */
	if(reuse->item_on_lru_list) {
		if(reuse->lru_prev) {
			/* assert that members of the lru list are waiting
			 * and thus have a pending pointer to the struct */
			log_assert(reuse->lru_prev->pending);
			reuse->lru_prev->lru_next = reuse->lru_next;
			log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
		} else {
			log_assert(!reuse->lru_next || reuse->lru_next->pending);
			outnet->tcp_reuse_first = reuse->lru_next;
			log_assert(!outnet->tcp_reuse_first ||
				(outnet->tcp_reuse_first !=
					outnet->tcp_reuse_first->lru_next &&
				outnet->tcp_reuse_first !=
					outnet->tcp_reuse_first->lru_prev));
		}
		if(reuse->lru_next) {
			/* assert that members of the lru list are waiting
			 * and thus have a pending pointer to the struct */
			log_assert(reuse->lru_next->pending);
			reuse->lru_next->lru_prev = reuse->lru_prev;
			log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
		} else {
			log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
			outnet->tcp_reuse_last = reuse->lru_prev;
			log_assert(!outnet->tcp_reuse_last ||
				(outnet->tcp_reuse_last !=
					outnet->tcp_reuse_last->lru_next &&
				outnet->tcp_reuse_last !=
					outnet->tcp_reuse_last->lru_prev));
		}
		log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
			(outnet->tcp_reuse_first && outnet->tcp_reuse_last));
		reuse->item_on_lru_list = 0;
		reuse->lru_next = NULL;
		reuse->lru_prev = NULL;
	}
	reuse->pending = NULL;
}

/** helper function that deletes an element from the tree of readwait
 * elements in tcp reuse structure */
static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
{
	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
	waiting_tcp_delete(w);
}

/** delete readwait waiting_tcp elements, deletes the elements in the list */
void reuse_del_readwait(rbtree_type* tree_by_id)
{
	if(tree_by_id->root == NULL ||
		tree_by_id->root == RBTREE_NULL)
		return;
	traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
	rbtree_init(tree_by_id, reuse_id_cmp);
}

/** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
static void
decommission_pending_tcp(struct outside_network* outnet,
	struct pending_tcp* pend)
{
	verbose(VERB_CLIENT, "decommission_pending_tcp");
	/* A certain code path can lead here twice for the same pending_tcp
	 * creating a loop in the free pending_tcp list. */
	if(outnet->tcp_free != pend) {
		pend->next_free = outnet->tcp_free;
		outnet->tcp_free = pend;
	}
	if(pend->reuse.node.key) {
		/* needs unlink from the reuse tree to get deleted */
		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
	}
	/* free SSL structure after remove from outnet tcp reuse tree,
	 * because the c->ssl null or not is used for sorting in the tree */
	if(pend->c->ssl) {
#ifdef HAVE_SSL
		SSL_shutdown(pend->c->ssl);
		SSL_free(pend->c->ssl);
		pend->c->ssl = NULL;
#endif
	}
	comm_point_close(pend->c);
	pend->reuse.cp_more_read_again = 0;
	pend->reuse.cp_more_write_again = 0;
	/* unlink the query and writewait list, it is part of the tree
	 * nodes and is deleted */
	pend->query = NULL;
	pend->reuse.write_wait_first = NULL;
	pend->reuse.write_wait_last = NULL;
	reuse_del_readwait(&pend->reuse.tree_by_id);
}

/** perform failure callbacks for waiting queries in reuse read rbtree */
static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
{
	rbnode_type* node;
	if(tree_by_id->root == NULL ||
		tree_by_id->root == RBTREE_NULL)
		return;
	node = rbtree_first(tree_by_id);
	while(node && node != RBTREE_NULL) {
		struct waiting_tcp* w = (struct waiting_tcp*)node->key;
		waiting_tcp_callback(w, NULL, err, NULL);
		node = rbtree_next(node);
	}
}

/** mark the entry for being in the cb_and_decommission stage */
static void mark_for_cb_and_decommission(rbnode_type* node,
	void* ATTR_UNUSED(arg))
{
	struct waiting_tcp* w = (struct waiting_tcp*)node->key;
	/* Mark the waiting_tcp to signal later code (serviced_delete) that
	 * this item is part of the backed up tree_by_id and will be deleted
	 * later. */
	w->in_cb_and_decommission = 1;
	/* Mark the serviced_query for deletion so that later code through
	 * callbacks (iter_clear .. outnet_serviced_query_stop) won't
	 * prematurely delete it. */
	if(w->cb)
		((struct serviced_query*)w->cb_arg)->to_be_deleted = 1;
}

/** perform callbacks for failure and also decommission pending tcp.
 * the callbacks remove references in sq->pending to the waiting_tcp
 * members of the tree_by_id in the pending tcp.  The pending_tcp is
 * removed before the callbacks, so that the callbacks do not modify
 * the pending_tcp due to its reference in the outside_network reuse tree */
static void reuse_cb_and_decommission(struct outside_network* outnet,
	struct pending_tcp* pend, int error)
{
	rbtree_type store;
	store = pend->reuse.tree_by_id;
	pend->query = NULL;
	rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
	pend->reuse.write_wait_first = NULL;
	pend->reuse.write_wait_last = NULL;
	decommission_pending_tcp(outnet, pend);
	if(store.root != NULL && store.root != RBTREE_NULL) {
		traverse_postorder(&store, &mark_for_cb_and_decommission, NULL);
	}
	reuse_cb_readwait_for_failure(&store, error);
	reuse_del_readwait(&store);
}
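
/*
 * For illustration, the teardown above runs in a deliberate order (a summary
 * of the code above):
 *   1. move the stream's tree_by_id aside into a local copy and clear the
 *      pending_tcp's own query and write_wait lists;
 *   2. decommission_pending_tcp(): return the buffer to the free list,
 *      unlink the stream from the reuse tree, free the SSL state and close
 *      the socket;
 *   3. mark the stored entries and their serviced_query owners, then run the
 *      failure callbacks on the stored copy;
 *   4. delete the stored waiting_tcp entries.
 * Running the callbacks only after step 2 keeps them from reaching this
 * pending_tcp through the outside_network reuse tree while it is being torn
 * down.
 */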

/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
static void
reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
}

/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
static void
reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
{
	log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
	sldns_buffer_clear(pend_tcp->c->buffer);
	pend_tcp->c->tcp_is_reading = 1;
	pend_tcp->c->tcp_byte_count = 0;
	comm_point_stop_listening(pend_tcp->c);
	comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
}

int
outnet_tcp_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply *reply_info)
{
	struct pending_tcp* pend = (struct pending_tcp*)arg;
	struct outside_network* outnet = pend->reuse.outnet;
	struct waiting_tcp* w = NULL;
	log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
	verbose(VERB_ALGO, "outnettcp cb");
	if(error == NETEVENT_TIMEOUT) {
		if(pend->c->tcp_write_and_read) {
			verbose(VERB_QUERY, "outnettcp got tcp timeout "
				"for read, ignored because write underway");
			/* if we are writing, ignore readtimer, wait for write timer
			 * or write is done */
			return 0;
		} else {
			verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
				(pend->reuse.tree_by_id.count?"for reading pkt":
				"for keepalive for reuse"));
		}
		/* must be timeout for reading or keepalive reuse,
		 * close it. */
		reuse_tcp_remove_tree_list(outnet, &pend->reuse);
	} else if(error == NETEVENT_PKT_WRITTEN) {
		/* the packet we want to write has been written. */
		verbose(VERB_ALGO, "outnet tcp pkt was written event");
		log_assert(c == pend->c);
		log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
		log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
		pend->c->tcp_write_pkt = NULL;
		pend->c->tcp_write_pkt_len = 0;
		/* the pend.query is already in tree_by_id */
		log_assert(pend->query->id_node.key);
		pend->query = NULL;
		/* setup to write next packet or setup read timeout */
		if(pend->reuse.write_wait_first) {
			verbose(VERB_ALGO, "outnet tcp setup next pkt");
			/* we can write it straight away perhaps, set flag
			 * because this callback called after a tcp write
			 * succeeded and likely more buffer space is available
			 * and we can write some more. */
			pend->reuse.cp_more_write_again = 1;
			pend->query = reuse_write_wait_pop(&pend->reuse);
			comm_point_stop_listening(pend->c);
			outnet_tcp_take_query_setup(pend->c->fd, pend,
				pend->query);
		} else {
			verbose(VERB_ALGO, "outnet tcp writes done, wait");
			pend->c->tcp_write_and_read = 0;
			pend->reuse.cp_more_read_again = 0;
			pend->reuse.cp_more_write_again = 0;
			pend->c->tcp_is_reading = 1;
			comm_point_stop_listening(pend->c);
			reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
		}
		return 0;
	} else if(error != NETEVENT_NOERROR) {
		verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
		reuse_move_writewait_away(outnet, pend);
		/* pass error below and exit */
	} else {
		/* check ID */
		if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
			log_addr(VERB_QUERY,
				"outnettcp: bad ID in reply, too short, from:",
				&pend->reuse.addr, pend->reuse.addrlen);
			error = NETEVENT_CLOSED;
		} else {
			uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
				c->buffer));
			/* find the query the reply is for */
			w = reuse_tcp_by_id_find(&pend->reuse, id);
			/* Make sure that the reply we got is at least for a
			 * sent query with the same ID; the waiting_tcp that
			 * gets a reply is assumed to not be waiting to be
			 * sent. */
			if(w && (w->on_tcp_waiting_list || w->write_wait_queued))
				w = NULL;
		}
	}
	if(error == NETEVENT_NOERROR && !w) {
		/* no struct waiting found in tree, no reply to call */
		log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
			&pend->reuse.addr, pend->reuse.addrlen);
		error = NETEVENT_CLOSED;
	}
	if(error == NETEVENT_NOERROR) {
		/* add to reuse tree so it can be reused, if not a failure.
		 * This is possible if the state machine wants to make a tcp
		 * query again to the same destination. */
		if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
			(void)reuse_tcp_insert(outnet, pend);
		}
	}
	if(w) {
		log_assert(!w->on_tcp_waiting_list);
		log_assert(!w->write_wait_queued);
		reuse_tree_by_id_delete(&pend->reuse, w);
		verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
			error, (int)sldns_buffer_limit(c->buffer));
		waiting_tcp_callback(w, c, error, reply_info);
		waiting_tcp_delete(w);
	}
	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
	if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
		verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
		/* it is in the reuse_tcp tree, with other queries, or
		 * on the empty list.  do not decommission it */
		/* if there are more outstanding queries, we could try to
		 * read again, to see if it is on the input,
		 * because this callback called after a successful read
		 * and there could be more bytes to read on the input */
		if(pend->reuse.tree_by_id.count != 0)
			pend->reuse.cp_more_read_again = 1;
		reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
		return 0;
	}
	verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
	/* no queries on it, no space to keep it.  or timeout or closed due
	 * to error.  Close it */
	reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
		NETEVENT_TIMEOUT:NETEVENT_CLOSED));
	use_free_buffer(outnet);
	return 0;
}
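
/*
 * For illustration: because several queries share one upstream TCP stream,
 * the 16-bit DNS message ID is the only key that ties a reply to its query.
 * outnet_tcp_cb() therefore reads the first two bytes of the reply with
 * LDNS_ID_WIRE() and looks the query up in the stream's tree_by_id; an ID
 * that is unknown, or that belongs to a query still waiting to be written,
 * is treated as a bad reply and closes the stream. tcp_select_id(), declared
 * near the top of this file, picks the ID for a query, presumably avoiding
 * IDs already present in the stream's tree_by_id.
 */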

/** lower use count on pc, see if it can be closed */
static void
portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
{
	struct port_if* pif;
	pc->num_outstanding--;
	if(pc->num_outstanding > 0) {
		return;
	}
	/* close it and replace in unused list */
	verbose(VERB_ALGO, "close of port %d", pc->number);
	comm_point_close(pc->cp);
	pif = pc->pif;
	log_assert(pif->inuse > 0);
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
	pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
#endif
	pif->inuse--;
	pif->out[pc->index] = pif->out[pif->inuse];
	pif->out[pc->index]->index = pc->index;
	pc->next = outnet->unused_fds;
	outnet->unused_fds = pc;
}
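
/*
 * For illustration, the bookkeeping above keeps two dense arrays per
 * outgoing interface: out[0..inuse-1] holds the port_comms currently in use,
 * and avail_ports[0..avail_total-inuse-1] holds the port numbers that are
 * free to pick (when explicit port randomisation is enabled). A worked
 * example with avail_total = 100 and inuse = 3, when the port_comm at out[0]
 * becomes idle:
 *     avail_ports[100 - 3] = pc->number;  // slot 97 becomes a free port
 *     inuse = 2;
 *     out[0] = out[2];                    // move last in-use entry into the hole
 *     out[0]->index = 0;                  // and fix its back-reference
 * so both arrays stay contiguous without shifting any elements.
 */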

/** try to send waiting UDP queries */
static void
outnet_send_wait_udp(struct outside_network* outnet)
{
	struct pending* pend;
	/* process waiting queries */
	while(outnet->udp_wait_first && outnet->unused_fds
		&& !outnet->want_to_quit) {
		pend = outnet->udp_wait_first;
		outnet->udp_wait_first = pend->next_waiting;
		if(!pend->next_waiting) outnet->udp_wait_last = NULL;
		sldns_buffer_clear(outnet->udp_buff);
		sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
		sldns_buffer_flip(outnet->udp_buff);
		free(pend->pkt); /* freeing now makes get_mem correct */
		pend->pkt = NULL;
		pend->pkt_len = 0;
		log_assert(!pend->sq->busy);
		pend->sq->busy = 1;
		if(!randomize_and_send_udp(pend, outnet->udp_buff,
			pend->timeout)) {
			/* callback error on pending */
			if(pend->cb) {
				fptr_ok(fptr_whitelist_pending_udp(pend->cb));
				(void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
					NETEVENT_CLOSED, NULL);
			}
			pending_delete(outnet, pend);
		} else {
			pend->sq->busy = 0;
		}
	}
}

int
outnet_udp_cb(struct comm_point* c, void* arg, int error,
	struct comm_reply *reply_info)
{
	struct outside_network* outnet = (struct outside_network*)arg;
	struct pending key;
	struct pending* p;
	verbose(VERB_ALGO, "answer cb");

	if(error != NETEVENT_NOERROR) {
		verbose(VERB_QUERY, "outnetudp got udp error %d", error);
		return 0;
	}
	if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
		verbose(VERB_QUERY, "outnetudp udp too short");
		return 0;
	}
	log_assert(reply_info);

	/* setup lookup key */
	key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
	memcpy(&key.addr, &reply_info->remote_addr, reply_info->remote_addrlen);
	key.addrlen = reply_info->remote_addrlen;
	verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
	log_addr(VERB_ALGO, "Incoming reply addr =",
		&reply_info->remote_addr, reply_info->remote_addrlen);

	/* find it, see if this thing is a valid query response */
	verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
	p = (struct pending*)rbtree_search(outnet->pending, &key);
	if(!p) {
		verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
		log_buf(VERB_ALGO, "dropped message", c->buffer);
		outnet->unwanted_replies++;
		if(outnet->unwanted_threshold && ++outnet->unwanted_total
			>= outnet->unwanted_threshold) {
			log_warn("unwanted reply total reached threshold (%u)"
				" you may be under attack."
				" defensive action: clearing the cache",
				(unsigned)outnet->unwanted_threshold);
			fptr_ok(fptr_whitelist_alloc_cleanup(
				outnet->unwanted_action));
			(*outnet->unwanted_action)(outnet->unwanted_param);
			outnet->unwanted_total = 0;
		}
		return 0;
	}

	verbose(VERB_ALGO, "received udp reply.");
	log_buf(VERB_ALGO, "udp message", c->buffer);
	if(p->pc->cp != c) {
		verbose(VERB_QUERY, "received reply id,addr on wrong port. "
			"dropped.");
		outnet->unwanted_replies++;
		if(outnet->unwanted_threshold && ++outnet->unwanted_total
			>= outnet->unwanted_threshold) {
			log_warn("unwanted reply total reached threshold (%u)"
				" you may be under attack."
				" defensive action: clearing the cache",
				(unsigned)outnet->unwanted_threshold);
			fptr_ok(fptr_whitelist_alloc_cleanup(
				outnet->unwanted_action));
			(*outnet->unwanted_action)(outnet->unwanted_param);
			outnet->unwanted_total = 0;
		}
		return 0;
	}
	comm_timer_disable(p->timer);
	verbose(VERB_ALGO, "outnet handle udp reply");
	/* delete from tree first in case callback creates a retry */
	(void)rbtree_delete(outnet->pending, p->node.key);
	if(p->cb) {
		fptr_ok(fptr_whitelist_pending_udp(p->cb));
		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info);
	}
	portcomm_loweruse(outnet, p->pc);
	pending_delete(NULL, p);
	outnet_send_wait_udp(outnet);
	return 0;
}

/** calculate number of ip4 and ip6 interfaces */
static void
calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
	int* num_ip4, int* num_ip6)
{
	int i;
	*num_ip4 = 0;
	*num_ip6 = 0;
	if(num_ifs <= 0) {
		if(do_ip4)
			*num_ip4 = 1;
		if(do_ip6)
			*num_ip6 = 1;
		return;
	}
	for(i=0; i<num_ifs; i++)
	{
		if(str_is_ip6(ifs[i])) {
			if(do_ip6)
				(*num_ip6)++;
		} else {
			if(do_ip4)
				(*num_ip4)++;
		}
	}
}

void
pending_udp_timer_delay_cb(void* arg)
{
	struct pending* p = (struct pending*)arg;
	struct outside_network* outnet = p->outnet;
	verbose(VERB_ALGO, "timeout udp with delay");
	portcomm_loweruse(outnet, p->pc);
	pending_delete(outnet, p);
	outnet_send_wait_udp(outnet);
}

void
pending_udp_timer_cb(void *arg)
{
	struct pending* p = (struct pending*)arg;
	struct outside_network* outnet = p->outnet;
	/* it timed out */
	verbose(VERB_ALGO, "timeout udp");
	if(p->cb) {
		fptr_ok(fptr_whitelist_pending_udp(p->cb));
		(void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL);
	}
	/* if delayclose, keep port open for a longer time.
	 * But if the udpwaitlist exists, then we are struggling to
	 * keep up with demand for sockets, so do not wait, but service
	 * the customer (customer service more important than portICMPs) */
	if(outnet->delayclose && !outnet->udp_wait_first) {
		p->cb = NULL;
		p->timer->callback = &pending_udp_timer_delay_cb;
		comm_timer_set(p->timer, &outnet->delay_tv);
		return;
	}
	portcomm_loweruse(outnet, p->pc);
	pending_delete(outnet, p);
	outnet_send_wait_udp(outnet);
}
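
/*
 * For illustration: after a UDP timeout the port is not always released
 * immediately. When delayclose is configured (presumably the delay-close
 * option in unbound.conf), pending_udp_timer_cb() first reports the timeout
 * to the callback, then re-arms the same timer with delay_tv and swaps the
 * callback to pending_udp_timer_delay_cb(). The socket therefore stays open
 * a little longer, presumably so that a late reply or an ICMP error hits the
 * already timed-out port instead of a freshly randomized port in use by a
 * new query. The delay is skipped when queries are queued on the udp wait
 * list, since free sockets are then more valuable than the extra protection.
 */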

/** create pending_tcp buffers */
static int
create_pending_tcp(struct outside_network* outnet, size_t bufsize)
{
	size_t i;
	if(outnet->num_tcp == 0)
		return 1; /* no tcp needed, nothing to do */
	if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
			outnet->num_tcp, sizeof(struct pending_tcp*))))
		return 0;
	for(i=0; i<outnet->num_tcp; i++) {
		if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
			sizeof(struct pending_tcp))))
			return 0;
		outnet->tcp_conns[i]->next_free = outnet->tcp_free;
		outnet->tcp_free = outnet->tcp_conns[i];
		outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
			outnet->base, bufsize, outnet_tcp_cb,
			outnet->tcp_conns[i]);
		if(!outnet->tcp_conns[i]->c)
			return 0;
	}
	return 1;
}

/** setup an outgoing interface, ready address */
static int setup_if(struct port_if* pif, const char* addrstr,
	int* avail, int numavail, size_t numfd)
{
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
	pif->avail_total = numavail;
	pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
	if(!pif->avail_ports)
		return 0;
#endif
	if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
	   !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
		&pif->addr, &pif->addrlen, &pif->pfxlen))
		return 0;
	pif->maxout = (int)numfd;
	pif->inuse = 0;
	pif->out = (struct port_comm**)calloc(numfd,
		sizeof(struct port_comm*));
	if(!pif->out)
		return 0;
	return 1;
}
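
/*
 * For illustration, each outgoing interface (struct port_if) set up above
 * carries its own copy of the configured available-port list (avail_ports,
 * avail_total), an out[] array with room for numfd simultaneously open
 * port_comms (maxout), and an inuse counter; see portcomm_loweruse() above
 * for how the two arrays are kept dense. The address string may also be a
 * netblock (stored with pfxlen), presumably so that a source address can be
 * picked at random from the prefix for outgoing queries.
 */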
1665 outnet->want_to_quit = 0; 1666 outnet->unwanted_threshold = unwanted_threshold; 1667 outnet->unwanted_action = unwanted_action; 1668 outnet->unwanted_param = unwanted_param; 1669 outnet->use_caps_for_id = use_caps_for_id; 1670 outnet->do_udp = do_udp; 1671 outnet->tcp_mss = tcp_mss; 1672 outnet->ip_dscp = dscp; 1673 #ifndef S_SPLINT_S 1674 if(delayclose) { 1675 outnet->delayclose = 1; 1676 outnet->delay_tv.tv_sec = delayclose/1000; 1677 outnet->delay_tv.tv_usec = (delayclose%1000)*1000; 1678 } 1679 #endif 1680 if(udp_connect) { 1681 outnet->udp_connect = 1; 1682 } 1683 if(numavailports == 0 || num_ports == 0) { 1684 log_err("no outgoing ports available"); 1685 outside_network_delete(outnet); 1686 return NULL; 1687 } 1688 #ifndef INET6 1689 do_ip6 = 0; 1690 #endif 1691 calc_num46(ifs, num_ifs, do_ip4, do_ip6, 1692 &outnet->num_ip4, &outnet->num_ip6); 1693 if(outnet->num_ip4 != 0) { 1694 if(!(outnet->ip4_ifs = (struct port_if*)calloc( 1695 (size_t)outnet->num_ip4, sizeof(struct port_if)))) { 1696 log_err("malloc failed"); 1697 outside_network_delete(outnet); 1698 return NULL; 1699 } 1700 } 1701 if(outnet->num_ip6 != 0) { 1702 if(!(outnet->ip6_ifs = (struct port_if*)calloc( 1703 (size_t)outnet->num_ip6, sizeof(struct port_if)))) { 1704 log_err("malloc failed"); 1705 outside_network_delete(outnet); 1706 return NULL; 1707 } 1708 } 1709 if( !(outnet->udp_buff = sldns_buffer_new(bufsize)) || 1710 !(outnet->pending = rbtree_create(pending_cmp)) || 1711 !(outnet->serviced = rbtree_create(serviced_cmp)) || 1712 !create_pending_tcp(outnet, bufsize)) { 1713 log_err("malloc failed"); 1714 outside_network_delete(outnet); 1715 return NULL; 1716 } 1717 rbtree_init(&outnet->tcp_reuse, reuse_cmp); 1718 outnet->tcp_reuse_max = num_tcp; 1719 1720 /* allocate commpoints */ 1721 for(k=0; k<num_ports; k++) { 1722 struct port_comm* pc; 1723 pc = (struct port_comm*)calloc(1, sizeof(*pc)); 1724 if(!pc) { 1725 log_err("malloc failed"); 1726 outside_network_delete(outnet); 1727 return NULL; 1728 } 1729 pc->cp = comm_point_create_udp(outnet->base, -1, 1730 outnet->udp_buff, 0, outnet_udp_cb, outnet, NULL); 1731 if(!pc->cp) { 1732 log_err("malloc failed"); 1733 free(pc); 1734 outside_network_delete(outnet); 1735 return NULL; 1736 } 1737 pc->next = outnet->unused_fds; 1738 outnet->unused_fds = pc; 1739 } 1740 1741 /* allocate interfaces */ 1742 if(num_ifs == 0) { 1743 if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0", 1744 availports, numavailports, num_ports)) { 1745 log_err("malloc failed"); 1746 outside_network_delete(outnet); 1747 return NULL; 1748 } 1749 if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::", 1750 availports, numavailports, num_ports)) { 1751 log_err("malloc failed"); 1752 outside_network_delete(outnet); 1753 return NULL; 1754 } 1755 } else { 1756 size_t done_4 = 0, done_6 = 0; 1757 int i; 1758 for(i=0; i<num_ifs; i++) { 1759 if(str_is_ip6(ifs[i]) && do_ip6) { 1760 if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i], 1761 availports, numavailports, num_ports)){ 1762 log_err("malloc failed"); 1763 outside_network_delete(outnet); 1764 return NULL; 1765 } 1766 done_6++; 1767 } 1768 if(!str_is_ip6(ifs[i]) && do_ip4) { 1769 if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i], 1770 availports, numavailports, num_ports)){ 1771 log_err("malloc failed"); 1772 outside_network_delete(outnet); 1773 return NULL; 1774 } 1775 done_4++; 1776 } 1777 } 1778 } 1779 return outnet; 1780 } 1781 1782 /** helper pending delete */ 1783 static void 1784 pending_node_del(rbnode_type* node, void* arg) 1785 { 1786 struct pending* pend 
= (struct pending*)node; 1787 struct outside_network* outnet = (struct outside_network*)arg; 1788 pending_delete(outnet, pend); 1789 } 1790 1791 /** helper serviced delete */ 1792 static void 1793 serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg)) 1794 { 1795 struct serviced_query* sq = (struct serviced_query*)node; 1796 alloc_reg_release(sq->alloc, sq->region); 1797 if(sq->timer) 1798 comm_timer_delete(sq->timer); 1799 free(sq); 1800 } 1801 1802 void 1803 outside_network_quit_prepare(struct outside_network* outnet) 1804 { 1805 if(!outnet) 1806 return; 1807 /* prevent queued items from being sent */ 1808 outnet->want_to_quit = 1; 1809 } 1810 1811 void 1812 outside_network_delete(struct outside_network* outnet) 1813 { 1814 if(!outnet) 1815 return; 1816 outnet->want_to_quit = 1; 1817 /* check every element, since we can be called on malloc error */ 1818 if(outnet->pending) { 1819 /* free pending elements, but do no unlink from tree. */ 1820 traverse_postorder(outnet->pending, pending_node_del, NULL); 1821 free(outnet->pending); 1822 } 1823 if(outnet->serviced) { 1824 traverse_postorder(outnet->serviced, serviced_node_del, NULL); 1825 free(outnet->serviced); 1826 } 1827 if(outnet->udp_buff) 1828 sldns_buffer_free(outnet->udp_buff); 1829 if(outnet->unused_fds) { 1830 struct port_comm* p = outnet->unused_fds, *np; 1831 while(p) { 1832 np = p->next; 1833 comm_point_delete(p->cp); 1834 free(p); 1835 p = np; 1836 } 1837 outnet->unused_fds = NULL; 1838 } 1839 if(outnet->ip4_ifs) { 1840 int i, k; 1841 for(i=0; i<outnet->num_ip4; i++) { 1842 for(k=0; k<outnet->ip4_ifs[i].inuse; k++) { 1843 struct port_comm* pc = outnet->ip4_ifs[i]. 1844 out[k]; 1845 comm_point_delete(pc->cp); 1846 free(pc); 1847 } 1848 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1849 free(outnet->ip4_ifs[i].avail_ports); 1850 #endif 1851 free(outnet->ip4_ifs[i].out); 1852 } 1853 free(outnet->ip4_ifs); 1854 } 1855 if(outnet->ip6_ifs) { 1856 int i, k; 1857 for(i=0; i<outnet->num_ip6; i++) { 1858 for(k=0; k<outnet->ip6_ifs[i].inuse; k++) { 1859 struct port_comm* pc = outnet->ip6_ifs[i]. 
1860 out[k]; 1861 comm_point_delete(pc->cp); 1862 free(pc); 1863 } 1864 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1865 free(outnet->ip6_ifs[i].avail_ports); 1866 #endif 1867 free(outnet->ip6_ifs[i].out); 1868 } 1869 free(outnet->ip6_ifs); 1870 } 1871 if(outnet->tcp_conns) { 1872 size_t i; 1873 for(i=0; i<outnet->num_tcp; i++) 1874 if(outnet->tcp_conns[i]) { 1875 struct pending_tcp* pend; 1876 pend = outnet->tcp_conns[i]; 1877 if(pend->reuse.item_on_lru_list) { 1878 /* delete waiting_tcp elements that 1879 * the tcp conn is working on */ 1880 decommission_pending_tcp(outnet, pend); 1881 } 1882 comm_point_delete(outnet->tcp_conns[i]->c); 1883 free(outnet->tcp_conns[i]); 1884 outnet->tcp_conns[i] = NULL; 1885 } 1886 free(outnet->tcp_conns); 1887 outnet->tcp_conns = NULL; 1888 } 1889 if(outnet->tcp_wait_first) { 1890 struct waiting_tcp* p = outnet->tcp_wait_first, *np; 1891 while(p) { 1892 np = p->next_waiting; 1893 waiting_tcp_delete(p); 1894 p = np; 1895 } 1896 } 1897 /* was allocated in struct pending that was deleted above */ 1898 rbtree_init(&outnet->tcp_reuse, reuse_cmp); 1899 outnet->tcp_reuse_first = NULL; 1900 outnet->tcp_reuse_last = NULL; 1901 if(outnet->udp_wait_first) { 1902 struct pending* p = outnet->udp_wait_first, *np; 1903 while(p) { 1904 np = p->next_waiting; 1905 pending_delete(NULL, p); 1906 p = np; 1907 } 1908 } 1909 free(outnet); 1910 } 1911 1912 void 1913 pending_delete(struct outside_network* outnet, struct pending* p) 1914 { 1915 if(!p) 1916 return; 1917 if(outnet && outnet->udp_wait_first && 1918 (p->next_waiting || p == outnet->udp_wait_last) ) { 1919 /* delete from waiting list, if it is in the waiting list */ 1920 struct pending* prev = NULL, *x = outnet->udp_wait_first; 1921 while(x && x != p) { 1922 prev = x; 1923 x = x->next_waiting; 1924 } 1925 if(x) { 1926 log_assert(x == p); 1927 if(prev) 1928 prev->next_waiting = p->next_waiting; 1929 else outnet->udp_wait_first = p->next_waiting; 1930 if(outnet->udp_wait_last == p) 1931 outnet->udp_wait_last = prev; 1932 } 1933 } 1934 if(outnet) { 1935 (void)rbtree_delete(outnet->pending, p->node.key); 1936 } 1937 if(p->timer) 1938 comm_timer_delete(p->timer); 1939 free(p->pkt); 1940 free(p); 1941 } 1942 1943 static void 1944 sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd) 1945 { 1946 int i, last; 1947 if(!(pfxlen > 0 && pfxlen < 128)) 1948 return; 1949 for(i = 0; i < (128 - pfxlen) / 8; i++) { 1950 sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256); 1951 } 1952 last = pfxlen & 7; 1953 if(last != 0) { 1954 sa->sin6_addr.s6_addr[15-i] |= 1955 ((0xFF >> last) & ub_random_max(rnd, 256)); 1956 } 1957 } 1958 1959 /** 1960 * Try to open a UDP socket for outgoing communication. 1961 * Sets sockets options as needed. 1962 * @param addr: socket address. 1963 * @param addrlen: length of address. 1964 * @param pfxlen: length of network prefix (for address randomisation). 1965 * @param port: port override for addr. 1966 * @param inuse: if -1 is returned, this bool means the port was in use. 1967 * @param rnd: random state (for address randomisation). 1968 * @param dscp: DSCP to use. 
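 * Note: when pfxlen is nonzero the IPv6 source address gets random low
 * bits (sai6_putrandom below) and the socket is created with freebind
 * set, so it can bind an address that is not locally assigned.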
1969 * @return fd or -1 1970 */ 1971 static int 1972 udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen, 1973 int port, int* inuse, struct ub_randstate* rnd, int dscp) 1974 { 1975 int fd, noproto; 1976 if(addr_is_ip6(addr, addrlen)) { 1977 int freebind = 0; 1978 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr; 1979 sa.sin6_port = (in_port_t)htons((uint16_t)port); 1980 sa.sin6_flowinfo = 0; 1981 sa.sin6_scope_id = 0; 1982 if(pfxlen != 0) { 1983 freebind = 1; 1984 sai6_putrandom(&sa, pfxlen, rnd); 1985 } 1986 fd = create_udp_sock(AF_INET6, SOCK_DGRAM, 1987 (struct sockaddr*)&sa, addrlen, 1, inuse, &noproto, 1988 0, 0, 0, NULL, 0, freebind, 0, dscp); 1989 } else { 1990 struct sockaddr_in* sa = (struct sockaddr_in*)addr; 1991 sa->sin_port = (in_port_t)htons((uint16_t)port); 1992 fd = create_udp_sock(AF_INET, SOCK_DGRAM, 1993 (struct sockaddr*)addr, addrlen, 1, inuse, &noproto, 1994 0, 0, 0, NULL, 0, 0, 0, dscp); 1995 } 1996 return fd; 1997 } 1998 1999 /** Select random ID */ 2000 static int 2001 select_id(struct outside_network* outnet, struct pending* pend, 2002 sldns_buffer* packet) 2003 { 2004 int id_tries = 0; 2005 pend->id = GET_RANDOM_ID(outnet->rnd); 2006 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id); 2007 2008 /* insert in tree */ 2009 pend->node.key = pend; 2010 while(!rbtree_insert(outnet->pending, &pend->node)) { 2011 /* change ID to avoid collision */ 2012 pend->id = GET_RANDOM_ID(outnet->rnd); 2013 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id); 2014 id_tries++; 2015 if(id_tries == MAX_ID_RETRY) { 2016 pend->id=99999; /* non existent ID */ 2017 log_err("failed to generate unique ID, drop msg"); 2018 return 0; 2019 } 2020 } 2021 verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id); 2022 return 1; 2023 } 2024 2025 /** return true is UDP connect error needs to be logged */ 2026 static int udp_connect_needs_log(int err) 2027 { 2028 switch(err) { 2029 case ECONNREFUSED: 2030 # ifdef ENETUNREACH 2031 case ENETUNREACH: 2032 # endif 2033 # ifdef EHOSTDOWN 2034 case EHOSTDOWN: 2035 # endif 2036 # ifdef EHOSTUNREACH 2037 case EHOSTUNREACH: 2038 # endif 2039 # ifdef ENETDOWN 2040 case ENETDOWN: 2041 # endif 2042 # ifdef EADDRNOTAVAIL 2043 case EADDRNOTAVAIL: 2044 # endif 2045 case EPERM: 2046 case EACCES: 2047 if(verbosity >= VERB_ALGO) 2048 return 1; 2049 return 0; 2050 default: 2051 break; 2052 } 2053 return 1; 2054 } 2055 2056 2057 /** Select random interface and port */ 2058 static int 2059 select_ifport(struct outside_network* outnet, struct pending* pend, 2060 int num_if, struct port_if* ifs) 2061 { 2062 int my_if, my_port, fd, portno, inuse, tries=0; 2063 struct port_if* pif; 2064 /* randomly select interface and port */ 2065 if(num_if == 0) { 2066 verbose(VERB_QUERY, "Need to send query but have no " 2067 "outgoing interfaces of that family"); 2068 return 0; 2069 } 2070 log_assert(outnet->unused_fds); 2071 tries = 0; 2072 while(1) { 2073 my_if = ub_random_max(outnet->rnd, num_if); 2074 pif = &ifs[my_if]; 2075 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 2076 if(outnet->udp_connect) { 2077 /* if we connect() we cannot reuse fds for a port */ 2078 if(pif->inuse >= pif->avail_total) { 2079 tries++; 2080 if(tries < MAX_PORT_RETRY) 2081 continue; 2082 log_err("failed to find an open port, drop msg"); 2083 return 0; 2084 } 2085 my_port = pif->inuse + ub_random_max(outnet->rnd, 2086 pif->avail_total - pif->inuse); 2087 } else { 2088 my_port = ub_random_max(outnet->rnd, pif->avail_total); 2089 if(my_port < pif->inuse) { 2090 /* port already 
open */ 2091 pend->pc = pif->out[my_port]; 2092 verbose(VERB_ALGO, "using UDP if=%d port=%d", 2093 my_if, pend->pc->number); 2094 break; 2095 } 2096 } 2097 /* try to open new port, if fails, loop to try again */ 2098 log_assert(pif->inuse < pif->maxout); 2099 portno = pif->avail_ports[my_port - pif->inuse]; 2100 #else 2101 my_port = portno = 0; 2102 #endif 2103 fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen, 2104 portno, &inuse, outnet->rnd, outnet->ip_dscp); 2105 if(fd == -1 && !inuse) { 2106 /* nonrecoverable error making socket */ 2107 return 0; 2108 } 2109 if(fd != -1) { 2110 verbose(VERB_ALGO, "opened UDP if=%d port=%d", 2111 my_if, portno); 2112 if(outnet->udp_connect) { 2113 /* connect() to the destination */ 2114 if(connect(fd, (struct sockaddr*)&pend->addr, 2115 pend->addrlen) < 0) { 2116 if(udp_connect_needs_log(errno)) { 2117 log_err_addr("udp connect failed", 2118 strerror(errno), &pend->addr, 2119 pend->addrlen); 2120 } 2121 sock_close(fd); 2122 return 0; 2123 } 2124 } 2125 /* grab fd */ 2126 pend->pc = outnet->unused_fds; 2127 outnet->unused_fds = pend->pc->next; 2128 2129 /* setup portcomm */ 2130 pend->pc->next = NULL; 2131 pend->pc->number = portno; 2132 pend->pc->pif = pif; 2133 pend->pc->index = pif->inuse; 2134 pend->pc->num_outstanding = 0; 2135 comm_point_start_listening(pend->pc->cp, fd, -1); 2136 2137 /* grab port in interface */ 2138 pif->out[pif->inuse] = pend->pc; 2139 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 2140 pif->avail_ports[my_port - pif->inuse] = 2141 pif->avail_ports[pif->avail_total-pif->inuse-1]; 2142 #endif 2143 pif->inuse++; 2144 break; 2145 } 2146 /* failed, already in use */ 2147 verbose(VERB_QUERY, "port %d in use, trying another", portno); 2148 tries++; 2149 if(tries == MAX_PORT_RETRY) { 2150 log_err("failed to find an open port, drop msg"); 2151 return 0; 2152 } 2153 } 2154 log_assert(pend->pc); 2155 pend->pc->num_outstanding++; 2156 2157 return 1; 2158 } 2159 2160 static int 2161 randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout) 2162 { 2163 struct timeval tv; 2164 struct outside_network* outnet = pend->sq->outnet; 2165 2166 /* select id */ 2167 if(!select_id(outnet, pend, packet)) { 2168 return 0; 2169 } 2170 2171 /* select src_if, port */ 2172 if(addr_is_ip6(&pend->addr, pend->addrlen)) { 2173 if(!select_ifport(outnet, pend, 2174 outnet->num_ip6, outnet->ip6_ifs)) 2175 return 0; 2176 } else { 2177 if(!select_ifport(outnet, pend, 2178 outnet->num_ip4, outnet->ip4_ifs)) 2179 return 0; 2180 } 2181 log_assert(pend->pc && pend->pc->cp); 2182 2183 /* send it over the commlink */ 2184 if(!comm_point_send_udp_msg(pend->pc->cp, packet, 2185 (struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) { 2186 portcomm_loweruse(outnet, pend->pc); 2187 return 0; 2188 } 2189 outnet->num_udp_outgoing++; 2190 2191 /* system calls to set timeout after sending UDP to make roundtrip 2192 smaller. */ 2193 #ifndef S_SPLINT_S 2194 tv.tv_sec = timeout/1000; 2195 tv.tv_usec = (timeout%1000)*1000; 2196 #endif 2197 comm_timer_set(pend->timer, &tv); 2198 2199 #ifdef USE_DNSTAP 2200 /* 2201 * sending src (local service)/dst (upstream) addresses over DNSTAP 2202 * There are no chances to get the src (local service) addr if unbound 2203 * is not configured with specific outgoing IP-addresses. So we will 2204 * pass 0.0.0.0 (::) to argument for 2205 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls. 
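 * (For example, if the config names an explicit outgoing interface
 * address, that address is reported here; with the default wildcard
 * interface the dnstap message simply carries 0.0.0.0 or ::.)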
2206 */ 2207 if(outnet->dtenv && 2208 (outnet->dtenv->log_resolver_query_messages || 2209 outnet->dtenv->log_forwarder_query_messages)) { 2210 log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen); 2211 log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen); 2212 dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp, 2213 pend->sq->zone, pend->sq->zonelen, packet); 2214 } 2215 #endif 2216 return 1; 2217 } 2218 2219 struct pending* 2220 pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet, 2221 int timeout, comm_point_callback_type* cb, void* cb_arg) 2222 { 2223 struct pending* pend = (struct pending*)calloc(1, sizeof(*pend)); 2224 if(!pend) return NULL; 2225 pend->outnet = sq->outnet; 2226 pend->sq = sq; 2227 pend->addrlen = sq->addrlen; 2228 memmove(&pend->addr, &sq->addr, sq->addrlen); 2229 pend->cb = cb; 2230 pend->cb_arg = cb_arg; 2231 pend->node.key = pend; 2232 pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb, 2233 pend); 2234 if(!pend->timer) { 2235 free(pend); 2236 return NULL; 2237 } 2238 2239 if(sq->outnet->unused_fds == NULL) { 2240 /* no unused fd, cannot create a new port (randomly) */ 2241 verbose(VERB_ALGO, "no fds available, udp query waiting"); 2242 pend->timeout = timeout; 2243 pend->pkt_len = sldns_buffer_limit(packet); 2244 pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet), 2245 pend->pkt_len); 2246 if(!pend->pkt) { 2247 comm_timer_delete(pend->timer); 2248 free(pend); 2249 return NULL; 2250 } 2251 /* put at end of waiting list */ 2252 if(sq->outnet->udp_wait_last) 2253 sq->outnet->udp_wait_last->next_waiting = pend; 2254 else 2255 sq->outnet->udp_wait_first = pend; 2256 sq->outnet->udp_wait_last = pend; 2257 return pend; 2258 } 2259 log_assert(!sq->busy); 2260 sq->busy = 1; 2261 if(!randomize_and_send_udp(pend, packet, timeout)) { 2262 pending_delete(sq->outnet, pend); 2263 return NULL; 2264 } 2265 sq->busy = 0; 2266 return pend; 2267 } 2268 2269 void 2270 outnet_tcptimer(void* arg) 2271 { 2272 struct waiting_tcp* w = (struct waiting_tcp*)arg; 2273 struct outside_network* outnet = w->outnet; 2274 verbose(VERB_CLIENT, "outnet_tcptimer"); 2275 if(w->on_tcp_waiting_list) { 2276 /* it is on the waiting list */ 2277 outnet_waiting_tcp_list_remove(outnet, w); 2278 waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL); 2279 waiting_tcp_delete(w); 2280 } else { 2281 /* it was in use */ 2282 struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting; 2283 reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT); 2284 } 2285 use_free_buffer(outnet); 2286 } 2287 2288 /** close the oldest reuse_tcp connection to make a fd and struct pend 2289 * available for a new stream connection */ 2290 static void 2291 reuse_tcp_close_oldest(struct outside_network* outnet) 2292 { 2293 struct reuse_tcp* reuse; 2294 verbose(VERB_CLIENT, "reuse_tcp_close_oldest"); 2295 reuse = reuse_tcp_lru_snip(outnet); 2296 if(!reuse) return; 2297 /* free up */ 2298 reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED); 2299 } 2300 2301 static uint16_t 2302 tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse) 2303 { 2304 if(reuse) 2305 return reuse_tcp_select_id(reuse, outnet); 2306 return GET_RANDOM_ID(outnet->rnd); 2307 } 2308 2309 /** find spare ID value for reuse tcp stream. 
That is random and also does 2310 * not collide with an existing query ID that is in use or waiting */ 2311 uint16_t 2312 reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet) 2313 { 2314 uint16_t id = 0, curid, nextid; 2315 const int try_random = 2000; 2316 int i; 2317 unsigned select, count, space; 2318 rbnode_type* node; 2319 2320 /* make really sure the tree is not empty */ 2321 if(reuse->tree_by_id.count == 0) { 2322 id = GET_RANDOM_ID(outnet->rnd); 2323 return id; 2324 } 2325 2326 /* try to find random empty spots by picking them */ 2327 for(i = 0; i<try_random; i++) { 2328 id = GET_RANDOM_ID(outnet->rnd); 2329 if(!reuse_tcp_by_id_find(reuse, id)) { 2330 return id; 2331 } 2332 } 2333 2334 /* equally pick a random unused element from the tree that is 2335 * not in use. Pick a the n-th index of an unused number, 2336 * then loop over the empty spaces in the tree and find it */ 2337 log_assert(reuse->tree_by_id.count < 0xffff); 2338 select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count); 2339 /* select value now in 0 .. num free - 1 */ 2340 2341 count = 0; /* number of free spaces passed by */ 2342 node = rbtree_first(&reuse->tree_by_id); 2343 log_assert(node && node != RBTREE_NULL); /* tree not empty */ 2344 /* see if select is before first node */ 2345 if(select < (unsigned)tree_by_id_get_id(node)) 2346 return select; 2347 count += tree_by_id_get_id(node); 2348 /* perhaps select is between nodes */ 2349 while(node && node != RBTREE_NULL) { 2350 rbnode_type* next = rbtree_next(node); 2351 if(next && next != RBTREE_NULL) { 2352 curid = tree_by_id_get_id(node); 2353 nextid = tree_by_id_get_id(next); 2354 log_assert(curid < nextid); 2355 if(curid != 0xffff && curid + 1 < nextid) { 2356 /* space between nodes */ 2357 space = nextid - curid - 1; 2358 log_assert(select >= count); 2359 if(select < count + space) { 2360 /* here it is */ 2361 return curid + 1 + (select - count); 2362 } 2363 count += space; 2364 } 2365 } 2366 node = next; 2367 } 2368 2369 /* select is after the last node */ 2370 /* count is the number of free positions before the nodes in the 2371 * tree */ 2372 node = rbtree_last(&reuse->tree_by_id); 2373 log_assert(node && node != RBTREE_NULL); /* tree not empty */ 2374 curid = tree_by_id_get_id(node); 2375 log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff); 2376 return curid + 1 + (select - count); 2377 } 2378 2379 struct waiting_tcp* 2380 pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet, 2381 int timeout, comm_point_callback_type* callback, void* callback_arg) 2382 { 2383 struct pending_tcp* pend = sq->outnet->tcp_free; 2384 struct reuse_tcp* reuse = NULL; 2385 struct waiting_tcp* w; 2386 2387 verbose(VERB_CLIENT, "pending_tcp_query"); 2388 if(sldns_buffer_limit(packet) < sizeof(uint16_t)) { 2389 verbose(VERB_ALGO, "pending tcp query with too short buffer < 2"); 2390 return NULL; 2391 } 2392 2393 /* find out if a reused stream to the target exists */ 2394 /* if so, take it into use */ 2395 reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen, 2396 sq->ssl_upstream); 2397 if(reuse) { 2398 log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse); 2399 log_assert(reuse->pending); 2400 pend = reuse->pending; 2401 reuse_tcp_lru_touch(sq->outnet, reuse); 2402 } 2403 2404 log_assert(!reuse || (reuse && pend)); 2405 /* if !pend but we have reuse streams, close a reuse stream 2406 * to be able to open a new one to this target, no use waiting 2407 * to reuse a file descriptor while another query 
needs to use 2408 * that buffer and file descriptor now. */ 2409 if(!pend) { 2410 reuse_tcp_close_oldest(sq->outnet); 2411 pend = sq->outnet->tcp_free; 2412 log_assert(!reuse || (pend == reuse->pending)); 2413 } 2414 2415 /* allocate space to store query */ 2416 w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp) 2417 + sldns_buffer_limit(packet)); 2418 if(!w) { 2419 return NULL; 2420 } 2421 if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) { 2422 free(w); 2423 return NULL; 2424 } 2425 w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp); 2426 w->pkt_len = sldns_buffer_limit(packet); 2427 memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len); 2428 w->id = tcp_select_id(sq->outnet, reuse); 2429 LDNS_ID_SET(w->pkt, w->id); 2430 memcpy(&w->addr, &sq->addr, sq->addrlen); 2431 w->addrlen = sq->addrlen; 2432 w->outnet = sq->outnet; 2433 w->on_tcp_waiting_list = 0; 2434 w->next_waiting = NULL; 2435 w->cb = callback; 2436 w->cb_arg = callback_arg; 2437 w->ssl_upstream = sq->ssl_upstream; 2438 w->tls_auth_name = sq->tls_auth_name; 2439 w->timeout = timeout; 2440 w->id_node.key = NULL; 2441 w->write_wait_prev = NULL; 2442 w->write_wait_next = NULL; 2443 w->write_wait_queued = 0; 2444 w->error_count = 0; 2445 #ifdef USE_DNSTAP 2446 w->sq = NULL; 2447 #endif 2448 w->in_cb_and_decommission = 0; 2449 if(pend) { 2450 /* we have a buffer available right now */ 2451 if(reuse) { 2452 log_assert(reuse == &pend->reuse); 2453 /* reuse existing fd, write query and continue */ 2454 /* store query in tree by id */ 2455 verbose(VERB_CLIENT, "pending_tcp_query: reuse, store"); 2456 w->next_waiting = (void*)pend; 2457 reuse_tree_by_id_insert(&pend->reuse, w); 2458 /* can we write right now? */ 2459 if(pend->query == NULL) { 2460 /* write straight away */ 2461 /* stop the timer on read of the fd */ 2462 comm_point_stop_listening(pend->c); 2463 pend->query = w; 2464 outnet_tcp_take_query_setup(pend->c->fd, pend, 2465 w); 2466 } else { 2467 /* put it in the waiting list for 2468 * this stream */ 2469 reuse_write_wait_push_back(&pend->reuse, w); 2470 } 2471 } else { 2472 /* create new fd and connect to addr, setup to 2473 * write query */ 2474 verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect"); 2475 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp); 2476 pend->reuse.pending = pend; 2477 memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen); 2478 pend->reuse.addrlen = sq->addrlen; 2479 if(!outnet_tcp_take_into_use(w)) { 2480 waiting_tcp_delete(w); 2481 return NULL; 2482 } 2483 } 2484 #ifdef USE_DNSTAP 2485 if(sq->outnet->dtenv && 2486 (sq->outnet->dtenv->log_resolver_query_messages || 2487 sq->outnet->dtenv->log_forwarder_query_messages)) { 2488 /* use w->pkt, because it has the ID value */ 2489 sldns_buffer tmp; 2490 sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len); 2491 dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr, 2492 &pend->pi->addr, comm_tcp, sq->zone, 2493 sq->zonelen, &tmp); 2494 } 2495 #endif 2496 } else { 2497 /* queue up */ 2498 /* waiting for a buffer on the outside network buffer wait 2499 * list */ 2500 verbose(VERB_CLIENT, "pending_tcp_query: queue to wait"); 2501 #ifdef USE_DNSTAP 2502 w->sq = sq; 2503 #endif 2504 outnet_waiting_tcp_list_add(sq->outnet, w, 1); 2505 } 2506 return w; 2507 } 2508 2509 /** create query for serviced queries */ 2510 static void 2511 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen, 2512 uint16_t qtype, uint16_t qclass, uint16_t flags) 2513 { 2514 sldns_buffer_clear(buff); 2515 /* skip id */ 2516 
sldns_buffer_write_u16(buff, flags);
2517 sldns_buffer_write_u16(buff, 1); /* qdcount */
2518 sldns_buffer_write_u16(buff, 0); /* ancount */
2519 sldns_buffer_write_u16(buff, 0); /* nscount */
2520 sldns_buffer_write_u16(buff, 0); /* arcount */
2521 sldns_buffer_write(buff, qname, qnamelen);
2522 sldns_buffer_write_u16(buff, qtype);
2523 sldns_buffer_write_u16(buff, qclass);
2524 sldns_buffer_flip(buff);
2525 }
2526
2527 /** lookup serviced query in serviced query rbtree */
2528 static struct serviced_query*
2529 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2530 struct sockaddr_storage* addr, socklen_t addrlen,
2531 struct edns_option* opt_list)
2532 {
2533 struct serviced_query key;
2534 key.node.key = &key;
2535 key.qbuf = sldns_buffer_begin(buff);
2536 key.qbuflen = sldns_buffer_limit(buff);
2537 key.dnssec = dnssec;
2538 memcpy(&key.addr, addr, addrlen);
2539 key.addrlen = addrlen;
2540 key.outnet = outnet;
2541 key.opt_list = opt_list;
2542 return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
2543 }
2544
2545 void
2546 serviced_timer_cb(void* arg)
2547 {
2548 struct serviced_query* sq = (struct serviced_query*)arg;
2549 struct outside_network* outnet = sq->outnet;
2550 verbose(VERB_ALGO, "serviced send timer");
2551 /* By the time this cb is called we may no longer have any registered
2552 * callbacks for this serviced_query; if so, do not send. */
2553 if(!sq->cblist)
2554 goto delete;
2555 /* perform first network action */
2556 if(outnet->do_udp && !(sq->tcp_upstream || sq->ssl_upstream)) {
2557 if(!serviced_udp_send(sq, outnet->udp_buff))
2558 goto delete;
2559 } else {
2560 if(!serviced_tcp_send(sq, outnet->udp_buff))
2561 goto delete;
2562 }
2563 /* Maybe by this time we don't have callbacks attached anymore. Don't
2564 * proactively try to delete; let it run and maybe another callback
2565 * will get attached by the time we get an answer.
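 * The failed-send cases above jumped to the delete label, which reports
 * NETEVENT_CLOSED to whatever callbacks are still attached.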
*/ 2566 return; 2567 delete: 2568 serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL); 2569 } 2570 2571 /** Create new serviced entry */ 2572 static struct serviced_query* 2573 serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec, 2574 int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream, 2575 char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen, 2576 uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list, 2577 size_t pad_queries_block_size, struct alloc_cache* alloc, 2578 struct regional* region) 2579 { 2580 struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq)); 2581 struct timeval t; 2582 #ifdef UNBOUND_DEBUG 2583 rbnode_type* ins; 2584 #endif 2585 if(!sq) { 2586 alloc_reg_release(alloc, region); 2587 return NULL; 2588 } 2589 sq->node.key = sq; 2590 sq->alloc = alloc; 2591 sq->region = region; 2592 sq->qbuf = regional_alloc_init(region, sldns_buffer_begin(buff), 2593 sldns_buffer_limit(buff)); 2594 if(!sq->qbuf) { 2595 alloc_reg_release(alloc, region); 2596 free(sq); 2597 return NULL; 2598 } 2599 sq->qbuflen = sldns_buffer_limit(buff); 2600 sq->zone = regional_alloc_init(region, zone, zonelen); 2601 if(!sq->zone) { 2602 alloc_reg_release(alloc, region); 2603 free(sq); 2604 return NULL; 2605 } 2606 sq->zonelen = zonelen; 2607 sq->qtype = qtype; 2608 sq->dnssec = dnssec; 2609 sq->want_dnssec = want_dnssec; 2610 sq->nocaps = nocaps; 2611 sq->tcp_upstream = tcp_upstream; 2612 sq->ssl_upstream = ssl_upstream; 2613 if(tls_auth_name) { 2614 sq->tls_auth_name = regional_strdup(region, tls_auth_name); 2615 if(!sq->tls_auth_name) { 2616 alloc_reg_release(alloc, region); 2617 free(sq); 2618 return NULL; 2619 } 2620 } else { 2621 sq->tls_auth_name = NULL; 2622 } 2623 memcpy(&sq->addr, addr, addrlen); 2624 sq->addrlen = addrlen; 2625 sq->opt_list = opt_list; 2626 sq->busy = 0; 2627 sq->timer = comm_timer_create(outnet->base, serviced_timer_cb, sq); 2628 if(!sq->timer) { 2629 alloc_reg_release(alloc, region); 2630 free(sq); 2631 return NULL; 2632 } 2633 memset(&t, 0, sizeof(t)); 2634 comm_timer_set(sq->timer, &t); 2635 sq->outnet = outnet; 2636 sq->cblist = NULL; 2637 sq->pending = NULL; 2638 sq->status = serviced_initial; 2639 sq->retry = 0; 2640 sq->to_be_deleted = 0; 2641 sq->padding_block_size = pad_queries_block_size; 2642 #ifdef UNBOUND_DEBUG 2643 ins = 2644 #else 2645 (void) 2646 #endif 2647 rbtree_insert(outnet->serviced, &sq->node); 2648 log_assert(ins != NULL); /* must not be already present */ 2649 return sq; 2650 } 2651 2652 /** reuse tcp stream, remove serviced query from stream, 2653 * return true if the stream is kept, false if it is to be closed */ 2654 static int 2655 reuse_tcp_remove_serviced_keep(struct waiting_tcp* w, 2656 struct serviced_query* sq) 2657 { 2658 struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting; 2659 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep"); 2660 /* remove the callback. let query continue to write to not cancel 2661 * the stream itself. 
also keep it as an entry in the tree_by_id, 2662 * in case the answer returns (that we no longer want), but we cannot 2663 * pick the same ID number meanwhile */ 2664 w->cb = NULL; 2665 /* see if can be entered in reuse tree 2666 * for that the FD has to be non-1 */ 2667 if(pend_tcp->c->fd == -1) { 2668 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd"); 2669 return 0; 2670 } 2671 /* if in tree and used by other queries */ 2672 if(pend_tcp->reuse.node.key) { 2673 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries"); 2674 /* do not reset the keepalive timer, for that 2675 * we'd need traffic, and this is where the serviced is 2676 * removed due to state machine internal reasons, 2677 * eg. iterator no longer interested in this query */ 2678 return 1; 2679 } 2680 /* if still open and want to keep it open */ 2681 if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count < 2682 sq->outnet->tcp_reuse_max) { 2683 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open"); 2684 /* set a keepalive timer on it */ 2685 if(!reuse_tcp_insert(sq->outnet, pend_tcp)) { 2686 return 0; 2687 } 2688 reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout); 2689 return 1; 2690 } 2691 return 0; 2692 } 2693 2694 /** cleanup serviced query entry */ 2695 static void 2696 serviced_delete(struct serviced_query* sq) 2697 { 2698 verbose(VERB_CLIENT, "serviced_delete"); 2699 if(sq->pending) { 2700 /* clear up the pending query */ 2701 if(sq->status == serviced_query_UDP_EDNS || 2702 sq->status == serviced_query_UDP || 2703 sq->status == serviced_query_UDP_EDNS_FRAG || 2704 sq->status == serviced_query_UDP_EDNS_fallback) { 2705 struct pending* p = (struct pending*)sq->pending; 2706 verbose(VERB_CLIENT, "serviced_delete: UDP"); 2707 if(p->pc) 2708 portcomm_loweruse(sq->outnet, p->pc); 2709 pending_delete(sq->outnet, p); 2710 /* this call can cause reentrant calls back into the 2711 * mesh */ 2712 outnet_send_wait_udp(sq->outnet); 2713 } else { 2714 struct waiting_tcp* w = (struct waiting_tcp*) 2715 sq->pending; 2716 verbose(VERB_CLIENT, "serviced_delete: TCP"); 2717 log_assert(!(w->write_wait_queued && w->on_tcp_waiting_list)); 2718 /* if on stream-write-waiting list then 2719 * remove from waiting list and waiting_tcp_delete */ 2720 if(w->write_wait_queued) { 2721 struct pending_tcp* pend = 2722 (struct pending_tcp*)w->next_waiting; 2723 verbose(VERB_CLIENT, "serviced_delete: writewait"); 2724 if(!w->in_cb_and_decommission) 2725 reuse_tree_by_id_delete(&pend->reuse, w); 2726 reuse_write_wait_remove(&pend->reuse, w); 2727 if(!w->in_cb_and_decommission) 2728 waiting_tcp_delete(w); 2729 } else if(!w->on_tcp_waiting_list) { 2730 struct pending_tcp* pend = 2731 (struct pending_tcp*)w->next_waiting; 2732 verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep"); 2733 /* w needs to stay on tree_by_id to not assign 2734 * the same ID; remove the callback since its 2735 * serviced_query will be gone. 
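 * If the stream cannot be kept (reuse_tcp_remove_serviced_keep
 * below fails) the whole pending_tcp is decommissioned instead.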
*/ 2736 w->cb = NULL; 2737 if(!reuse_tcp_remove_serviced_keep(w, sq)) { 2738 if(!w->in_cb_and_decommission) 2739 reuse_cb_and_decommission(sq->outnet, 2740 pend, NETEVENT_CLOSED); 2741 use_free_buffer(sq->outnet); 2742 } 2743 sq->pending = NULL; 2744 } else { 2745 verbose(VERB_CLIENT, "serviced_delete: tcpwait"); 2746 outnet_waiting_tcp_list_remove(sq->outnet, w); 2747 if(!w->in_cb_and_decommission) 2748 waiting_tcp_delete(w); 2749 } 2750 } 2751 } 2752 /* does not delete from tree, caller has to do that */ 2753 serviced_node_del(&sq->node, NULL); 2754 } 2755 2756 /** perturb a dname capitalization randomly */ 2757 static void 2758 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len) 2759 { 2760 uint8_t lablen; 2761 uint8_t* d = qbuf + 10; 2762 long int random = 0; 2763 int bits = 0; 2764 log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */); 2765 (void)len; 2766 lablen = *d++; 2767 while(lablen) { 2768 while(lablen--) { 2769 /* only perturb A-Z, a-z */ 2770 if(isalpha((unsigned char)*d)) { 2771 /* get a random bit */ 2772 if(bits == 0) { 2773 random = ub_random(rnd); 2774 bits = 30; 2775 } 2776 if(random & 0x1) { 2777 *d = (uint8_t)toupper((unsigned char)*d); 2778 } else { 2779 *d = (uint8_t)tolower((unsigned char)*d); 2780 } 2781 random >>= 1; 2782 bits--; 2783 } 2784 d++; 2785 } 2786 lablen = *d++; 2787 } 2788 if(verbosity >= VERB_ALGO) { 2789 char buf[LDNS_MAX_DOMAINLEN+1]; 2790 dname_str(qbuf+10, buf); 2791 verbose(VERB_ALGO, "qname perturbed to %s", buf); 2792 } 2793 } 2794 2795 /** put serviced query into a buffer */ 2796 static void 2797 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns) 2798 { 2799 /* if we are using 0x20 bits for ID randomness, perturb them */ 2800 if(sq->outnet->use_caps_for_id && !sq->nocaps) { 2801 serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen); 2802 } 2803 /* generate query */ 2804 sldns_buffer_clear(buff); 2805 sldns_buffer_write_u16(buff, 0); /* id placeholder */ 2806 sldns_buffer_write(buff, sq->qbuf, sq->qbuflen); 2807 sldns_buffer_flip(buff); 2808 if(with_edns) { 2809 /* add edns section */ 2810 struct edns_data edns; 2811 struct edns_option padding_option; 2812 edns.edns_present = 1; 2813 edns.ext_rcode = 0; 2814 edns.edns_version = EDNS_ADVERTISED_VERSION; 2815 edns.opt_list_in = NULL; 2816 edns.opt_list_out = sq->opt_list; 2817 edns.opt_list_inplace_cb_out = NULL; 2818 if(sq->status == serviced_query_UDP_EDNS_FRAG) { 2819 if(addr_is_ip6(&sq->addr, sq->addrlen)) { 2820 if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE) 2821 edns.udp_size = EDNS_FRAG_SIZE_IP6; 2822 else edns.udp_size = EDNS_ADVERTISED_SIZE; 2823 } else { 2824 if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE) 2825 edns.udp_size = EDNS_FRAG_SIZE_IP4; 2826 else edns.udp_size = EDNS_ADVERTISED_SIZE; 2827 } 2828 } else { 2829 edns.udp_size = EDNS_ADVERTISED_SIZE; 2830 } 2831 edns.bits = 0; 2832 if(sq->dnssec & EDNS_DO) 2833 edns.bits = EDNS_DO; 2834 if(sq->dnssec & BIT_CD) 2835 LDNS_CD_SET(sldns_buffer_begin(buff)); 2836 if (sq->ssl_upstream && sq->padding_block_size) { 2837 padding_option.opt_code = LDNS_EDNS_PADDING; 2838 padding_option.opt_len = 0; 2839 padding_option.opt_data = NULL; 2840 padding_option.next = edns.opt_list_out; 2841 edns.opt_list_out = &padding_option; 2842 edns.padding_block_size = sq->padding_block_size; 2843 } 2844 attach_edns_record(buff, &edns); 2845 } 2846 } 2847 2848 /** 2849 * Perform serviced query UDP sending operation. 2850 * Sends UDP with EDNS, unless infra host marked non EDNS. 
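 * The EDNS decision follows the infra cache lookup in the function: a
 * cached EDNS version status of -1 selects the plain UDP path, anything
 * else the UDP-with-EDNS path.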
2851 * @param sq: query to send. 2852 * @param buff: buffer scratch space. 2853 * @return 0 on error. 2854 */ 2855 static int 2856 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff) 2857 { 2858 int rtt, vs; 2859 uint8_t edns_lame_known; 2860 time_t now = *sq->outnet->now_secs; 2861 2862 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone, 2863 sq->zonelen, now, &vs, &edns_lame_known, &rtt)) 2864 return 0; 2865 sq->last_rtt = rtt; 2866 verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs); 2867 if(sq->status == serviced_initial) { 2868 if(vs != -1) { 2869 sq->status = serviced_query_UDP_EDNS; 2870 } else { 2871 sq->status = serviced_query_UDP; 2872 } 2873 } 2874 serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) || 2875 (sq->status == serviced_query_UDP_EDNS_FRAG)); 2876 sq->last_sent_time = *sq->outnet->now_tv; 2877 sq->edns_lame_known = (int)edns_lame_known; 2878 verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt); 2879 sq->pending = pending_udp_query(sq, buff, rtt, 2880 serviced_udp_callback, sq); 2881 if(!sq->pending) 2882 return 0; 2883 return 1; 2884 } 2885 2886 /** check that perturbed qname is identical */ 2887 static int 2888 serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen) 2889 { 2890 uint8_t* d1 = sldns_buffer_begin(pkt)+12; 2891 uint8_t* d2 = qbuf+10; 2892 uint8_t len1, len2; 2893 int count = 0; 2894 if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */ 2895 return 0; 2896 log_assert(qbuflen >= 15 /* 10 header, root, type, class */); 2897 len1 = *d1++; 2898 len2 = *d2++; 2899 while(len1 != 0 || len2 != 0) { 2900 if(LABEL_IS_PTR(len1)) { 2901 /* check if we can read *d1 with compression ptr rest */ 2902 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2903 return 0; 2904 d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1); 2905 /* check if we can read the destination *d1 */ 2906 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2907 return 0; 2908 len1 = *d1++; 2909 if(count++ > MAX_COMPRESS_PTRS) 2910 return 0; 2911 continue; 2912 } 2913 if(d2 > qbuf+qbuflen) 2914 return 0; 2915 if(len1 != len2) 2916 return 0; 2917 if(len1 > LDNS_MAX_LABELLEN) 2918 return 0; 2919 /* check len1 + 1(next length) are okay to read */ 2920 if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2921 return 0; 2922 log_assert(len1 <= LDNS_MAX_LABELLEN); 2923 log_assert(len2 <= LDNS_MAX_LABELLEN); 2924 log_assert(len1 == len2 && len1 != 0); 2925 /* compare the labels - bitwise identical */ 2926 if(memcmp(d1, d2, len1) != 0) 2927 return 0; 2928 d1 += len1; 2929 d2 += len2; 2930 len1 = *d1++; 2931 len2 = *d2++; 2932 } 2933 return 1; 2934 } 2935 2936 /** call the callbacks for a serviced query */ 2937 static void 2938 serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c, 2939 struct comm_reply* rep) 2940 { 2941 struct service_callback* p; 2942 int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/ 2943 uint8_t *backup_p = NULL; 2944 size_t backlen = 0; 2945 #ifdef UNBOUND_DEBUG 2946 rbnode_type* rem = 2947 #else 2948 (void) 2949 #endif 2950 /* remove from tree, and schedule for deletion, so that callbacks 2951 * can safely deregister themselves and even create new serviced 2952 * queries that are identical to this one. 
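 * outnet_serviced_query_stop() checks the to_be_deleted flag that is
 * set below, and leaves the final delete to this routine.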
*/ 2953 rbtree_delete(sq->outnet->serviced, sq); 2954 log_assert(rem); /* should have been present */ 2955 sq->to_be_deleted = 1; 2956 verbose(VERB_ALGO, "svcd callbacks start"); 2957 if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c && 2958 !sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) { 2959 /* for type PTR do not check perturbed name in answer, 2960 * compatibility with cisco dns guard boxes that mess up 2961 * reverse queries 0x20 contents */ 2962 /* noerror and nxdomain must have a qname in reply */ 2963 if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 && 2964 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2965 == LDNS_RCODE_NOERROR || 2966 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2967 == LDNS_RCODE_NXDOMAIN)) { 2968 verbose(VERB_DETAIL, "no qname in reply to check 0x20ID"); 2969 log_addr(VERB_DETAIL, "from server", 2970 &sq->addr, sq->addrlen); 2971 log_buf(VERB_DETAIL, "for packet", c->buffer); 2972 error = NETEVENT_CLOSED; 2973 c = NULL; 2974 } else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 && 2975 !serviced_check_qname(c->buffer, sq->qbuf, 2976 sq->qbuflen)) { 2977 verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname"); 2978 log_addr(VERB_DETAIL, "from server", 2979 &sq->addr, sq->addrlen); 2980 log_buf(VERB_DETAIL, "for packet", c->buffer); 2981 error = NETEVENT_CAPSFAIL; 2982 /* and cleanup too */ 2983 pkt_dname_tolower(c->buffer, 2984 sldns_buffer_at(c->buffer, 12)); 2985 } else { 2986 verbose(VERB_ALGO, "good 0x20-ID in reply qname"); 2987 /* cleanup caps, prettier cache contents. */ 2988 pkt_dname_tolower(c->buffer, 2989 sldns_buffer_at(c->buffer, 12)); 2990 } 2991 } 2992 if(dobackup && c) { 2993 /* make a backup of the query, since the querystate processing 2994 * may send outgoing queries that overwrite the buffer. 2995 * use secondary buffer to store the query. 
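 * (The copy is allocated in sq->region, so it is freed together with
 * the serviced query.)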
2996 * This is a data copy, but faster than packet to server */ 2997 backlen = sldns_buffer_limit(c->buffer); 2998 backup_p = regional_alloc_init(sq->region, 2999 sldns_buffer_begin(c->buffer), backlen); 3000 if(!backup_p) { 3001 log_err("malloc failure in serviced query callbacks"); 3002 error = NETEVENT_CLOSED; 3003 c = NULL; 3004 } 3005 sq->outnet->svcd_overhead = backlen; 3006 } 3007 /* test the actual sq->cblist, because the next elem could be deleted*/ 3008 while((p=sq->cblist) != NULL) { 3009 sq->cblist = p->next; /* remove this element */ 3010 if(dobackup && c) { 3011 sldns_buffer_clear(c->buffer); 3012 sldns_buffer_write(c->buffer, backup_p, backlen); 3013 sldns_buffer_flip(c->buffer); 3014 } 3015 fptr_ok(fptr_whitelist_serviced_query(p->cb)); 3016 (void)(*p->cb)(c, p->cb_arg, error, rep); 3017 } 3018 if(backup_p) { 3019 sq->outnet->svcd_overhead = 0; 3020 } 3021 verbose(VERB_ALGO, "svcd callbacks end"); 3022 log_assert(sq->cblist == NULL); 3023 serviced_delete(sq); 3024 } 3025 3026 int 3027 serviced_tcp_callback(struct comm_point* c, void* arg, int error, 3028 struct comm_reply* rep) 3029 { 3030 struct serviced_query* sq = (struct serviced_query*)arg; 3031 struct comm_reply r2; 3032 #ifdef USE_DNSTAP 3033 struct waiting_tcp* w = (struct waiting_tcp*)sq->pending; 3034 struct pending_tcp* pend_tcp = NULL; 3035 struct port_if* pi = NULL; 3036 if(w && !w->on_tcp_waiting_list && w->next_waiting) { 3037 pend_tcp = (struct pending_tcp*)w->next_waiting; 3038 pi = pend_tcp->pi; 3039 } 3040 #endif 3041 sq->pending = NULL; /* removed after this callback */ 3042 if(error != NETEVENT_NOERROR) 3043 log_addr(VERB_QUERY, "tcp error for address", 3044 &sq->addr, sq->addrlen); 3045 if(error==NETEVENT_NOERROR) 3046 infra_update_tcp_works(sq->outnet->infra, &sq->addr, 3047 sq->addrlen, sq->zone, sq->zonelen); 3048 #ifdef USE_DNSTAP 3049 /* 3050 * sending src (local service)/dst (upstream) addresses over DNSTAP 3051 */ 3052 if(error==NETEVENT_NOERROR && pi && sq->outnet->dtenv && 3053 (sq->outnet->dtenv->log_resolver_response_messages || 3054 sq->outnet->dtenv->log_forwarder_response_messages)) { 3055 log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen); 3056 log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen); 3057 dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr, 3058 &pi->addr, c->type, sq->zone, sq->zonelen, sq->qbuf, 3059 sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv, 3060 c->buffer); 3061 } 3062 #endif 3063 if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS && 3064 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 3065 LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin( 3066 c->buffer)) == LDNS_RCODE_NOTIMPL) ) { 3067 /* attempt to fallback to nonEDNS */ 3068 sq->status = serviced_query_TCP_EDNS_fallback; 3069 serviced_tcp_initiate(sq, c->buffer); 3070 return 0; 3071 } else if(error==NETEVENT_NOERROR && 3072 sq->status == serviced_query_TCP_EDNS_fallback && 3073 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 3074 LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE( 3075 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN 3076 || LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 3077 == LDNS_RCODE_YXDOMAIN)) { 3078 /* the fallback produced a result that looks promising, note 3079 * that this server should be approached without EDNS */ 3080 /* only store noEDNS in cache if domain is noDNSSEC */ 3081 if(!sq->want_dnssec) 3082 if(!infra_edns_update(sq->outnet->infra, &sq->addr, 3083 sq->addrlen, sq->zone, sq->zonelen, -1, 3084 
*sq->outnet->now_secs)) 3085 log_err("Out of memory caching no edns for host"); 3086 sq->status = serviced_query_TCP; 3087 } 3088 if(sq->tcp_upstream || sq->ssl_upstream) { 3089 struct timeval now = *sq->outnet->now_tv; 3090 if(error!=NETEVENT_NOERROR) { 3091 if(!infra_rtt_update(sq->outnet->infra, &sq->addr, 3092 sq->addrlen, sq->zone, sq->zonelen, sq->qtype, 3093 -1, sq->last_rtt, (time_t)now.tv_sec)) 3094 log_err("out of memory in TCP exponential backoff."); 3095 } else if(now.tv_sec > sq->last_sent_time.tv_sec || 3096 (now.tv_sec == sq->last_sent_time.tv_sec && 3097 now.tv_usec > sq->last_sent_time.tv_usec)) { 3098 /* convert from microseconds to milliseconds */ 3099 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000 3100 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000; 3101 verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime); 3102 log_assert(roundtime >= 0); 3103 /* only store if less then AUTH_TIMEOUT seconds, it could be 3104 * huge due to system-hibernated and we woke up */ 3105 if(roundtime < 60000) { 3106 if(!infra_rtt_update(sq->outnet->infra, &sq->addr, 3107 sq->addrlen, sq->zone, sq->zonelen, sq->qtype, 3108 roundtime, sq->last_rtt, (time_t)now.tv_sec)) 3109 log_err("out of memory noting rtt."); 3110 } 3111 } 3112 } 3113 /* insert address into reply info */ 3114 if(!rep) { 3115 /* create one if there isn't (on errors) */ 3116 rep = &r2; 3117 r2.c = c; 3118 } 3119 memcpy(&rep->remote_addr, &sq->addr, sq->addrlen); 3120 rep->remote_addrlen = sq->addrlen; 3121 serviced_callbacks(sq, error, c, rep); 3122 return 0; 3123 } 3124 3125 static void 3126 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff) 3127 { 3128 verbose(VERB_ALGO, "initiate TCP query %s", 3129 sq->status==serviced_query_TCP_EDNS?"EDNS":""); 3130 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS); 3131 sq->last_sent_time = *sq->outnet->now_tv; 3132 log_assert(!sq->busy); 3133 sq->busy = 1; 3134 sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout, 3135 serviced_tcp_callback, sq); 3136 sq->busy = 0; 3137 if(!sq->pending) { 3138 /* delete from tree so that a retry by above layer does not 3139 * clash with this entry */ 3140 verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query"); 3141 serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL); 3142 } 3143 } 3144 3145 /** Send serviced query over TCP return false on initial failure */ 3146 static int 3147 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff) 3148 { 3149 int vs, rtt, timeout; 3150 uint8_t edns_lame_known; 3151 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone, 3152 sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known, 3153 &rtt)) 3154 return 0; 3155 sq->last_rtt = rtt; 3156 if(vs != -1) 3157 sq->status = serviced_query_TCP_EDNS; 3158 else sq->status = serviced_query_TCP; 3159 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS); 3160 sq->last_sent_time = *sq->outnet->now_tv; 3161 if(sq->tcp_upstream || sq->ssl_upstream) { 3162 timeout = rtt; 3163 if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout) 3164 timeout = sq->outnet->tcp_auth_query_timeout; 3165 } else { 3166 timeout = sq->outnet->tcp_auth_query_timeout; 3167 } 3168 log_assert(!sq->busy); 3169 sq->busy = 1; 3170 sq->pending = pending_tcp_query(sq, buff, timeout, 3171 serviced_tcp_callback, sq); 3172 sq->busy = 0; 3173 return sq->pending != NULL; 3174 } 3175 3176 /* see if packet is edns malformed; got zeroes at start. 
3177 * This is from servers that return malformed packets to EDNS0 queries, 3178 * but they return good packets for nonEDNS0 queries. 3179 * We try to detect their output; without resorting to a full parse or 3180 * check for too many bytes after the end of the packet. */ 3181 static int 3182 packet_edns_malformed(struct sldns_buffer* buf, int qtype) 3183 { 3184 size_t len; 3185 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE) 3186 return 1; /* malformed */ 3187 /* they have NOERROR rcode, 1 answer. */ 3188 if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR) 3189 return 0; 3190 /* one query (to skip) and answer records */ 3191 if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 || 3192 LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0) 3193 return 0; 3194 /* skip qname */ 3195 len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE), 3196 sldns_buffer_limit(buf)-LDNS_HEADER_SIZE); 3197 if(len == 0) 3198 return 0; 3199 if(len == 1 && qtype == 0) 3200 return 0; /* we asked for '.' and type 0 */ 3201 /* and then 4 bytes (type and class of query) */ 3202 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3) 3203 return 0; 3204 3205 /* and start with 11 zeroes as the answer RR */ 3206 /* so check the qtype of the answer record, qname=0, type=0 */ 3207 if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 && 3208 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 && 3209 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0) 3210 return 1; 3211 return 0; 3212 } 3213 3214 int 3215 serviced_udp_callback(struct comm_point* c, void* arg, int error, 3216 struct comm_reply* rep) 3217 { 3218 struct serviced_query* sq = (struct serviced_query*)arg; 3219 struct outside_network* outnet = sq->outnet; 3220 struct timeval now = *sq->outnet->now_tv; 3221 #ifdef USE_DNSTAP 3222 struct pending* p = (struct pending*)sq->pending; 3223 #endif 3224 3225 sq->pending = NULL; /* removed after callback */ 3226 if(error == NETEVENT_TIMEOUT) { 3227 if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000) { 3228 /* fallback to 1480/1280 */ 3229 sq->status = serviced_query_UDP_EDNS_FRAG; 3230 log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10, 3231 &sq->addr, sq->addrlen); 3232 if(!serviced_udp_send(sq, c->buffer)) { 3233 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 3234 } 3235 return 0; 3236 } 3237 if(sq->status == serviced_query_UDP_EDNS_FRAG) { 3238 /* fragmentation size did not fix it */ 3239 sq->status = serviced_query_UDP_EDNS; 3240 } 3241 sq->retry++; 3242 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen, 3243 sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt, 3244 (time_t)now.tv_sec)) 3245 log_err("out of memory in UDP exponential backoff"); 3246 if(sq->retry < OUTBOUND_UDP_RETRY) { 3247 log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10, 3248 &sq->addr, sq->addrlen); 3249 if(!serviced_udp_send(sq, c->buffer)) { 3250 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 3251 } 3252 return 0; 3253 } 3254 } 3255 if(error != NETEVENT_NOERROR) { 3256 /* udp returns error (due to no ID or interface available) */ 3257 serviced_callbacks(sq, error, c, rep); 3258 return 0; 3259 } 3260 #ifdef USE_DNSTAP 3261 /* 3262 * sending src (local service)/dst (upstream) addresses over DNSTAP 3263 */ 3264 if(error == NETEVENT_NOERROR && outnet->dtenv && p->pc && 3265 (outnet->dtenv->log_resolver_response_messages || 3266 outnet->dtenv->log_forwarder_response_messages)) { 3267 log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen); 3268 log_addr(VERB_ALGO, "to local addr", 
&p->pc->pif->addr, 3269 p->pc->pif->addrlen); 3270 dt_msg_send_outside_response(outnet->dtenv, &sq->addr, 3271 &p->pc->pif->addr, c->type, sq->zone, sq->zonelen, 3272 sq->qbuf, sq->qbuflen, &sq->last_sent_time, 3273 sq->outnet->now_tv, c->buffer); 3274 } 3275 #endif 3276 if( (sq->status == serviced_query_UDP_EDNS 3277 ||sq->status == serviced_query_UDP_EDNS_FRAG) 3278 && (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 3279 == LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE( 3280 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL 3281 || packet_edns_malformed(c->buffer, sq->qtype) 3282 )) { 3283 /* try to get an answer by falling back without EDNS */ 3284 verbose(VERB_ALGO, "serviced query: attempt without EDNS"); 3285 sq->status = serviced_query_UDP_EDNS_fallback; 3286 sq->retry = 0; 3287 if(!serviced_udp_send(sq, c->buffer)) { 3288 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 3289 } 3290 return 0; 3291 } else if(sq->status == serviced_query_UDP_EDNS && 3292 !sq->edns_lame_known) { 3293 /* now we know that edns queries received answers store that */ 3294 log_addr(VERB_ALGO, "serviced query: EDNS works for", 3295 &sq->addr, sq->addrlen); 3296 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen, 3297 sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) { 3298 log_err("Out of memory caching edns works"); 3299 } 3300 sq->edns_lame_known = 1; 3301 } else if(sq->status == serviced_query_UDP_EDNS_fallback && 3302 !sq->edns_lame_known && (LDNS_RCODE_WIRE( 3303 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR || 3304 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 3305 LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin( 3306 c->buffer)) == LDNS_RCODE_YXDOMAIN)) { 3307 /* the fallback produced a result that looks promising, note 3308 * that this server should be approached without EDNS */ 3309 /* only store noEDNS in cache if domain is noDNSSEC */ 3310 if(!sq->want_dnssec) { 3311 log_addr(VERB_ALGO, "serviced query: EDNS fails for", 3312 &sq->addr, sq->addrlen); 3313 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen, 3314 sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) { 3315 log_err("Out of memory caching no edns for host"); 3316 } 3317 } else { 3318 log_addr(VERB_ALGO, "serviced query: EDNS fails, but " 3319 "not stored because need DNSSEC for", &sq->addr, 3320 sq->addrlen); 3321 } 3322 sq->status = serviced_query_UDP; 3323 } 3324 if(now.tv_sec > sq->last_sent_time.tv_sec || 3325 (now.tv_sec == sq->last_sent_time.tv_sec && 3326 now.tv_usec > sq->last_sent_time.tv_usec)) { 3327 /* convert from microseconds to milliseconds */ 3328 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000 3329 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000; 3330 verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime); 3331 log_assert(roundtime >= 0); 3332 /* in case the system hibernated, do not enter a huge value, 3333 * above this value gives trouble with server selection */ 3334 if(roundtime < 60000) { 3335 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen, 3336 sq->zone, sq->zonelen, sq->qtype, roundtime, 3337 sq->last_rtt, (time_t)now.tv_sec)) 3338 log_err("out of memory noting rtt."); 3339 } 3340 } 3341 /* perform TC flag check and TCP fallback after updating our 3342 * cache entries for EDNS status and RTT times */ 3343 if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) { 3344 /* fallback to TCP */ 3345 /* this discards partial UDP contents */ 3346 if(sq->status == serviced_query_UDP_EDNS || 3347 sq->status == serviced_query_UDP_EDNS_FRAG || 3348 
			sq->status == serviced_query_UDP_EDNS_fallback)
			/* if we have unfinished EDNS_fallback, start again */
			sq->status = serviced_query_TCP_EDNS;
		else	sq->status = serviced_query_TCP;
		serviced_tcp_initiate(sq, c->buffer);
		return 0;
	}
	/* yay! an answer */
	serviced_callbacks(sq, error, c, rep);
	return 0;
}

struct serviced_query*
outnet_serviced_query(struct outside_network* outnet,
	struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
	int nocaps, int check_ratelimit, int tcp_upstream, int ssl_upstream,
	char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* zone, size_t zonelen, struct module_qstate* qstate,
	comm_point_callback_type* callback, void* callback_arg,
	sldns_buffer* buff, struct module_env* env, int* was_ratelimited)
{
	struct serviced_query* sq;
	struct service_callback* cb;
	struct edns_string_addr* client_string_addr;
	struct regional* region;
	struct edns_option* backed_up_opt_list = qstate->edns_opts_back_out;
	struct edns_option* per_upstream_opt_list = NULL;
	time_t timenow = 0;

	/* If we have an already populated EDNS option list, make a copy,
	 * since we may now add upstream-specific EDNS options. */
	/* Use a region that could be attached to a serviced_query, if it needs
	 * to be created. If an existing one is found then this region will be
	 * destroyed here. */
	region = alloc_reg_obtain(env->alloc);
	if(!region) return NULL;
	if(qstate->edns_opts_back_out) {
		per_upstream_opt_list = edns_opt_copy_region(
			qstate->edns_opts_back_out, region);
		if(!per_upstream_opt_list) {
			alloc_reg_release(env->alloc, region);
			return NULL;
		}
		qstate->edns_opts_back_out = per_upstream_opt_list;
	}

	if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone,
		zonelen, qstate, region)) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	/* Restore the option list; we can explicitly use the copied one from
	 * now on. */
	per_upstream_opt_list = qstate->edns_opts_back_out;
	qstate->edns_opts_back_out = backed_up_opt_list;

	if((client_string_addr = edns_string_addr_lookup(
		&env->edns_strings->client_strings, addr, addrlen))) {
		edns_opt_list_append(&per_upstream_opt_list,
			env->edns_strings->client_string_opcode,
			client_string_addr->string_len,
			client_string_addr->string, region);
	}

	serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
		qinfo->qclass, flags);
	sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
		per_upstream_opt_list);
	if(!sq) {
		/* Check ratelimit only for new serviced_query */
		if(check_ratelimit) {
			timenow = *env->now;
			if(!infra_ratelimit_inc(env->infra_cache, zone,
				zonelen, timenow, env->cfg->ratelimit_backoff,
				&qstate->qinfo, qstate->reply)) {
				/* Can we pass through with slip factor?
				 */
				if(env->cfg->ratelimit_factor == 0 ||
					ub_random_max(env->rnd,
					env->cfg->ratelimit_factor) != 1) {
					*was_ratelimited = 1;
					alloc_reg_release(env->alloc, region);
					return NULL;
				}
				log_nametypeclass(VERB_ALGO,
					"ratelimit allowed through for "
					"delegation point", zone,
					LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN);
			}
		}
		/* make new serviced query entry */
		sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
			tcp_upstream, ssl_upstream, tls_auth_name, addr,
			addrlen, zone, zonelen, (int)qinfo->qtype,
			per_upstream_opt_list,
			( ssl_upstream && env->cfg->pad_queries
			? env->cfg->pad_queries_block_size : 0 ),
			env->alloc, region);
		if(!sq) {
			if(check_ratelimit) {
				infra_ratelimit_dec(env->infra_cache,
					zone, zonelen, timenow);
			}
			return NULL;
		}
		if(!(cb = (struct service_callback*)regional_alloc(
			sq->region, sizeof(*cb)))) {
			if(check_ratelimit) {
				infra_ratelimit_dec(env->infra_cache,
					zone, zonelen, timenow);
			}
			(void)rbtree_delete(outnet->serviced, sq);
			serviced_node_del(&sq->node, NULL);
			return NULL;
		}
		/* No network action at this point; it will be invoked with the
		 * serviced_query timer instead to run outside of the mesh. */
	} else {
		/* We don't need this region anymore. */
		alloc_reg_release(env->alloc, region);
		/* duplicate entries are included in the callback list, because
		 * there is a counterpart registration by our caller that needs
		 * to be doubly-removed (with callbacks perhaps). */
		if(!(cb = (struct service_callback*)regional_alloc(
			sq->region, sizeof(*cb)))) {
			return NULL;
		}
	}
	/* add callback to list of callbacks */
	cb->cb = callback;
	cb->cb_arg = callback_arg;
	cb->next = sq->cblist;
	sq->cblist = cb;
	return sq;
}

/** remove callback from list */
static void
callback_list_remove(struct serviced_query* sq, void* cb_arg)
{
	struct service_callback** pp = &sq->cblist;
	while(*pp) {
		if((*pp)->cb_arg == cb_arg) {
			struct service_callback* del = *pp;
			*pp = del->next;
			return;
		}
		pp = &(*pp)->next;
	}
}

void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
{
	if(!sq)
		return;
	callback_list_remove(sq, cb_arg);
	/* if callbacks() routine scheduled deletion, let it do that */
	if(!sq->cblist && !sq->busy && !sq->to_be_deleted) {
		(void)rbtree_delete(sq->outnet->serviced, sq);
		serviced_delete(sq);
	}
}

/** create fd to send to this destination */
static int
fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
	socklen_t to_addrlen)
{
	struct sockaddr_storage* addr;
	socklen_t addrlen;
	int i, try, pnum, dscp;
	struct port_if* pif;

	/* create fd */
	dscp = outnet->ip_dscp;
	for(try = 0; try<1000; try++) {
		int port = 0;
		int freebind = 0;
		int noproto = 0;
		int inuse = 0;
		int fd = -1;

		/* select interface */
		if(addr_is_ip6(to_addr, to_addrlen)) {
			if(outnet->num_ip6 == 0) {
				char to[64];
				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
				verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
				return -1;
			}
			i = ub_random_max(outnet->rnd, outnet->num_ip6);
			pif = &outnet->ip6_ifs[i];
		} else {
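			/* Not an IPv6 destination: pick a random IPv4
			 * outgoing interface below, mirroring the IPv6
			 * branch above. */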
			if(outnet->num_ip4 == 0) {
				char to[64];
				addr_to_str(to_addr, to_addrlen, to, sizeof(to));
				verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
				return -1;
			}
			i = ub_random_max(outnet->rnd, outnet->num_ip4);
			pif = &outnet->ip4_ifs[i];
		}
		addr = &pif->addr;
		addrlen = pif->addrlen;
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
		pnum = ub_random_max(outnet->rnd, pif->avail_total);
		if(pnum < pif->inuse) {
			/* port already open */
			port = pif->out[pnum]->number;
		} else {
			/* unused ports in start part of array */
			port = pif->avail_ports[pnum - pif->inuse];
		}
#else
		pnum = port = 0;
#endif
		if(addr_is_ip6(to_addr, to_addrlen)) {
			struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
			sa.sin6_port = (in_port_t)htons((uint16_t)port);
			fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
				(struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
				0, 0, 0, NULL, 0, freebind, 0, dscp);
		} else {
			struct sockaddr_in* sa = (struct sockaddr_in*)addr;
			sa->sin_port = (in_port_t)htons((uint16_t)port);
			fd = create_udp_sock(AF_INET, SOCK_DGRAM,
				(struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
				0, 0, 0, NULL, 0, freebind, 0, dscp);
		}
		if(fd != -1) {
			return fd;
		}
		if(!inuse) {
			return -1;
		}
	}
	/* too many tries */
	log_err("cannot send probe, ports are in use");
	return -1;
}

struct comm_point*
outnet_comm_point_for_udp(struct outside_network* outnet,
	comm_point_callback_type* cb, void* cb_arg,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen)
{
	struct comm_point* cp;
	int fd = fd_for_dest(outnet, to_addr, to_addrlen);
	if(fd == -1) {
		return NULL;
	}
	cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff, 0,
		cb, cb_arg, NULL);
	if(!cp) {
		log_err("malloc failure");
		close(fd);
		return NULL;
	}
	return cp;
}

/** setup SSL for comm point */
static int
setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
	int fd, char* host)
{
	cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
	if(!cp->ssl) {
		log_err("cannot create SSL object");
		return 0;
	}
#ifdef USE_WINSOCK
	comm_point_tcp_win_bio_cb(cp, cp->ssl);
#endif
	cp->ssl_shake_state = comm_ssl_shake_write;
	/* https verification */
#ifdef HAVE_SSL
	if(outnet->tls_use_sni) {
		(void)SSL_set_tlsext_host_name(cp->ssl, host);
	}
#endif
#ifdef HAVE_SSL_SET1_HOST
	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
		/* because we set SSL_VERIFY_PEER, in netevent in
		 * ssl_handshake, it'll check if the certificate
		 * verification has succeeded */
		/* SSL_VERIFY_PEER is set on the sslctx */
		/* and the certificates to verify with are loaded into
		 * it with SSL_load_verify_locations or
		 * SSL_CTX_set_default_verify_paths */
		/* setting the hostname makes openssl verify the
		 * host name in the x509 certificate in the
		 * SSL connection */
		if(!SSL_set1_host(cp->ssl, host)) {
			log_err("SSL_set1_host failed");
			return 0;
		}
	}
#elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
	/* openssl 1.0.2 has this function that can be used for
	 * set1_host like verification */
	if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
		X509_VERIFY_PARAM*
			param = SSL_get0_param(cp->ssl);
# ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
		X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
# endif
		if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
			log_err("X509_VERIFY_PARAM_set1_host failed");
			return 0;
		}
	}
#else
	(void)host;
#endif /* HAVE_SSL_SET1_HOST */
	return 1;
}

struct comm_point*
outnet_comm_point_for_tcp(struct outside_network* outnet,
	comm_point_callback_type* cb, void* cb_arg,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen,
	sldns_buffer* query, int timeout, int ssl, char* host)
{
	struct comm_point* cp;
	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
	if(fd == -1) {
		return 0;
	}
	fd_set_nonblock(fd);
	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
		/* outnet_tcp_connect has closed fd on error for us */
		return 0;
	}
	cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
	if(!cp) {
		log_err("malloc failure");
		close(fd);
		return 0;
	}
	cp->repinfo.remote_addrlen = to_addrlen;
	memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);

	/* setup for SSL (if needed) */
	if(ssl) {
		if(!setup_comm_ssl(cp, outnet, fd, host)) {
			log_err("cannot setup XoT");
			comm_point_delete(cp);
			return NULL;
		}
	}

	/* set timeout on TCP connection */
	comm_point_start_listening(cp, fd, timeout);
	/* copy scratch buffer to cp->buffer */
	sldns_buffer_copy(cp->buffer, query);
	return cp;
}

/** setup the User-Agent HTTP header based on http-user-agent configuration */
static void
setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
{
	if(cfg->hide_http_user_agent) return;
	if(cfg->http_user_agent==NULL || cfg->http_user_agent[0] == 0) {
		sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME,
			PACKAGE_VERSION);
	} else {
		sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
	}
}

/** setup http request headers in buffer for sending query to destination */
static int
setup_http_request(sldns_buffer* buf, char* host, char* path,
	struct config_file* cfg)
{
	sldns_buffer_clear(buf);
	sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
	sldns_buffer_printf(buf, "Host: %s\r\n", host);
	setup_http_user_agent(buf, cfg);
	/* We do not really do multiple queries per connection,
	 * but we also do not need to send this header:
	 * sldns_buffer_printf(buf, "Connection: close\r\n") */
	sldns_buffer_printf(buf, "\r\n");
	if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
		return 0; /* somehow buffer too short, but it is about 60K
			and the request is only a couple bytes long.
			*/
	sldns_buffer_flip(buf);
	return 1;
}

struct comm_point*
outnet_comm_point_for_http(struct outside_network* outnet,
	comm_point_callback_type* cb, void* cb_arg,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
	int ssl, char* host, char* path, struct config_file* cfg)
{
	/* cp calls cb with err=NETEVENT_DONE when transfer is done */
	struct comm_point* cp;
	int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
	if(fd == -1) {
		return 0;
	}
	fd_set_nonblock(fd);
	if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
		/* outnet_tcp_connect has closed fd on error for us */
		return 0;
	}
	cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
		outnet->udp_buff);
	if(!cp) {
		log_err("malloc failure");
		close(fd);
		return 0;
	}
	cp->repinfo.remote_addrlen = to_addrlen;
	memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);

	/* setup for SSL (if needed) */
	if(ssl) {
		if(!setup_comm_ssl(cp, outnet, fd, host)) {
			log_err("cannot setup https");
			comm_point_delete(cp);
			return NULL;
		}
	}

	/* set timeout on TCP connection */
	comm_point_start_listening(cp, fd, timeout);

	/* setup http request in cp->buffer */
	if(!setup_http_request(cp->buffer, host, path, cfg)) {
		log_err("error setting up http request");
		comm_point_delete(cp);
		return NULL;
	}
	return cp;
}

/** get memory used by waiting tcp entry (in use or not) */
static size_t
waiting_tcp_get_mem(struct waiting_tcp* w)
{
	size_t s;
	if(!w) return 0;
	s = sizeof(*w) + w->pkt_len;
	if(w->timer)
		s += comm_timer_get_mem(w->timer);
	return s;
}

/** get memory used by port if */
static size_t
if_get_mem(struct port_if* pif)
{
	size_t s;
	int i;
	s = sizeof(*pif) +
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
		sizeof(int)*pif->avail_total +
#endif
		sizeof(struct port_comm*)*pif->maxout;
	for(i=0; i<pif->inuse; i++)
		s += sizeof(*pif->out[i]) +
			comm_point_get_mem(pif->out[i]->cp);
	return s;
}

/** get memory used by waiting udp */
static size_t
waiting_udp_get_mem(struct pending* w)
{
	size_t s;
	s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
	return s;
}

size_t outnet_get_mem(struct outside_network* outnet)
{
	size_t i;
	int k;
	struct waiting_tcp* w;
	struct pending* u;
	struct serviced_query* sq;
	struct service_callback* sb;
	struct port_comm* pc;
	size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
		sizeof(*outnet->udp_buff) +
		sldns_buffer_capacity(outnet->udp_buff);
	/* second buffer is not ours */
	for(pc = outnet->unused_fds; pc; pc = pc->next) {
		s += sizeof(*pc) + comm_point_get_mem(pc->cp);
	}
	for(k=0; k<outnet->num_ip4; k++)
		s += if_get_mem(&outnet->ip4_ifs[k]);
	for(k=0; k<outnet->num_ip6; k++)
		s += if_get_mem(&outnet->ip6_ifs[k]);
	for(u=outnet->udp_wait_first; u; u=u->next_waiting)
		s += waiting_udp_get_mem(u);

	s += sizeof(struct pending_tcp*)*outnet->num_tcp;
	for(i=0; i<outnet->num_tcp; i++) {
		s += sizeof(struct pending_tcp);
		s += comm_point_get_mem(outnet->tcp_conns[i]->c);
		if(outnet->tcp_conns[i]->query)
			s +=
				waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
	}
	for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
		s += waiting_tcp_get_mem(w);
	s += sizeof(*outnet->pending);
	s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
		outnet->pending->count;
	s += sizeof(*outnet->serviced);
	s += outnet->svcd_overhead;
	RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
		s += sizeof(*sq) + sq->qbuflen;
		for(sb = sq->cblist; sb; sb = sb->next)
			s += sizeof(*sb);
	}
	return s;
}

size_t
serviced_get_mem(struct serviced_query* sq)
{
	struct service_callback* sb;
	size_t s;
	s = sizeof(*sq) + sq->qbuflen;
	for(sb = sq->cblist; sb; sb = sb->next)
		s += sizeof(*sb);
	if(sq->status == serviced_query_UDP_EDNS ||
		sq->status == serviced_query_UDP ||
		sq->status == serviced_query_UDP_EDNS_FRAG ||
		sq->status == serviced_query_UDP_EDNS_fallback) {
		s += sizeof(struct pending);
		s += comm_timer_get_mem(NULL);
	} else {
		/* does not have size of the pkt pointer */
		/* always has a timer except on malloc failures */

		/* these sizes are part of the main outside network mem */
		/*
		s += sizeof(struct waiting_tcp);
		s += comm_timer_get_mem(NULL);
		*/
	}
	return s;
}
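
/*
 * Illustrative sketch, kept out of the build with #if 0: how a caller could
 * use outnet_comm_point_for_tcp() above to send one query over plain TCP and
 * pick up the reply in a callback.  The names example_reply_callback and
 * example_send_tcp_query are hypothetical and do not exist elsewhere in the
 * code base; error handling is reduced to the essentials.
 */
#if 0
/* callback invoked by the event loop when the TCP exchange completes;
 * the caller that created the comm point remains responsible for deleting
 * it with comm_point_delete() once it is done with it. */
static int
example_reply_callback(struct comm_point* c, void* arg, int error,
	struct comm_reply* rep)
{
	(void)arg; (void)rep;
	if(error == NETEVENT_NOERROR) {
		/* c->buffer holds the wireformat answer */
		verbose(VERB_ALGO, "example: got %d byte TCP reply",
			(int)sldns_buffer_limit(c->buffer));
	} else {
		verbose(VERB_ALGO, "example: TCP query failed, error %d",
			error);
	}
	return 0;
}

/* send the query in qbuf to to_addr over TCP without TLS, with an example
 * timeout of 5000 msec; returns 1 if the comm point was created and the
 * query handed to the event loop, 0 on failure */
static int
example_send_tcp_query(struct outside_network* outnet,
	struct sockaddr_storage* to_addr, socklen_t to_addrlen,
	sldns_buffer* qbuf)
{
	struct comm_point* cp = outnet_comm_point_for_tcp(outnet,
		example_reply_callback, NULL, to_addr, to_addrlen,
		qbuf, 5000, 0 /* no ssl */, NULL /* no tls host */);
	return cp != NULL;
}
#endif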