1 /* 2 * services/outside_network.c - implement sending of queries and wait answer. 3 * 4 * Copyright (c) 2007, NLnet Labs. All rights reserved. 5 * 6 * This software is open source. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * Redistributions of source code must retain the above copyright notice, 13 * this list of conditions and the following disclaimer. 14 * 15 * Redistributions in binary form must reproduce the above copyright notice, 16 * this list of conditions and the following disclaimer in the documentation 17 * and/or other materials provided with the distribution. 18 * 19 * Neither the name of the NLNET LABS nor the names of its contributors may 20 * be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 34 */ 35 36 /** 37 * \file 38 * 39 * This file has functions to send queries to authoritative servers and 40 * wait for the pending answer events. 41 */ 42 #include "config.h" 43 #include <ctype.h> 44 #ifdef HAVE_SYS_TYPES_H 45 # include <sys/types.h> 46 #endif 47 #include <sys/time.h> 48 #include "services/outside_network.h" 49 #include "services/listen_dnsport.h" 50 #include "services/cache/infra.h" 51 #include "iterator/iterator.h" 52 #include "util/data/msgparse.h" 53 #include "util/data/msgreply.h" 54 #include "util/data/msgencode.h" 55 #include "util/data/dname.h" 56 #include "util/netevent.h" 57 #include "util/log.h" 58 #include "util/net_help.h" 59 #include "util/random.h" 60 #include "util/fptr_wlist.h" 61 #include "util/edns.h" 62 #include "sldns/sbuffer.h" 63 #include "dnstap/dnstap.h" 64 #ifdef HAVE_OPENSSL_SSL_H 65 #include <openssl/ssl.h> 66 #endif 67 #ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST 68 #include <openssl/x509v3.h> 69 #endif 70 71 #ifdef HAVE_NETDB_H 72 #include <netdb.h> 73 #endif 74 #include <fcntl.h> 75 76 /** number of times to retry making a random ID that is unique. */ 77 #define MAX_ID_RETRY 1000 78 /** number of times to retry finding interface, port that can be opened. */ 79 #define MAX_PORT_RETRY 10000 80 /** number of retries on outgoing UDP queries */ 81 #define OUTBOUND_UDP_RETRY 1 82 83 /** initiate TCP transaction for serviced query */ 84 static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff); 85 /** with a fd available, randomize and send UDP */ 86 static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, 87 int timeout); 88 89 /** remove waiting tcp from the outnet waiting list */ 90 static void waiting_list_remove(struct outside_network* outnet, 91 struct waiting_tcp* w); 92 93 /** remove reused element from tree and lru list */ 94 static void reuse_tcp_remove_tree_list(struct outside_network* outnet, 95 struct reuse_tcp* reuse); 96 97 int 98 pending_cmp(const void* key1, const void* key2) 99 { 100 struct pending *p1 = (struct pending*)key1; 101 struct pending *p2 = (struct pending*)key2; 102 if(p1->id < p2->id) 103 return -1; 104 if(p1->id > p2->id) 105 return 1; 106 log_assert(p1->id == p2->id); 107 return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen); 108 } 109 110 int 111 serviced_cmp(const void* key1, const void* key2) 112 { 113 struct serviced_query* q1 = (struct serviced_query*)key1; 114 struct serviced_query* q2 = (struct serviced_query*)key2; 115 int r; 116 if(q1->qbuflen < q2->qbuflen) 117 return -1; 118 if(q1->qbuflen > q2->qbuflen) 119 return 1; 120 log_assert(q1->qbuflen == q2->qbuflen); 121 log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */); 122 /* alternate casing of qname is still the same query */ 123 if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0) 124 return r; 125 if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0) 126 return r; 127 if(q1->dnssec != q2->dnssec) { 128 if(q1->dnssec < q2->dnssec) 129 return -1; 130 return 1; 131 } 132 if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0) 133 return r; 134 if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0) 135 return r; 136 return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen); 137 } 138 139 /** compare if the reuse element has the same address, port and same ssl-is 140 * used-for-it characteristic */ 141 static int 142 reuse_cmp_addrportssl(const void* key1, const void* key2) 143 { 144 struct reuse_tcp* r1 = (struct reuse_tcp*)key1; 145 struct reuse_tcp* r2 = (struct reuse_tcp*)key2; 146 int r; 147 /* compare address and port */ 148 r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen); 149 if(r != 0) 150 return r; 151 152 /* compare if SSL-enabled */ 153 if(r1->is_ssl && !r2->is_ssl) 154 return 1; 155 if(!r1->is_ssl && r2->is_ssl) 156 return -1; 157 return 0; 158 } 159 160 int 161 reuse_cmp(const void* key1, const void* key2) 162 { 163 int r; 164 r = reuse_cmp_addrportssl(key1, key2); 165 if(r != 0) 166 return r; 167 168 /* compare ptr value */ 169 if(key1 < key2) return -1; 170 if(key1 > key2) return 1; 171 return 0; 172 } 173 174 int reuse_id_cmp(const void* key1, const void* key2) 175 { 176 struct waiting_tcp* w1 = (struct waiting_tcp*)key1; 177 struct waiting_tcp* w2 = (struct waiting_tcp*)key2; 178 if(w1->id < w2->id) 179 return -1; 180 if(w1->id > w2->id) 181 return 1; 182 return 0; 183 } 184 185 /** delete waiting_tcp entry. Does not unlink from waiting list. 186 * @param w: to delete. 187 */ 188 static void 189 waiting_tcp_delete(struct waiting_tcp* w) 190 { 191 if(!w) return; 192 if(w->timer) 193 comm_timer_delete(w->timer); 194 free(w); 195 } 196 197 /** 198 * Pick random outgoing-interface of that family, and bind it. 199 * port set to 0 so OS picks a port number for us. 200 * if it is the ANY address, do not bind. 201 * @param w: tcp structure with destination address. 202 * @param s: socket fd. 203 * @return false on error, socket closed. 204 */ 205 static int 206 pick_outgoing_tcp(struct waiting_tcp* w, int s) 207 { 208 struct port_if* pi = NULL; 209 int num; 210 #ifdef INET6 211 if(addr_is_ip6(&w->addr, w->addrlen)) 212 num = w->outnet->num_ip6; 213 else 214 #endif 215 num = w->outnet->num_ip4; 216 if(num == 0) { 217 log_err("no TCP outgoing interfaces of family"); 218 log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen); 219 sock_close(s); 220 return 0; 221 } 222 #ifdef INET6 223 if(addr_is_ip6(&w->addr, w->addrlen)) 224 pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)]; 225 else 226 #endif 227 pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)]; 228 log_assert(pi); 229 if(addr_is_any(&pi->addr, pi->addrlen)) { 230 /* binding to the ANY interface is for listening sockets */ 231 return 1; 232 } 233 /* set port to 0 */ 234 if(addr_is_ip6(&pi->addr, pi->addrlen)) 235 ((struct sockaddr_in6*)&pi->addr)->sin6_port = 0; 236 else ((struct sockaddr_in*)&pi->addr)->sin_port = 0; 237 if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) { 238 log_err("outgoing tcp: bind: %s", sock_strerror(errno)); 239 sock_close(s); 240 return 0; 241 } 242 log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen); 243 return 1; 244 } 245 246 /** get TCP file descriptor for address, returns -1 on failure, 247 * tcp_mss is 0 or maxseg size to set for TCP packets. */ 248 int 249 outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp) 250 { 251 int s; 252 int af; 253 char* err; 254 #ifdef SO_REUSEADDR 255 int on = 1; 256 #endif 257 #ifdef INET6 258 if(addr_is_ip6(addr, addrlen)){ 259 s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP); 260 af = AF_INET6; 261 } else { 262 #else 263 { 264 #endif 265 af = AF_INET; 266 s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); 267 } 268 if(s == -1) { 269 log_err_addr("outgoing tcp: socket", sock_strerror(errno), 270 addr, addrlen); 271 return -1; 272 } 273 274 #ifdef SO_REUSEADDR 275 if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on, 276 (socklen_t)sizeof(on)) < 0) { 277 verbose(VERB_ALGO, "outgoing tcp:" 278 " setsockopt(.. SO_REUSEADDR ..) failed"); 279 } 280 #endif 281 282 err = set_ip_dscp(s, af, dscp); 283 if(err != NULL) { 284 verbose(VERB_ALGO, "outgoing tcp:" 285 "error setting IP DiffServ codepoint on socket"); 286 } 287 288 if(tcp_mss > 0) { 289 #if defined(IPPROTO_TCP) && defined(TCP_MAXSEG) 290 if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG, 291 (void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) { 292 verbose(VERB_ALGO, "outgoing tcp:" 293 " setsockopt(.. TCP_MAXSEG ..) failed"); 294 } 295 #else 296 verbose(VERB_ALGO, "outgoing tcp:" 297 " setsockopt(TCP_MAXSEG) unsupported"); 298 #endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */ 299 } 300 301 return s; 302 } 303 304 /** connect tcp connection to addr, 0 on failure */ 305 int 306 outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen) 307 { 308 if(connect(s, (struct sockaddr*)addr, addrlen) == -1) { 309 #ifndef USE_WINSOCK 310 #ifdef EINPROGRESS 311 if(errno != EINPROGRESS) { 312 #endif 313 if(tcp_connect_errno_needs_log( 314 (struct sockaddr*)addr, addrlen)) 315 log_err_addr("outgoing tcp: connect", 316 strerror(errno), addr, addrlen); 317 close(s); 318 return 0; 319 #ifdef EINPROGRESS 320 } 321 #endif 322 #else /* USE_WINSOCK */ 323 if(WSAGetLastError() != WSAEINPROGRESS && 324 WSAGetLastError() != WSAEWOULDBLOCK) { 325 closesocket(s); 326 return 0; 327 } 328 #endif 329 } 330 return 1; 331 } 332 333 /** log reuse item addr and ptr with message */ 334 static void 335 log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse) 336 { 337 uint16_t port; 338 char addrbuf[128]; 339 if(verbosity < v) return; 340 addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf)); 341 port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port); 342 verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port, 343 reuse->pending->c->fd); 344 } 345 346 /** pop the first element from the writewait list */ 347 static struct waiting_tcp* reuse_write_wait_pop(struct reuse_tcp* reuse) 348 { 349 struct waiting_tcp* w = reuse->write_wait_first; 350 if(!w) 351 return NULL; 352 log_assert(w->write_wait_queued); 353 log_assert(!w->write_wait_prev); 354 reuse->write_wait_first = w->write_wait_next; 355 if(w->write_wait_next) 356 w->write_wait_next->write_wait_prev = NULL; 357 else reuse->write_wait_last = NULL; 358 w->write_wait_queued = 0; 359 return w; 360 } 361 362 /** remove the element from the writewait list */ 363 static void reuse_write_wait_remove(struct reuse_tcp* reuse, 364 struct waiting_tcp* w) 365 { 366 if(!w) 367 return; 368 if(!w->write_wait_queued) 369 return; 370 if(w->write_wait_prev) 371 w->write_wait_prev->write_wait_next = w->write_wait_next; 372 else reuse->write_wait_first = w->write_wait_next; 373 if(w->write_wait_next) 374 w->write_wait_next->write_wait_prev = w->write_wait_prev; 375 else reuse->write_wait_last = w->write_wait_prev; 376 w->write_wait_queued = 0; 377 } 378 379 /** push the element after the last on the writewait list */ 380 static void reuse_write_wait_push_back(struct reuse_tcp* reuse, 381 struct waiting_tcp* w) 382 { 383 if(!w) return; 384 log_assert(!w->write_wait_queued); 385 if(reuse->write_wait_last) { 386 reuse->write_wait_last->write_wait_next = w; 387 w->write_wait_prev = reuse->write_wait_last; 388 } else { 389 reuse->write_wait_first = w; 390 } 391 reuse->write_wait_last = w; 392 w->write_wait_queued = 1; 393 } 394 395 /** insert element in tree by id */ 396 void 397 reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w) 398 { 399 log_assert(w->id_node.key == NULL); 400 w->id_node.key = w; 401 rbtree_insert(&reuse->tree_by_id, &w->id_node); 402 } 403 404 /** find element in tree by id */ 405 struct waiting_tcp* 406 reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id) 407 { 408 struct waiting_tcp key_w; 409 rbnode_type* n; 410 memset(&key_w, 0, sizeof(key_w)); 411 key_w.id_node.key = &key_w; 412 key_w.id = id; 413 n = rbtree_search(&reuse->tree_by_id, &key_w); 414 if(!n) return NULL; 415 return (struct waiting_tcp*)n->key; 416 } 417 418 /** return ID value of rbnode in tree_by_id */ 419 static uint16_t 420 tree_by_id_get_id(rbnode_type* node) 421 { 422 struct waiting_tcp* w = (struct waiting_tcp*)node->key; 423 return w->id; 424 } 425 426 /** insert into reuse tcp tree and LRU, false on failure (duplicate) */ 427 static int 428 reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp) 429 { 430 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse); 431 if(pend_tcp->reuse.item_on_lru_list) { 432 if(!pend_tcp->reuse.node.key) 433 log_err("internal error: reuse_tcp_insert: on lru list without key"); 434 return 1; 435 } 436 pend_tcp->reuse.node.key = &pend_tcp->reuse; 437 pend_tcp->reuse.pending = pend_tcp; 438 if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) { 439 /* this is a duplicate connection, close this one */ 440 verbose(VERB_CLIENT, "reuse_tcp_insert: duplicate connection"); 441 pend_tcp->reuse.node.key = NULL; 442 return 0; 443 } 444 /* insert into LRU, first is newest */ 445 pend_tcp->reuse.lru_prev = NULL; 446 if(outnet->tcp_reuse_first) { 447 pend_tcp->reuse.lru_next = outnet->tcp_reuse_first; 448 outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse; 449 } else { 450 pend_tcp->reuse.lru_next = NULL; 451 outnet->tcp_reuse_last = &pend_tcp->reuse; 452 } 453 outnet->tcp_reuse_first = &pend_tcp->reuse; 454 pend_tcp->reuse.item_on_lru_list = 1; 455 return 1; 456 } 457 458 /** find reuse tcp stream to destination for query, or NULL if none */ 459 static struct reuse_tcp* 460 reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr, 461 socklen_t addrlen, int use_ssl) 462 { 463 struct waiting_tcp key_w; 464 struct pending_tcp key_p; 465 struct comm_point c; 466 rbnode_type* result = NULL, *prev; 467 verbose(VERB_CLIENT, "reuse_tcp_find"); 468 memset(&key_w, 0, sizeof(key_w)); 469 memset(&key_p, 0, sizeof(key_p)); 470 memset(&c, 0, sizeof(c)); 471 key_p.query = &key_w; 472 key_p.c = &c; 473 key_p.reuse.pending = &key_p; 474 key_p.reuse.node.key = &key_p.reuse; 475 if(use_ssl) 476 key_p.reuse.is_ssl = 1; 477 if(addrlen > (socklen_t)sizeof(key_p.reuse.addr)) 478 return NULL; 479 memmove(&key_p.reuse.addr, addr, addrlen); 480 key_p.reuse.addrlen = addrlen; 481 482 verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u", 483 (unsigned)outnet->tcp_reuse.count); 484 if(outnet->tcp_reuse.root == NULL || 485 outnet->tcp_reuse.root == RBTREE_NULL) 486 return NULL; 487 if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse, 488 &result)) { 489 /* exact match */ 490 /* but the key is on stack, and ptr is compared, impossible */ 491 log_assert(&key_p.reuse != (struct reuse_tcp*)result); 492 log_assert(&key_p != ((struct reuse_tcp*)result)->pending); 493 } 494 /* not found, return null */ 495 if(!result || result == RBTREE_NULL) 496 return NULL; 497 verbose(VERB_CLIENT, "reuse_tcp_find check inexact match"); 498 /* inexact match, find one of possibly several connections to the 499 * same destination address, with the correct port, ssl, and 500 * also less than max number of open queries, or else, fail to open 501 * a new one */ 502 /* rewind to start of sequence of same address,port,ssl */ 503 prev = rbtree_previous(result); 504 while(prev && prev != RBTREE_NULL && 505 reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) { 506 result = prev; 507 prev = rbtree_previous(result); 508 } 509 510 /* loop to find first one that has correct characteristics */ 511 while(result && result != RBTREE_NULL && 512 reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) { 513 if(((struct reuse_tcp*)result)->tree_by_id.count < 514 MAX_REUSE_TCP_QUERIES) { 515 /* same address, port, ssl-yes-or-no, and has 516 * space for another query */ 517 return (struct reuse_tcp*)result; 518 } 519 result = rbtree_next(result); 520 } 521 return NULL; 522 } 523 524 /** use the buffer to setup writing the query */ 525 static void 526 outnet_tcp_take_query_setup(int s, struct pending_tcp* pend, 527 struct waiting_tcp* w) 528 { 529 struct timeval tv; 530 verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write " 531 "len %d timeout %d msec", 532 (int)w->pkt_len, w->timeout); 533 pend->c->tcp_write_pkt = w->pkt; 534 pend->c->tcp_write_pkt_len = w->pkt_len; 535 pend->c->tcp_write_and_read = 1; 536 pend->c->tcp_write_byte_count = 0; 537 pend->c->tcp_is_reading = 0; 538 comm_point_start_listening(pend->c, s, -1); 539 /* set timer on the waiting_tcp entry, this is the write timeout 540 * for the written packet. The timer on pend->c is the timer 541 * for when there is no written packet and we have readtimeouts */ 542 #ifndef S_SPLINT_S 543 tv.tv_sec = w->timeout/1000; 544 tv.tv_usec = (w->timeout%1000)*1000; 545 #endif 546 /* if the waiting_tcp was previously waiting for a buffer in the 547 * outside_network.tcpwaitlist, then the timer is reset now that 548 * we start writing it */ 549 comm_timer_set(w->timer, &tv); 550 } 551 552 /** use next free buffer to service a tcp query */ 553 static int 554 outnet_tcp_take_into_use(struct waiting_tcp* w) 555 { 556 struct pending_tcp* pend = w->outnet->tcp_free; 557 int s; 558 log_assert(pend); 559 log_assert(w->pkt); 560 log_assert(w->pkt_len > 0); 561 log_assert(w->addrlen > 0); 562 pend->c->tcp_do_toggle_rw = 0; 563 pend->c->tcp_do_close = 0; 564 /* open socket */ 565 s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp); 566 567 if(s == -1) 568 return 0; 569 570 if(!pick_outgoing_tcp(w, s)) 571 return 0; 572 573 fd_set_nonblock(s); 574 #ifdef USE_OSX_MSG_FASTOPEN 575 /* API for fast open is different here. We use a connectx() function and 576 then writes can happen as normal even using SSL.*/ 577 /* connectx requires that the len be set in the sockaddr struct*/ 578 struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr; 579 addr_in->sin_len = w->addrlen; 580 sa_endpoints_t endpoints; 581 endpoints.sae_srcif = 0; 582 endpoints.sae_srcaddr = NULL; 583 endpoints.sae_srcaddrlen = 0; 584 endpoints.sae_dstaddr = (struct sockaddr *)&w->addr; 585 endpoints.sae_dstaddrlen = w->addrlen; 586 if (connectx(s, &endpoints, SAE_ASSOCID_ANY, 587 CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE, 588 NULL, 0, NULL, NULL) == -1) { 589 /* if fails, failover to connect for OSX 10.10 */ 590 #ifdef EINPROGRESS 591 if(errno != EINPROGRESS) { 592 #else 593 if(1) { 594 #endif 595 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) { 596 #else /* USE_OSX_MSG_FASTOPEN*/ 597 #ifdef USE_MSG_FASTOPEN 598 pend->c->tcp_do_fastopen = 1; 599 /* Only do TFO for TCP in which case no connect() is required here. 600 Don't combine client TFO with SSL, since OpenSSL can't 601 currently support doing a handshake on fd that already isn't connected*/ 602 if (w->outnet->sslctx && w->ssl_upstream) { 603 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) { 604 #else /* USE_MSG_FASTOPEN*/ 605 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) { 606 #endif /* USE_MSG_FASTOPEN*/ 607 #endif /* USE_OSX_MSG_FASTOPEN*/ 608 #ifndef USE_WINSOCK 609 #ifdef EINPROGRESS 610 if(errno != EINPROGRESS) { 611 #else 612 if(1) { 613 #endif 614 if(tcp_connect_errno_needs_log( 615 (struct sockaddr*)&w->addr, w->addrlen)) 616 log_err_addr("outgoing tcp: connect", 617 strerror(errno), &w->addr, w->addrlen); 618 close(s); 619 #else /* USE_WINSOCK */ 620 if(WSAGetLastError() != WSAEINPROGRESS && 621 WSAGetLastError() != WSAEWOULDBLOCK) { 622 closesocket(s); 623 #endif 624 return 0; 625 } 626 } 627 #ifdef USE_MSG_FASTOPEN 628 } 629 #endif /* USE_MSG_FASTOPEN */ 630 #ifdef USE_OSX_MSG_FASTOPEN 631 } 632 } 633 #endif /* USE_OSX_MSG_FASTOPEN */ 634 if(w->outnet->sslctx && w->ssl_upstream) { 635 pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s); 636 if(!pend->c->ssl) { 637 pend->c->fd = s; 638 comm_point_close(pend->c); 639 return 0; 640 } 641 verbose(VERB_ALGO, "the query is using TLS encryption, for %s", 642 (w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection")); 643 #ifdef USE_WINSOCK 644 comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl); 645 #endif 646 pend->c->ssl_shake_state = comm_ssl_shake_write; 647 if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name, 648 w->outnet->tls_use_sni)) { 649 pend->c->fd = s; 650 #ifdef HAVE_SSL 651 SSL_free(pend->c->ssl); 652 #endif 653 pend->c->ssl = NULL; 654 comm_point_close(pend->c); 655 return 0; 656 } 657 } 658 w->next_waiting = (void*)pend; 659 w->outnet->num_tcp_outgoing++; 660 w->outnet->tcp_free = pend->next_free; 661 pend->next_free = NULL; 662 pend->query = w; 663 pend->reuse.outnet = w->outnet; 664 pend->c->repinfo.addrlen = w->addrlen; 665 pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again; 666 pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again; 667 pend->reuse.cp_more_read_again = 0; 668 pend->reuse.cp_more_write_again = 0; 669 memcpy(&pend->c->repinfo.addr, &w->addr, w->addrlen); 670 pend->reuse.pending = pend; 671 672 /* Remove from tree in case the is_ssl will be different and causes the 673 * identity of the reuse_tcp to change; could result in nodes not being 674 * deleted from the tree (because the new identity does not match the 675 * previous node) but their ->key would be changed to NULL. */ 676 if(pend->reuse.node.key) 677 reuse_tcp_remove_tree_list(w->outnet, &pend->reuse); 678 679 if(pend->c->ssl) 680 pend->reuse.is_ssl = 1; 681 else pend->reuse.is_ssl = 0; 682 /* insert in reuse by address tree if not already inserted there */ 683 (void)reuse_tcp_insert(w->outnet, pend); 684 reuse_tree_by_id_insert(&pend->reuse, w); 685 outnet_tcp_take_query_setup(s, pend, w); 686 return 1; 687 } 688 689 /** Touch the lru of a reuse_tcp element, it is in use. 690 * This moves it to the front of the list, where it is not likely to 691 * be closed. Items at the back of the list are closed to make space. */ 692 static void 693 reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse) 694 { 695 if(!reuse->item_on_lru_list) { 696 log_err("internal error: we need to touch the lru_list but item not in list"); 697 return; /* not on the list, no lru to modify */ 698 } 699 if(!reuse->lru_prev) 700 return; /* already first in the list */ 701 /* remove at current position */ 702 /* since it is not first, there is a previous element */ 703 reuse->lru_prev->lru_next = reuse->lru_next; 704 if(reuse->lru_next) 705 reuse->lru_next->lru_prev = reuse->lru_prev; 706 else outnet->tcp_reuse_last = reuse->lru_prev; 707 /* insert at the front */ 708 reuse->lru_prev = NULL; 709 reuse->lru_next = outnet->tcp_reuse_first; 710 /* since it is not first, it is not the only element and 711 * lru_next is thus not NULL and thus reuse is now not the last in 712 * the list, so outnet->tcp_reuse_last does not need to be modified */ 713 outnet->tcp_reuse_first = reuse; 714 } 715 716 /** call callback on waiting_tcp, if not NULL */ 717 static void 718 waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error, 719 struct comm_reply* reply_info) 720 { 721 if(w->cb) { 722 fptr_ok(fptr_whitelist_pending_tcp(w->cb)); 723 (void)(*w->cb)(c, w->cb_arg, error, reply_info); 724 } 725 } 726 727 /** see if buffers can be used to service TCP queries */ 728 static void 729 use_free_buffer(struct outside_network* outnet) 730 { 731 struct waiting_tcp* w; 732 while(outnet->tcp_free && outnet->tcp_wait_first 733 && !outnet->want_to_quit) { 734 struct reuse_tcp* reuse = NULL; 735 w = outnet->tcp_wait_first; 736 outnet->tcp_wait_first = w->next_waiting; 737 if(outnet->tcp_wait_last == w) 738 outnet->tcp_wait_last = NULL; 739 w->on_tcp_waiting_list = 0; 740 reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen, 741 w->ssl_upstream); 742 if(reuse) { 743 log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: " 744 "found reuse", reuse); 745 reuse_tcp_lru_touch(outnet, reuse); 746 comm_timer_disable(w->timer); 747 w->next_waiting = (void*)reuse->pending; 748 reuse_tree_by_id_insert(reuse, w); 749 if(reuse->pending->query) { 750 /* on the write wait list */ 751 reuse_write_wait_push_back(reuse, w); 752 } else { 753 /* write straight away */ 754 /* stop the timer on read of the fd */ 755 comm_point_stop_listening(reuse->pending->c); 756 reuse->pending->query = w; 757 outnet_tcp_take_query_setup( 758 reuse->pending->c->fd, reuse->pending, 759 w); 760 } 761 } else { 762 struct pending_tcp* pend = w->outnet->tcp_free; 763 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp); 764 pend->reuse.pending = pend; 765 memcpy(&pend->reuse.addr, &w->addr, w->addrlen); 766 pend->reuse.addrlen = w->addrlen; 767 if(!outnet_tcp_take_into_use(w)) { 768 waiting_tcp_callback(w, NULL, NETEVENT_CLOSED, 769 NULL); 770 waiting_tcp_delete(w); 771 } 772 } 773 } 774 } 775 776 /** add waiting_tcp element to the outnet tcp waiting list */ 777 static void 778 outnet_add_tcp_waiting(struct outside_network* outnet, struct waiting_tcp* w) 779 { 780 struct timeval tv; 781 if(w->on_tcp_waiting_list) 782 return; 783 w->next_waiting = NULL; 784 if(outnet->tcp_wait_last) 785 outnet->tcp_wait_last->next_waiting = w; 786 else outnet->tcp_wait_first = w; 787 outnet->tcp_wait_last = w; 788 w->on_tcp_waiting_list = 1; 789 #ifndef S_SPLINT_S 790 tv.tv_sec = w->timeout/1000; 791 tv.tv_usec = (w->timeout%1000)*1000; 792 #endif 793 comm_timer_set(w->timer, &tv); 794 } 795 796 /** delete element from tree by id */ 797 static void 798 reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w) 799 { 800 log_assert(w->id_node.key != NULL); 801 rbtree_delete(&reuse->tree_by_id, w); 802 w->id_node.key = NULL; 803 } 804 805 /** move writewait list to go for another connection. */ 806 static void 807 reuse_move_writewait_away(struct outside_network* outnet, 808 struct pending_tcp* pend) 809 { 810 /* the writewait list has not been written yet, so if the 811 * stream was closed, they have not actually been failed, only 812 * the queries written. Other queries can get written to another 813 * stream. For upstreams that do not support multiple queries 814 * and answers, the stream can get closed, and then the queries 815 * can get written on a new socket */ 816 struct waiting_tcp* w; 817 if(pend->query && pend->query->error_count == 0 && 818 pend->c->tcp_write_pkt == pend->query->pkt && 819 pend->c->tcp_write_pkt_len == pend->query->pkt_len) { 820 /* since the current query is not written, it can also 821 * move to a free buffer */ 822 if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 && 823 LDNS_QDCOUNT(pend->query->pkt) > 0 && 824 dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) { 825 char buf[LDNS_MAX_DOMAINLEN+1]; 826 dname_str(pend->query->pkt+12, buf); 827 verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written", 828 buf, (int)pend->c->tcp_write_byte_count); 829 } 830 pend->c->tcp_write_pkt = NULL; 831 pend->c->tcp_write_pkt_len = 0; 832 pend->c->tcp_write_and_read = 0; 833 pend->reuse.cp_more_read_again = 0; 834 pend->reuse.cp_more_write_again = 0; 835 pend->c->tcp_is_reading = 1; 836 w = pend->query; 837 pend->query = NULL; 838 /* increase error count, so that if the next socket fails too 839 * the server selection is run again with this query failed 840 * and it can select a different server (if possible), or 841 * fail the query */ 842 w->error_count ++; 843 reuse_tree_by_id_delete(&pend->reuse, w); 844 outnet_add_tcp_waiting(outnet, w); 845 } 846 while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) { 847 if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 && 848 LDNS_QDCOUNT(w->pkt) > 0 && 849 dname_valid(w->pkt+12, w->pkt_len-12)) { 850 char buf[LDNS_MAX_DOMAINLEN+1]; 851 dname_str(w->pkt+12, buf); 852 verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf); 853 } 854 reuse_tree_by_id_delete(&pend->reuse, w); 855 outnet_add_tcp_waiting(outnet, w); 856 } 857 } 858 859 /** remove reused element from tree and lru list */ 860 static void 861 reuse_tcp_remove_tree_list(struct outside_network* outnet, 862 struct reuse_tcp* reuse) 863 { 864 verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list"); 865 if(reuse->node.key) { 866 /* delete it from reuse tree */ 867 (void)rbtree_delete(&outnet->tcp_reuse, reuse); 868 reuse->node.key = NULL; 869 } 870 /* delete from reuse list */ 871 if(reuse->item_on_lru_list) { 872 if(reuse->lru_prev) { 873 /* assert that members of the lru list are waiting 874 * and thus have a pending pointer to the struct */ 875 log_assert(reuse->lru_prev->pending); 876 reuse->lru_prev->lru_next = reuse->lru_next; 877 } else { 878 log_assert(!reuse->lru_next || reuse->lru_next->pending); 879 outnet->tcp_reuse_first = reuse->lru_next; 880 } 881 if(reuse->lru_next) { 882 /* assert that members of the lru list are waiting 883 * and thus have a pending pointer to the struct */ 884 log_assert(reuse->lru_next->pending); 885 reuse->lru_next->lru_prev = reuse->lru_prev; 886 } else { 887 log_assert(!reuse->lru_prev || reuse->lru_prev->pending); 888 outnet->tcp_reuse_last = reuse->lru_prev; 889 } 890 reuse->item_on_lru_list = 0; 891 } 892 } 893 894 /** helper function that deletes an element from the tree of readwait 895 * elements in tcp reuse structure */ 896 static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg)) 897 { 898 struct waiting_tcp* w = (struct waiting_tcp*)node->key; 899 waiting_tcp_delete(w); 900 } 901 902 /** delete readwait waiting_tcp elements, deletes the elements in the list */ 903 void reuse_del_readwait(rbtree_type* tree_by_id) 904 { 905 if(tree_by_id->root == NULL || 906 tree_by_id->root == RBTREE_NULL) 907 return; 908 traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL); 909 rbtree_init(tree_by_id, reuse_id_cmp); 910 } 911 912 /** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */ 913 static void 914 decommission_pending_tcp(struct outside_network* outnet, 915 struct pending_tcp* pend) 916 { 917 verbose(VERB_CLIENT, "decommission_pending_tcp"); 918 pend->next_free = outnet->tcp_free; 919 outnet->tcp_free = pend; 920 if(pend->reuse.node.key) { 921 /* needs unlink from the reuse tree to get deleted */ 922 reuse_tcp_remove_tree_list(outnet, &pend->reuse); 923 } 924 /* free SSL structure after remove from outnet tcp reuse tree, 925 * because the c->ssl null or not is used for sorting in the tree */ 926 if(pend->c->ssl) { 927 #ifdef HAVE_SSL 928 SSL_shutdown(pend->c->ssl); 929 SSL_free(pend->c->ssl); 930 pend->c->ssl = NULL; 931 #endif 932 } 933 comm_point_close(pend->c); 934 pend->reuse.cp_more_read_again = 0; 935 pend->reuse.cp_more_write_again = 0; 936 /* unlink the query and writewait list, it is part of the tree 937 * nodes and is deleted */ 938 pend->query = NULL; 939 pend->reuse.write_wait_first = NULL; 940 pend->reuse.write_wait_last = NULL; 941 reuse_del_readwait(&pend->reuse.tree_by_id); 942 } 943 944 /** perform failure callbacks for waiting queries in reuse read rbtree */ 945 static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err) 946 { 947 rbnode_type* node; 948 if(tree_by_id->root == NULL || 949 tree_by_id->root == RBTREE_NULL) 950 return; 951 node = rbtree_first(tree_by_id); 952 while(node && node != RBTREE_NULL) { 953 struct waiting_tcp* w = (struct waiting_tcp*)node->key; 954 waiting_tcp_callback(w, NULL, err, NULL); 955 node = rbtree_next(node); 956 } 957 } 958 959 /** perform callbacks for failure and also decommission pending tcp. 960 * the callbacks remove references in sq->pending to the waiting_tcp 961 * members of the tree_by_id in the pending tcp. The pending_tcp is 962 * removed before the callbacks, so that the callbacks do not modify 963 * the pending_tcp due to its reference in the outside_network reuse tree */ 964 static void reuse_cb_and_decommission(struct outside_network* outnet, 965 struct pending_tcp* pend, int error) 966 { 967 rbtree_type store; 968 store = pend->reuse.tree_by_id; 969 pend->query = NULL; 970 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp); 971 pend->reuse.write_wait_first = NULL; 972 pend->reuse.write_wait_last = NULL; 973 decommission_pending_tcp(outnet, pend); 974 reuse_cb_readwait_for_failure(&store, error); 975 reuse_del_readwait(&store); 976 } 977 978 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */ 979 static void 980 reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp) 981 { 982 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse); 983 comm_point_start_listening(pend_tcp->c, -1, REUSE_TIMEOUT); 984 } 985 986 /** set timeout on tcp fd and setup read event to catch incoming dns msgs */ 987 static void 988 reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp) 989 { 990 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse); 991 sldns_buffer_clear(pend_tcp->c->buffer); 992 pend_tcp->c->tcp_is_reading = 1; 993 pend_tcp->c->tcp_byte_count = 0; 994 comm_point_stop_listening(pend_tcp->c); 995 comm_point_start_listening(pend_tcp->c, -1, REUSE_TIMEOUT); 996 } 997 998 int 999 outnet_tcp_cb(struct comm_point* c, void* arg, int error, 1000 struct comm_reply *reply_info) 1001 { 1002 struct pending_tcp* pend = (struct pending_tcp*)arg; 1003 struct outside_network* outnet = pend->reuse.outnet; 1004 struct waiting_tcp* w = NULL; 1005 verbose(VERB_ALGO, "outnettcp cb"); 1006 if(error == NETEVENT_TIMEOUT) { 1007 if(pend->c->tcp_write_and_read) { 1008 verbose(VERB_QUERY, "outnettcp got tcp timeout " 1009 "for read, ignored because write underway"); 1010 /* if we are writing, ignore readtimer, wait for write timer 1011 * or write is done */ 1012 return 0; 1013 } else { 1014 verbose(VERB_QUERY, "outnettcp got tcp timeout %s", 1015 (pend->reuse.tree_by_id.count?"for reading pkt": 1016 "for keepalive for reuse")); 1017 } 1018 /* must be timeout for reading or keepalive reuse, 1019 * close it. */ 1020 reuse_tcp_remove_tree_list(outnet, &pend->reuse); 1021 } else if(error == NETEVENT_PKT_WRITTEN) { 1022 /* the packet we want to write has been written. */ 1023 verbose(VERB_ALGO, "outnet tcp pkt was written event"); 1024 log_assert(c == pend->c); 1025 log_assert(pend->query->pkt == pend->c->tcp_write_pkt); 1026 log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len); 1027 pend->c->tcp_write_pkt = NULL; 1028 pend->c->tcp_write_pkt_len = 0; 1029 /* the pend.query is already in tree_by_id */ 1030 log_assert(pend->query->id_node.key); 1031 pend->query = NULL; 1032 /* setup to write next packet or setup read timeout */ 1033 if(pend->reuse.write_wait_first) { 1034 verbose(VERB_ALGO, "outnet tcp setup next pkt"); 1035 /* we can write it straight away perhaps, set flag 1036 * because this callback called after a tcp write 1037 * succeeded and likely more buffer space is available 1038 * and we can write some more. */ 1039 pend->reuse.cp_more_write_again = 1; 1040 pend->query = reuse_write_wait_pop(&pend->reuse); 1041 comm_point_stop_listening(pend->c); 1042 outnet_tcp_take_query_setup(pend->c->fd, pend, 1043 pend->query); 1044 } else { 1045 verbose(VERB_ALGO, "outnet tcp writes done, wait"); 1046 pend->c->tcp_write_and_read = 0; 1047 pend->reuse.cp_more_read_again = 0; 1048 pend->reuse.cp_more_write_again = 0; 1049 pend->c->tcp_is_reading = 1; 1050 comm_point_stop_listening(pend->c); 1051 reuse_tcp_setup_timeout(pend); 1052 } 1053 return 0; 1054 } else if(error != NETEVENT_NOERROR) { 1055 verbose(VERB_QUERY, "outnettcp got tcp error %d", error); 1056 reuse_move_writewait_away(outnet, pend); 1057 /* pass error below and exit */ 1058 } else { 1059 /* check ID */ 1060 if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) { 1061 log_addr(VERB_QUERY, 1062 "outnettcp: bad ID in reply, too short, from:", 1063 &pend->reuse.addr, pend->reuse.addrlen); 1064 error = NETEVENT_CLOSED; 1065 } else { 1066 uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin( 1067 c->buffer)); 1068 /* find the query the reply is for */ 1069 w = reuse_tcp_by_id_find(&pend->reuse, id); 1070 } 1071 } 1072 if(error == NETEVENT_NOERROR && !w) { 1073 /* no struct waiting found in tree, no reply to call */ 1074 log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:", 1075 &pend->reuse.addr, pend->reuse.addrlen); 1076 error = NETEVENT_CLOSED; 1077 } 1078 if(error == NETEVENT_NOERROR) { 1079 /* add to reuse tree so it can be reused, if not a failure. 1080 * This is possible if the state machine wants to make a tcp 1081 * query again to the same destination. */ 1082 if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) { 1083 (void)reuse_tcp_insert(outnet, pend); 1084 } 1085 } 1086 if(w) { 1087 reuse_tree_by_id_delete(&pend->reuse, w); 1088 verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d", 1089 error, (int)sldns_buffer_limit(c->buffer)); 1090 waiting_tcp_callback(w, c, error, reply_info); 1091 waiting_tcp_delete(w); 1092 } 1093 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb"); 1094 if(error == NETEVENT_NOERROR && pend->reuse.node.key) { 1095 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it"); 1096 /* it is in the reuse_tcp tree, with other queries, or 1097 * on the empty list. do not decommission it */ 1098 /* if there are more outstanding queries, we could try to 1099 * read again, to see if it is on the input, 1100 * because this callback called after a successful read 1101 * and there could be more bytes to read on the input */ 1102 if(pend->reuse.tree_by_id.count != 0) 1103 pend->reuse.cp_more_read_again = 1; 1104 reuse_tcp_setup_read_and_timeout(pend); 1105 return 0; 1106 } 1107 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it"); 1108 /* no queries on it, no space to keep it. or timeout or closed due 1109 * to error. Close it */ 1110 reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT? 1111 NETEVENT_TIMEOUT:NETEVENT_CLOSED)); 1112 use_free_buffer(outnet); 1113 return 0; 1114 } 1115 1116 /** lower use count on pc, see if it can be closed */ 1117 static void 1118 portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc) 1119 { 1120 struct port_if* pif; 1121 pc->num_outstanding--; 1122 if(pc->num_outstanding > 0) { 1123 return; 1124 } 1125 /* close it and replace in unused list */ 1126 verbose(VERB_ALGO, "close of port %d", pc->number); 1127 comm_point_close(pc->cp); 1128 pif = pc->pif; 1129 log_assert(pif->inuse > 0); 1130 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1131 pif->avail_ports[pif->avail_total - pif->inuse] = pc->number; 1132 #endif 1133 pif->inuse--; 1134 pif->out[pc->index] = pif->out[pif->inuse]; 1135 pif->out[pc->index]->index = pc->index; 1136 pc->next = outnet->unused_fds; 1137 outnet->unused_fds = pc; 1138 } 1139 1140 /** try to send waiting UDP queries */ 1141 static void 1142 outnet_send_wait_udp(struct outside_network* outnet) 1143 { 1144 struct pending* pend; 1145 /* process waiting queries */ 1146 while(outnet->udp_wait_first && outnet->unused_fds 1147 && !outnet->want_to_quit) { 1148 pend = outnet->udp_wait_first; 1149 outnet->udp_wait_first = pend->next_waiting; 1150 if(!pend->next_waiting) outnet->udp_wait_last = NULL; 1151 sldns_buffer_clear(outnet->udp_buff); 1152 sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len); 1153 sldns_buffer_flip(outnet->udp_buff); 1154 free(pend->pkt); /* freeing now makes get_mem correct */ 1155 pend->pkt = NULL; 1156 pend->pkt_len = 0; 1157 if(!randomize_and_send_udp(pend, outnet->udp_buff, 1158 pend->timeout)) { 1159 /* callback error on pending */ 1160 if(pend->cb) { 1161 fptr_ok(fptr_whitelist_pending_udp(pend->cb)); 1162 (void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg, 1163 NETEVENT_CLOSED, NULL); 1164 } 1165 pending_delete(outnet, pend); 1166 } 1167 } 1168 } 1169 1170 int 1171 outnet_udp_cb(struct comm_point* c, void* arg, int error, 1172 struct comm_reply *reply_info) 1173 { 1174 struct outside_network* outnet = (struct outside_network*)arg; 1175 struct pending key; 1176 struct pending* p; 1177 verbose(VERB_ALGO, "answer cb"); 1178 1179 if(error != NETEVENT_NOERROR) { 1180 verbose(VERB_QUERY, "outnetudp got udp error %d", error); 1181 return 0; 1182 } 1183 if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) { 1184 verbose(VERB_QUERY, "outnetudp udp too short"); 1185 return 0; 1186 } 1187 log_assert(reply_info); 1188 1189 /* setup lookup key */ 1190 key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer)); 1191 memcpy(&key.addr, &reply_info->addr, reply_info->addrlen); 1192 key.addrlen = reply_info->addrlen; 1193 verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id); 1194 log_addr(VERB_ALGO, "Incoming reply addr =", 1195 &reply_info->addr, reply_info->addrlen); 1196 1197 /* find it, see if this thing is a valid query response */ 1198 verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count); 1199 p = (struct pending*)rbtree_search(outnet->pending, &key); 1200 if(!p) { 1201 verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped."); 1202 log_buf(VERB_ALGO, "dropped message", c->buffer); 1203 outnet->unwanted_replies++; 1204 if(outnet->unwanted_threshold && ++outnet->unwanted_total 1205 >= outnet->unwanted_threshold) { 1206 log_warn("unwanted reply total reached threshold (%u)" 1207 " you may be under attack." 1208 " defensive action: clearing the cache", 1209 (unsigned)outnet->unwanted_threshold); 1210 fptr_ok(fptr_whitelist_alloc_cleanup( 1211 outnet->unwanted_action)); 1212 (*outnet->unwanted_action)(outnet->unwanted_param); 1213 outnet->unwanted_total = 0; 1214 } 1215 return 0; 1216 } 1217 1218 verbose(VERB_ALGO, "received udp reply."); 1219 log_buf(VERB_ALGO, "udp message", c->buffer); 1220 if(p->pc->cp != c) { 1221 verbose(VERB_QUERY, "received reply id,addr on wrong port. " 1222 "dropped."); 1223 outnet->unwanted_replies++; 1224 if(outnet->unwanted_threshold && ++outnet->unwanted_total 1225 >= outnet->unwanted_threshold) { 1226 log_warn("unwanted reply total reached threshold (%u)" 1227 " you may be under attack." 1228 " defensive action: clearing the cache", 1229 (unsigned)outnet->unwanted_threshold); 1230 fptr_ok(fptr_whitelist_alloc_cleanup( 1231 outnet->unwanted_action)); 1232 (*outnet->unwanted_action)(outnet->unwanted_param); 1233 outnet->unwanted_total = 0; 1234 } 1235 return 0; 1236 } 1237 comm_timer_disable(p->timer); 1238 verbose(VERB_ALGO, "outnet handle udp reply"); 1239 /* delete from tree first in case callback creates a retry */ 1240 (void)rbtree_delete(outnet->pending, p->node.key); 1241 if(p->cb) { 1242 fptr_ok(fptr_whitelist_pending_udp(p->cb)); 1243 (void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info); 1244 } 1245 portcomm_loweruse(outnet, p->pc); 1246 pending_delete(NULL, p); 1247 outnet_send_wait_udp(outnet); 1248 return 0; 1249 } 1250 1251 /** calculate number of ip4 and ip6 interfaces*/ 1252 static void 1253 calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6, 1254 int* num_ip4, int* num_ip6) 1255 { 1256 int i; 1257 *num_ip4 = 0; 1258 *num_ip6 = 0; 1259 if(num_ifs <= 0) { 1260 if(do_ip4) 1261 *num_ip4 = 1; 1262 if(do_ip6) 1263 *num_ip6 = 1; 1264 return; 1265 } 1266 for(i=0; i<num_ifs; i++) 1267 { 1268 if(str_is_ip6(ifs[i])) { 1269 if(do_ip6) 1270 (*num_ip6)++; 1271 } else { 1272 if(do_ip4) 1273 (*num_ip4)++; 1274 } 1275 } 1276 1277 } 1278 1279 void 1280 pending_udp_timer_delay_cb(void* arg) 1281 { 1282 struct pending* p = (struct pending*)arg; 1283 struct outside_network* outnet = p->outnet; 1284 verbose(VERB_ALGO, "timeout udp with delay"); 1285 portcomm_loweruse(outnet, p->pc); 1286 pending_delete(outnet, p); 1287 outnet_send_wait_udp(outnet); 1288 } 1289 1290 void 1291 pending_udp_timer_cb(void *arg) 1292 { 1293 struct pending* p = (struct pending*)arg; 1294 struct outside_network* outnet = p->outnet; 1295 /* it timed out */ 1296 verbose(VERB_ALGO, "timeout udp"); 1297 if(p->cb) { 1298 fptr_ok(fptr_whitelist_pending_udp(p->cb)); 1299 (void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL); 1300 } 1301 /* if delayclose, keep port open for a longer time. 1302 * But if the udpwaitlist exists, then we are struggling to 1303 * keep up with demand for sockets, so do not wait, but service 1304 * the customer (customer service more important than portICMPs) */ 1305 if(outnet->delayclose && !outnet->udp_wait_first) { 1306 p->cb = NULL; 1307 p->timer->callback = &pending_udp_timer_delay_cb; 1308 comm_timer_set(p->timer, &outnet->delay_tv); 1309 return; 1310 } 1311 portcomm_loweruse(outnet, p->pc); 1312 pending_delete(outnet, p); 1313 outnet_send_wait_udp(outnet); 1314 } 1315 1316 /** create pending_tcp buffers */ 1317 static int 1318 create_pending_tcp(struct outside_network* outnet, size_t bufsize) 1319 { 1320 size_t i; 1321 if(outnet->num_tcp == 0) 1322 return 1; /* no tcp needed, nothing to do */ 1323 if(!(outnet->tcp_conns = (struct pending_tcp **)calloc( 1324 outnet->num_tcp, sizeof(struct pending_tcp*)))) 1325 return 0; 1326 for(i=0; i<outnet->num_tcp; i++) { 1327 if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1, 1328 sizeof(struct pending_tcp)))) 1329 return 0; 1330 outnet->tcp_conns[i]->next_free = outnet->tcp_free; 1331 outnet->tcp_free = outnet->tcp_conns[i]; 1332 outnet->tcp_conns[i]->c = comm_point_create_tcp_out( 1333 outnet->base, bufsize, outnet_tcp_cb, 1334 outnet->tcp_conns[i]); 1335 if(!outnet->tcp_conns[i]->c) 1336 return 0; 1337 } 1338 return 1; 1339 } 1340 1341 /** setup an outgoing interface, ready address */ 1342 static int setup_if(struct port_if* pif, const char* addrstr, 1343 int* avail, int numavail, size_t numfd) 1344 { 1345 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1346 pif->avail_total = numavail; 1347 pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int)); 1348 if(!pif->avail_ports) 1349 return 0; 1350 #endif 1351 if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) && 1352 !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT, 1353 &pif->addr, &pif->addrlen, &pif->pfxlen)) 1354 return 0; 1355 pif->maxout = (int)numfd; 1356 pif->inuse = 0; 1357 pif->out = (struct port_comm**)calloc(numfd, 1358 sizeof(struct port_comm*)); 1359 if(!pif->out) 1360 return 0; 1361 return 1; 1362 } 1363 1364 struct outside_network* 1365 outside_network_create(struct comm_base *base, size_t bufsize, 1366 size_t num_ports, char** ifs, int num_ifs, int do_ip4, 1367 int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra, 1368 struct ub_randstate* rnd, int use_caps_for_id, int* availports, 1369 int numavailports, size_t unwanted_threshold, int tcp_mss, 1370 void (*unwanted_action)(void*), void* unwanted_param, int do_udp, 1371 void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv, 1372 int udp_connect) 1373 { 1374 struct outside_network* outnet = (struct outside_network*) 1375 calloc(1, sizeof(struct outside_network)); 1376 size_t k; 1377 if(!outnet) { 1378 log_err("malloc failed"); 1379 return NULL; 1380 } 1381 comm_base_timept(base, &outnet->now_secs, &outnet->now_tv); 1382 outnet->base = base; 1383 outnet->num_tcp = num_tcp; 1384 outnet->num_tcp_outgoing = 0; 1385 outnet->infra = infra; 1386 outnet->rnd = rnd; 1387 outnet->sslctx = sslctx; 1388 outnet->tls_use_sni = tls_use_sni; 1389 #ifdef USE_DNSTAP 1390 outnet->dtenv = dtenv; 1391 #else 1392 (void)dtenv; 1393 #endif 1394 outnet->svcd_overhead = 0; 1395 outnet->want_to_quit = 0; 1396 outnet->unwanted_threshold = unwanted_threshold; 1397 outnet->unwanted_action = unwanted_action; 1398 outnet->unwanted_param = unwanted_param; 1399 outnet->use_caps_for_id = use_caps_for_id; 1400 outnet->do_udp = do_udp; 1401 outnet->tcp_mss = tcp_mss; 1402 outnet->ip_dscp = dscp; 1403 #ifndef S_SPLINT_S 1404 if(delayclose) { 1405 outnet->delayclose = 1; 1406 outnet->delay_tv.tv_sec = delayclose/1000; 1407 outnet->delay_tv.tv_usec = (delayclose%1000)*1000; 1408 } 1409 #endif 1410 if(udp_connect) { 1411 outnet->udp_connect = 1; 1412 } 1413 if(numavailports == 0 || num_ports == 0) { 1414 log_err("no outgoing ports available"); 1415 outside_network_delete(outnet); 1416 return NULL; 1417 } 1418 #ifndef INET6 1419 do_ip6 = 0; 1420 #endif 1421 calc_num46(ifs, num_ifs, do_ip4, do_ip6, 1422 &outnet->num_ip4, &outnet->num_ip6); 1423 if(outnet->num_ip4 != 0) { 1424 if(!(outnet->ip4_ifs = (struct port_if*)calloc( 1425 (size_t)outnet->num_ip4, sizeof(struct port_if)))) { 1426 log_err("malloc failed"); 1427 outside_network_delete(outnet); 1428 return NULL; 1429 } 1430 } 1431 if(outnet->num_ip6 != 0) { 1432 if(!(outnet->ip6_ifs = (struct port_if*)calloc( 1433 (size_t)outnet->num_ip6, sizeof(struct port_if)))) { 1434 log_err("malloc failed"); 1435 outside_network_delete(outnet); 1436 return NULL; 1437 } 1438 } 1439 if( !(outnet->udp_buff = sldns_buffer_new(bufsize)) || 1440 !(outnet->pending = rbtree_create(pending_cmp)) || 1441 !(outnet->serviced = rbtree_create(serviced_cmp)) || 1442 !create_pending_tcp(outnet, bufsize)) { 1443 log_err("malloc failed"); 1444 outside_network_delete(outnet); 1445 return NULL; 1446 } 1447 rbtree_init(&outnet->tcp_reuse, reuse_cmp); 1448 outnet->tcp_reuse_max = num_tcp; 1449 1450 /* allocate commpoints */ 1451 for(k=0; k<num_ports; k++) { 1452 struct port_comm* pc; 1453 pc = (struct port_comm*)calloc(1, sizeof(*pc)); 1454 if(!pc) { 1455 log_err("malloc failed"); 1456 outside_network_delete(outnet); 1457 return NULL; 1458 } 1459 pc->cp = comm_point_create_udp(outnet->base, -1, 1460 outnet->udp_buff, outnet_udp_cb, outnet); 1461 if(!pc->cp) { 1462 log_err("malloc failed"); 1463 free(pc); 1464 outside_network_delete(outnet); 1465 return NULL; 1466 } 1467 pc->next = outnet->unused_fds; 1468 outnet->unused_fds = pc; 1469 } 1470 1471 /* allocate interfaces */ 1472 if(num_ifs == 0) { 1473 if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0", 1474 availports, numavailports, num_ports)) { 1475 log_err("malloc failed"); 1476 outside_network_delete(outnet); 1477 return NULL; 1478 } 1479 if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::", 1480 availports, numavailports, num_ports)) { 1481 log_err("malloc failed"); 1482 outside_network_delete(outnet); 1483 return NULL; 1484 } 1485 } else { 1486 size_t done_4 = 0, done_6 = 0; 1487 int i; 1488 for(i=0; i<num_ifs; i++) { 1489 if(str_is_ip6(ifs[i]) && do_ip6) { 1490 if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i], 1491 availports, numavailports, num_ports)){ 1492 log_err("malloc failed"); 1493 outside_network_delete(outnet); 1494 return NULL; 1495 } 1496 done_6++; 1497 } 1498 if(!str_is_ip6(ifs[i]) && do_ip4) { 1499 if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i], 1500 availports, numavailports, num_ports)){ 1501 log_err("malloc failed"); 1502 outside_network_delete(outnet); 1503 return NULL; 1504 } 1505 done_4++; 1506 } 1507 } 1508 } 1509 return outnet; 1510 } 1511 1512 /** helper pending delete */ 1513 static void 1514 pending_node_del(rbnode_type* node, void* arg) 1515 { 1516 struct pending* pend = (struct pending*)node; 1517 struct outside_network* outnet = (struct outside_network*)arg; 1518 pending_delete(outnet, pend); 1519 } 1520 1521 /** helper serviced delete */ 1522 static void 1523 serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg)) 1524 { 1525 struct serviced_query* sq = (struct serviced_query*)node; 1526 struct service_callback* p = sq->cblist, *np; 1527 free(sq->qbuf); 1528 free(sq->zone); 1529 free(sq->tls_auth_name); 1530 edns_opt_list_free(sq->opt_list); 1531 while(p) { 1532 np = p->next; 1533 free(p); 1534 p = np; 1535 } 1536 free(sq); 1537 } 1538 1539 void 1540 outside_network_quit_prepare(struct outside_network* outnet) 1541 { 1542 if(!outnet) 1543 return; 1544 /* prevent queued items from being sent */ 1545 outnet->want_to_quit = 1; 1546 } 1547 1548 void 1549 outside_network_delete(struct outside_network* outnet) 1550 { 1551 if(!outnet) 1552 return; 1553 outnet->want_to_quit = 1; 1554 /* check every element, since we can be called on malloc error */ 1555 if(outnet->pending) { 1556 /* free pending elements, but do no unlink from tree. */ 1557 traverse_postorder(outnet->pending, pending_node_del, NULL); 1558 free(outnet->pending); 1559 } 1560 if(outnet->serviced) { 1561 traverse_postorder(outnet->serviced, serviced_node_del, NULL); 1562 free(outnet->serviced); 1563 } 1564 if(outnet->udp_buff) 1565 sldns_buffer_free(outnet->udp_buff); 1566 if(outnet->unused_fds) { 1567 struct port_comm* p = outnet->unused_fds, *np; 1568 while(p) { 1569 np = p->next; 1570 comm_point_delete(p->cp); 1571 free(p); 1572 p = np; 1573 } 1574 outnet->unused_fds = NULL; 1575 } 1576 if(outnet->ip4_ifs) { 1577 int i, k; 1578 for(i=0; i<outnet->num_ip4; i++) { 1579 for(k=0; k<outnet->ip4_ifs[i].inuse; k++) { 1580 struct port_comm* pc = outnet->ip4_ifs[i]. 1581 out[k]; 1582 comm_point_delete(pc->cp); 1583 free(pc); 1584 } 1585 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1586 free(outnet->ip4_ifs[i].avail_ports); 1587 #endif 1588 free(outnet->ip4_ifs[i].out); 1589 } 1590 free(outnet->ip4_ifs); 1591 } 1592 if(outnet->ip6_ifs) { 1593 int i, k; 1594 for(i=0; i<outnet->num_ip6; i++) { 1595 for(k=0; k<outnet->ip6_ifs[i].inuse; k++) { 1596 struct port_comm* pc = outnet->ip6_ifs[i]. 1597 out[k]; 1598 comm_point_delete(pc->cp); 1599 free(pc); 1600 } 1601 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1602 free(outnet->ip6_ifs[i].avail_ports); 1603 #endif 1604 free(outnet->ip6_ifs[i].out); 1605 } 1606 free(outnet->ip6_ifs); 1607 } 1608 if(outnet->tcp_conns) { 1609 size_t i; 1610 for(i=0; i<outnet->num_tcp; i++) 1611 if(outnet->tcp_conns[i]) { 1612 if(outnet->tcp_conns[i]->query && 1613 !outnet->tcp_conns[i]->query-> 1614 on_tcp_waiting_list) { 1615 /* delete waiting_tcp elements that 1616 * the tcp conn is working on */ 1617 struct pending_tcp* pend = 1618 (struct pending_tcp*)outnet-> 1619 tcp_conns[i]->query-> 1620 next_waiting; 1621 decommission_pending_tcp(outnet, pend); 1622 } 1623 comm_point_delete(outnet->tcp_conns[i]->c); 1624 waiting_tcp_delete(outnet->tcp_conns[i]->query); 1625 free(outnet->tcp_conns[i]); 1626 } 1627 free(outnet->tcp_conns); 1628 } 1629 if(outnet->tcp_wait_first) { 1630 struct waiting_tcp* p = outnet->tcp_wait_first, *np; 1631 while(p) { 1632 np = p->next_waiting; 1633 waiting_tcp_delete(p); 1634 p = np; 1635 } 1636 } 1637 /* was allocated in struct pending that was deleted above */ 1638 rbtree_init(&outnet->tcp_reuse, reuse_cmp); 1639 outnet->tcp_reuse_first = NULL; 1640 outnet->tcp_reuse_last = NULL; 1641 if(outnet->udp_wait_first) { 1642 struct pending* p = outnet->udp_wait_first, *np; 1643 while(p) { 1644 np = p->next_waiting; 1645 pending_delete(NULL, p); 1646 p = np; 1647 } 1648 } 1649 free(outnet); 1650 } 1651 1652 void 1653 pending_delete(struct outside_network* outnet, struct pending* p) 1654 { 1655 if(!p) 1656 return; 1657 if(outnet && outnet->udp_wait_first && 1658 (p->next_waiting || p == outnet->udp_wait_last) ) { 1659 /* delete from waiting list, if it is in the waiting list */ 1660 struct pending* prev = NULL, *x = outnet->udp_wait_first; 1661 while(x && x != p) { 1662 prev = x; 1663 x = x->next_waiting; 1664 } 1665 if(x) { 1666 log_assert(x == p); 1667 if(prev) 1668 prev->next_waiting = p->next_waiting; 1669 else outnet->udp_wait_first = p->next_waiting; 1670 if(outnet->udp_wait_last == p) 1671 outnet->udp_wait_last = prev; 1672 } 1673 } 1674 if(outnet) { 1675 (void)rbtree_delete(outnet->pending, p->node.key); 1676 } 1677 if(p->timer) 1678 comm_timer_delete(p->timer); 1679 free(p->pkt); 1680 free(p); 1681 } 1682 1683 static void 1684 sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd) 1685 { 1686 int i, last; 1687 if(!(pfxlen > 0 && pfxlen < 128)) 1688 return; 1689 for(i = 0; i < (128 - pfxlen) / 8; i++) { 1690 sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256); 1691 } 1692 last = pfxlen & 7; 1693 if(last != 0) { 1694 sa->sin6_addr.s6_addr[15-i] |= 1695 ((0xFF >> last) & ub_random_max(rnd, 256)); 1696 } 1697 } 1698 1699 /** 1700 * Try to open a UDP socket for outgoing communication. 1701 * Sets sockets options as needed. 1702 * @param addr: socket address. 1703 * @param addrlen: length of address. 1704 * @param pfxlen: length of network prefix (for address randomisation). 1705 * @param port: port override for addr. 1706 * @param inuse: if -1 is returned, this bool means the port was in use. 1707 * @param rnd: random state (for address randomisation). 1708 * @param dscp: DSCP to use. 1709 * @return fd or -1 1710 */ 1711 static int 1712 udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen, 1713 int port, int* inuse, struct ub_randstate* rnd, int dscp) 1714 { 1715 int fd, noproto; 1716 if(addr_is_ip6(addr, addrlen)) { 1717 int freebind = 0; 1718 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr; 1719 sa.sin6_port = (in_port_t)htons((uint16_t)port); 1720 sa.sin6_flowinfo = 0; 1721 sa.sin6_scope_id = 0; 1722 if(pfxlen != 0) { 1723 freebind = 1; 1724 sai6_putrandom(&sa, pfxlen, rnd); 1725 } 1726 fd = create_udp_sock(AF_INET6, SOCK_DGRAM, 1727 (struct sockaddr*)&sa, addrlen, 1, inuse, &noproto, 1728 0, 0, 0, NULL, 0, freebind, 0, dscp); 1729 } else { 1730 struct sockaddr_in* sa = (struct sockaddr_in*)addr; 1731 sa->sin_port = (in_port_t)htons((uint16_t)port); 1732 fd = create_udp_sock(AF_INET, SOCK_DGRAM, 1733 (struct sockaddr*)addr, addrlen, 1, inuse, &noproto, 1734 0, 0, 0, NULL, 0, 0, 0, dscp); 1735 } 1736 return fd; 1737 } 1738 1739 /** Select random ID */ 1740 static int 1741 select_id(struct outside_network* outnet, struct pending* pend, 1742 sldns_buffer* packet) 1743 { 1744 int id_tries = 0; 1745 pend->id = ((unsigned)ub_random(outnet->rnd)>>8) & 0xffff; 1746 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id); 1747 1748 /* insert in tree */ 1749 pend->node.key = pend; 1750 while(!rbtree_insert(outnet->pending, &pend->node)) { 1751 /* change ID to avoid collision */ 1752 pend->id = ((unsigned)ub_random(outnet->rnd)>>8) & 0xffff; 1753 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id); 1754 id_tries++; 1755 if(id_tries == MAX_ID_RETRY) { 1756 pend->id=99999; /* non existant ID */ 1757 log_err("failed to generate unique ID, drop msg"); 1758 return 0; 1759 } 1760 } 1761 verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id); 1762 return 1; 1763 } 1764 1765 /** return true is UDP connect error needs to be logged */ 1766 static int udp_connect_needs_log(int err) 1767 { 1768 switch(err) { 1769 case ECONNREFUSED: 1770 # ifdef ENETUNREACH 1771 case ENETUNREACH: 1772 # endif 1773 # ifdef EHOSTDOWN 1774 case EHOSTDOWN: 1775 # endif 1776 # ifdef EHOSTUNREACH 1777 case EHOSTUNREACH: 1778 # endif 1779 # ifdef ENETDOWN 1780 case ENETDOWN: 1781 # endif 1782 if(verbosity >= VERB_ALGO) 1783 return 1; 1784 return 0; 1785 default: 1786 break; 1787 } 1788 return 1; 1789 } 1790 1791 1792 /** Select random interface and port */ 1793 static int 1794 select_ifport(struct outside_network* outnet, struct pending* pend, 1795 int num_if, struct port_if* ifs) 1796 { 1797 int my_if, my_port, fd, portno, inuse, tries=0; 1798 struct port_if* pif; 1799 /* randomly select interface and port */ 1800 if(num_if == 0) { 1801 verbose(VERB_QUERY, "Need to send query but have no " 1802 "outgoing interfaces of that family"); 1803 return 0; 1804 } 1805 log_assert(outnet->unused_fds); 1806 tries = 0; 1807 while(1) { 1808 my_if = ub_random_max(outnet->rnd, num_if); 1809 pif = &ifs[my_if]; 1810 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1811 if(outnet->udp_connect) { 1812 /* if we connect() we cannot reuse fds for a port */ 1813 if(pif->inuse >= pif->avail_total) { 1814 tries++; 1815 if(tries < MAX_PORT_RETRY) 1816 continue; 1817 log_err("failed to find an open port, drop msg"); 1818 return 0; 1819 } 1820 my_port = pif->inuse + ub_random_max(outnet->rnd, 1821 pif->avail_total - pif->inuse); 1822 } else { 1823 my_port = ub_random_max(outnet->rnd, pif->avail_total); 1824 if(my_port < pif->inuse) { 1825 /* port already open */ 1826 pend->pc = pif->out[my_port]; 1827 verbose(VERB_ALGO, "using UDP if=%d port=%d", 1828 my_if, pend->pc->number); 1829 break; 1830 } 1831 } 1832 /* try to open new port, if fails, loop to try again */ 1833 log_assert(pif->inuse < pif->maxout); 1834 portno = pif->avail_ports[my_port - pif->inuse]; 1835 #else 1836 my_port = portno = 0; 1837 #endif 1838 fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen, 1839 portno, &inuse, outnet->rnd, outnet->ip_dscp); 1840 if(fd == -1 && !inuse) { 1841 /* nonrecoverable error making socket */ 1842 return 0; 1843 } 1844 if(fd != -1) { 1845 verbose(VERB_ALGO, "opened UDP if=%d port=%d", 1846 my_if, portno); 1847 if(outnet->udp_connect) { 1848 /* connect() to the destination */ 1849 if(connect(fd, (struct sockaddr*)&pend->addr, 1850 pend->addrlen) < 0) { 1851 if(udp_connect_needs_log(errno)) { 1852 log_err_addr("udp connect failed", 1853 strerror(errno), &pend->addr, 1854 pend->addrlen); 1855 } 1856 sock_close(fd); 1857 return 0; 1858 } 1859 } 1860 /* grab fd */ 1861 pend->pc = outnet->unused_fds; 1862 outnet->unused_fds = pend->pc->next; 1863 1864 /* setup portcomm */ 1865 pend->pc->next = NULL; 1866 pend->pc->number = portno; 1867 pend->pc->pif = pif; 1868 pend->pc->index = pif->inuse; 1869 pend->pc->num_outstanding = 0; 1870 comm_point_start_listening(pend->pc->cp, fd, -1); 1871 1872 /* grab port in interface */ 1873 pif->out[pif->inuse] = pend->pc; 1874 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 1875 pif->avail_ports[my_port - pif->inuse] = 1876 pif->avail_ports[pif->avail_total-pif->inuse-1]; 1877 #endif 1878 pif->inuse++; 1879 break; 1880 } 1881 /* failed, already in use */ 1882 verbose(VERB_QUERY, "port %d in use, trying another", portno); 1883 tries++; 1884 if(tries == MAX_PORT_RETRY) { 1885 log_err("failed to find an open port, drop msg"); 1886 return 0; 1887 } 1888 } 1889 log_assert(pend->pc); 1890 pend->pc->num_outstanding++; 1891 1892 return 1; 1893 } 1894 1895 static int 1896 randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout) 1897 { 1898 struct timeval tv; 1899 struct outside_network* outnet = pend->sq->outnet; 1900 1901 /* select id */ 1902 if(!select_id(outnet, pend, packet)) { 1903 return 0; 1904 } 1905 1906 /* select src_if, port */ 1907 if(addr_is_ip6(&pend->addr, pend->addrlen)) { 1908 if(!select_ifport(outnet, pend, 1909 outnet->num_ip6, outnet->ip6_ifs)) 1910 return 0; 1911 } else { 1912 if(!select_ifport(outnet, pend, 1913 outnet->num_ip4, outnet->ip4_ifs)) 1914 return 0; 1915 } 1916 log_assert(pend->pc && pend->pc->cp); 1917 1918 /* send it over the commlink */ 1919 if(!comm_point_send_udp_msg(pend->pc->cp, packet, 1920 (struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) { 1921 portcomm_loweruse(outnet, pend->pc); 1922 return 0; 1923 } 1924 1925 /* system calls to set timeout after sending UDP to make roundtrip 1926 smaller. */ 1927 #ifndef S_SPLINT_S 1928 tv.tv_sec = timeout/1000; 1929 tv.tv_usec = (timeout%1000)*1000; 1930 #endif 1931 comm_timer_set(pend->timer, &tv); 1932 1933 #ifdef USE_DNSTAP 1934 if(outnet->dtenv && 1935 (outnet->dtenv->log_resolver_query_messages || 1936 outnet->dtenv->log_forwarder_query_messages)) 1937 dt_msg_send_outside_query(outnet->dtenv, &pend->addr, comm_udp, 1938 pend->sq->zone, pend->sq->zonelen, packet); 1939 #endif 1940 return 1; 1941 } 1942 1943 struct pending* 1944 pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet, 1945 int timeout, comm_point_callback_type* cb, void* cb_arg) 1946 { 1947 struct pending* pend = (struct pending*)calloc(1, sizeof(*pend)); 1948 if(!pend) return NULL; 1949 pend->outnet = sq->outnet; 1950 pend->sq = sq; 1951 pend->addrlen = sq->addrlen; 1952 memmove(&pend->addr, &sq->addr, sq->addrlen); 1953 pend->cb = cb; 1954 pend->cb_arg = cb_arg; 1955 pend->node.key = pend; 1956 pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb, 1957 pend); 1958 if(!pend->timer) { 1959 free(pend); 1960 return NULL; 1961 } 1962 1963 if(sq->outnet->unused_fds == NULL) { 1964 /* no unused fd, cannot create a new port (randomly) */ 1965 verbose(VERB_ALGO, "no fds available, udp query waiting"); 1966 pend->timeout = timeout; 1967 pend->pkt_len = sldns_buffer_limit(packet); 1968 pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet), 1969 pend->pkt_len); 1970 if(!pend->pkt) { 1971 comm_timer_delete(pend->timer); 1972 free(pend); 1973 return NULL; 1974 } 1975 /* put at end of waiting list */ 1976 if(sq->outnet->udp_wait_last) 1977 sq->outnet->udp_wait_last->next_waiting = pend; 1978 else 1979 sq->outnet->udp_wait_first = pend; 1980 sq->outnet->udp_wait_last = pend; 1981 return pend; 1982 } 1983 if(!randomize_and_send_udp(pend, packet, timeout)) { 1984 pending_delete(sq->outnet, pend); 1985 return NULL; 1986 } 1987 return pend; 1988 } 1989 1990 void 1991 outnet_tcptimer(void* arg) 1992 { 1993 struct waiting_tcp* w = (struct waiting_tcp*)arg; 1994 struct outside_network* outnet = w->outnet; 1995 verbose(VERB_CLIENT, "outnet_tcptimer"); 1996 if(w->on_tcp_waiting_list) { 1997 /* it is on the waiting list */ 1998 waiting_list_remove(outnet, w); 1999 waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL); 2000 waiting_tcp_delete(w); 2001 } else { 2002 /* it was in use */ 2003 struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting; 2004 reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT); 2005 } 2006 use_free_buffer(outnet); 2007 } 2008 2009 /** close the oldest reuse_tcp connection to make a fd and struct pend 2010 * available for a new stream connection */ 2011 static void 2012 reuse_tcp_close_oldest(struct outside_network* outnet) 2013 { 2014 struct pending_tcp* pend; 2015 verbose(VERB_CLIENT, "reuse_tcp_close_oldest"); 2016 if(!outnet->tcp_reuse_last) return; 2017 pend = outnet->tcp_reuse_last->pending; 2018 2019 /* snip off of LRU */ 2020 log_assert(pend->reuse.lru_next == NULL); 2021 if(pend->reuse.lru_prev) { 2022 outnet->tcp_reuse_last = pend->reuse.lru_prev; 2023 pend->reuse.lru_prev->lru_next = NULL; 2024 } else { 2025 outnet->tcp_reuse_last = NULL; 2026 outnet->tcp_reuse_first = NULL; 2027 } 2028 pend->reuse.item_on_lru_list = 0; 2029 2030 /* free up */ 2031 reuse_cb_and_decommission(outnet, pend, NETEVENT_CLOSED); 2032 } 2033 2034 /** find spare ID value for reuse tcp stream. That is random and also does 2035 * not collide with an existing query ID that is in use or waiting */ 2036 uint16_t 2037 reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet) 2038 { 2039 uint16_t id = 0, curid, nextid; 2040 const int try_random = 2000; 2041 int i; 2042 unsigned select, count, space; 2043 rbnode_type* node; 2044 2045 /* make really sure the tree is not empty */ 2046 if(reuse->tree_by_id.count == 0) { 2047 id = ((unsigned)ub_random(outnet->rnd)>>8) & 0xffff; 2048 return id; 2049 } 2050 2051 /* try to find random empty spots by picking them */ 2052 for(i = 0; i<try_random; i++) { 2053 id = ((unsigned)ub_random(outnet->rnd)>>8) & 0xffff; 2054 if(!reuse_tcp_by_id_find(reuse, id)) { 2055 return id; 2056 } 2057 } 2058 2059 /* equally pick a random unused element from the tree that is 2060 * not in use. Pick a the n-th index of an ununused number, 2061 * then loop over the empty spaces in the tree and find it */ 2062 log_assert(reuse->tree_by_id.count < 0xffff); 2063 select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count); 2064 /* select value now in 0 .. num free - 1 */ 2065 2066 count = 0; /* number of free spaces passed by */ 2067 node = rbtree_first(&reuse->tree_by_id); 2068 log_assert(node && node != RBTREE_NULL); /* tree not empty */ 2069 /* see if select is before first node */ 2070 if(select < tree_by_id_get_id(node)) 2071 return select; 2072 count += tree_by_id_get_id(node); 2073 /* perhaps select is between nodes */ 2074 while(node && node != RBTREE_NULL) { 2075 rbnode_type* next = rbtree_next(node); 2076 if(next && next != RBTREE_NULL) { 2077 curid = tree_by_id_get_id(node); 2078 nextid = tree_by_id_get_id(next); 2079 log_assert(curid < nextid); 2080 if(curid != 0xffff && curid + 1 < nextid) { 2081 /* space between nodes */ 2082 space = nextid - curid - 1; 2083 log_assert(select >= count); 2084 if(select < count + space) { 2085 /* here it is */ 2086 return curid + 1 + (select - count); 2087 } 2088 count += space; 2089 } 2090 } 2091 node = next; 2092 } 2093 2094 /* select is after the last node */ 2095 /* count is the number of free positions before the nodes in the 2096 * tree */ 2097 node = rbtree_last(&reuse->tree_by_id); 2098 log_assert(node && node != RBTREE_NULL); /* tree not empty */ 2099 curid = tree_by_id_get_id(node); 2100 log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff); 2101 return curid + 1 + (select - count); 2102 } 2103 2104 struct waiting_tcp* 2105 pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet, 2106 int timeout, comm_point_callback_type* callback, void* callback_arg) 2107 { 2108 struct pending_tcp* pend = sq->outnet->tcp_free; 2109 struct reuse_tcp* reuse = NULL; 2110 struct waiting_tcp* w; 2111 2112 verbose(VERB_CLIENT, "pending_tcp_query"); 2113 if(sldns_buffer_limit(packet) < sizeof(uint16_t)) { 2114 verbose(VERB_ALGO, "pending tcp query with too short buffer < 2"); 2115 return NULL; 2116 } 2117 2118 /* find out if a reused stream to the target exists */ 2119 /* if so, take it into use */ 2120 reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen, 2121 sq->ssl_upstream); 2122 if(reuse) { 2123 log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse); 2124 log_assert(reuse->pending); 2125 pend = reuse->pending; 2126 reuse_tcp_lru_touch(sq->outnet, reuse); 2127 } 2128 2129 /* if !pend but we have reuse streams, close a reuse stream 2130 * to be able to open a new one to this target, no use waiting 2131 * to reuse a file descriptor while another query needs to use 2132 * that buffer and file descriptor now. */ 2133 if(!pend) { 2134 reuse_tcp_close_oldest(sq->outnet); 2135 pend = sq->outnet->tcp_free; 2136 } 2137 2138 /* allocate space to store query */ 2139 w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp) 2140 + sldns_buffer_limit(packet)); 2141 if(!w) { 2142 return NULL; 2143 } 2144 if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) { 2145 free(w); 2146 return NULL; 2147 } 2148 w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp); 2149 w->pkt_len = sldns_buffer_limit(packet); 2150 memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len); 2151 if(reuse) 2152 w->id = reuse_tcp_select_id(reuse, sq->outnet); 2153 else w->id = ((unsigned)ub_random(sq->outnet->rnd)>>8) & 0xffff; 2154 LDNS_ID_SET(w->pkt, w->id); 2155 memcpy(&w->addr, &sq->addr, sq->addrlen); 2156 w->addrlen = sq->addrlen; 2157 w->outnet = sq->outnet; 2158 w->on_tcp_waiting_list = 0; 2159 w->next_waiting = NULL; 2160 w->cb = callback; 2161 w->cb_arg = callback_arg; 2162 w->ssl_upstream = sq->ssl_upstream; 2163 w->tls_auth_name = sq->tls_auth_name; 2164 w->timeout = timeout; 2165 w->id_node.key = NULL; 2166 w->write_wait_prev = NULL; 2167 w->write_wait_next = NULL; 2168 w->write_wait_queued = 0; 2169 w->error_count = 0; 2170 if(pend) { 2171 /* we have a buffer available right now */ 2172 if(reuse) { 2173 /* reuse existing fd, write query and continue */ 2174 /* store query in tree by id */ 2175 verbose(VERB_CLIENT, "pending_tcp_query: reuse, store"); 2176 w->next_waiting = (void*)pend; 2177 reuse_tree_by_id_insert(&pend->reuse, w); 2178 /* can we write right now? */ 2179 if(pend->query == NULL) { 2180 /* write straight away */ 2181 /* stop the timer on read of the fd */ 2182 comm_point_stop_listening(pend->c); 2183 pend->query = w; 2184 outnet_tcp_take_query_setup(pend->c->fd, pend, 2185 w); 2186 } else { 2187 /* put it in the waiting list for 2188 * this stream */ 2189 reuse_write_wait_push_back(&pend->reuse, w); 2190 } 2191 } else { 2192 /* create new fd and connect to addr, setup to 2193 * write query */ 2194 verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect"); 2195 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp); 2196 pend->reuse.pending = pend; 2197 memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen); 2198 pend->reuse.addrlen = sq->addrlen; 2199 if(!outnet_tcp_take_into_use(w)) { 2200 waiting_tcp_delete(w); 2201 return NULL; 2202 } 2203 } 2204 } else { 2205 /* queue up */ 2206 /* waiting for a buffer on the outside network buffer wait 2207 * list */ 2208 verbose(VERB_CLIENT, "pending_tcp_query: queue to wait"); 2209 outnet_add_tcp_waiting(sq->outnet, w); 2210 } 2211 #ifdef USE_DNSTAP 2212 if(sq->outnet->dtenv && 2213 (sq->outnet->dtenv->log_resolver_query_messages || 2214 sq->outnet->dtenv->log_forwarder_query_messages)) 2215 dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr, 2216 comm_tcp, sq->zone, sq->zonelen, packet); 2217 #endif 2218 return w; 2219 } 2220 2221 /** create query for serviced queries */ 2222 static void 2223 serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen, 2224 uint16_t qtype, uint16_t qclass, uint16_t flags) 2225 { 2226 sldns_buffer_clear(buff); 2227 /* skip id */ 2228 sldns_buffer_write_u16(buff, flags); 2229 sldns_buffer_write_u16(buff, 1); /* qdcount */ 2230 sldns_buffer_write_u16(buff, 0); /* ancount */ 2231 sldns_buffer_write_u16(buff, 0); /* nscount */ 2232 sldns_buffer_write_u16(buff, 0); /* arcount */ 2233 sldns_buffer_write(buff, qname, qnamelen); 2234 sldns_buffer_write_u16(buff, qtype); 2235 sldns_buffer_write_u16(buff, qclass); 2236 sldns_buffer_flip(buff); 2237 } 2238 2239 /** lookup serviced query in serviced query rbtree */ 2240 static struct serviced_query* 2241 lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec, 2242 struct sockaddr_storage* addr, socklen_t addrlen, 2243 struct edns_option* opt_list) 2244 { 2245 struct serviced_query key; 2246 key.node.key = &key; 2247 key.qbuf = sldns_buffer_begin(buff); 2248 key.qbuflen = sldns_buffer_limit(buff); 2249 key.dnssec = dnssec; 2250 memcpy(&key.addr, addr, addrlen); 2251 key.addrlen = addrlen; 2252 key.outnet = outnet; 2253 key.opt_list = opt_list; 2254 return (struct serviced_query*)rbtree_search(outnet->serviced, &key); 2255 } 2256 2257 /** Create new serviced entry */ 2258 static struct serviced_query* 2259 serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec, 2260 int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream, 2261 char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen, 2262 uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list, 2263 size_t pad_queries_block_size) 2264 { 2265 struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq)); 2266 #ifdef UNBOUND_DEBUG 2267 rbnode_type* ins; 2268 #endif 2269 if(!sq) 2270 return NULL; 2271 sq->node.key = sq; 2272 sq->qbuf = memdup(sldns_buffer_begin(buff), sldns_buffer_limit(buff)); 2273 if(!sq->qbuf) { 2274 free(sq); 2275 return NULL; 2276 } 2277 sq->qbuflen = sldns_buffer_limit(buff); 2278 sq->zone = memdup(zone, zonelen); 2279 if(!sq->zone) { 2280 free(sq->qbuf); 2281 free(sq); 2282 return NULL; 2283 } 2284 sq->zonelen = zonelen; 2285 sq->qtype = qtype; 2286 sq->dnssec = dnssec; 2287 sq->want_dnssec = want_dnssec; 2288 sq->nocaps = nocaps; 2289 sq->tcp_upstream = tcp_upstream; 2290 sq->ssl_upstream = ssl_upstream; 2291 if(tls_auth_name) { 2292 sq->tls_auth_name = strdup(tls_auth_name); 2293 if(!sq->tls_auth_name) { 2294 free(sq->zone); 2295 free(sq->qbuf); 2296 free(sq); 2297 return NULL; 2298 } 2299 } else { 2300 sq->tls_auth_name = NULL; 2301 } 2302 memcpy(&sq->addr, addr, addrlen); 2303 sq->addrlen = addrlen; 2304 sq->opt_list = NULL; 2305 if(opt_list) { 2306 sq->opt_list = edns_opt_copy_alloc(opt_list); 2307 if(!sq->opt_list) { 2308 free(sq->tls_auth_name); 2309 free(sq->zone); 2310 free(sq->qbuf); 2311 free(sq); 2312 return NULL; 2313 } 2314 } 2315 sq->outnet = outnet; 2316 sq->cblist = NULL; 2317 sq->pending = NULL; 2318 sq->status = serviced_initial; 2319 sq->retry = 0; 2320 sq->to_be_deleted = 0; 2321 sq->padding_block_size = pad_queries_block_size; 2322 #ifdef UNBOUND_DEBUG 2323 ins = 2324 #else 2325 (void) 2326 #endif 2327 rbtree_insert(outnet->serviced, &sq->node); 2328 log_assert(ins != NULL); /* must not be already present */ 2329 return sq; 2330 } 2331 2332 /** remove waiting tcp from the outnet waiting list */ 2333 static void 2334 waiting_list_remove(struct outside_network* outnet, struct waiting_tcp* w) 2335 { 2336 struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL; 2337 w->on_tcp_waiting_list = 0; 2338 while(p) { 2339 if(p == w) { 2340 /* remove w */ 2341 if(prev) 2342 prev->next_waiting = w->next_waiting; 2343 else outnet->tcp_wait_first = w->next_waiting; 2344 if(outnet->tcp_wait_last == w) 2345 outnet->tcp_wait_last = prev; 2346 return; 2347 } 2348 prev = p; 2349 p = p->next_waiting; 2350 } 2351 } 2352 2353 /** reuse tcp stream, remove serviced query from stream, 2354 * return true if the stream is kept, false if it is to be closed */ 2355 static int 2356 reuse_tcp_remove_serviced_keep(struct waiting_tcp* w, 2357 struct serviced_query* sq) 2358 { 2359 struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting; 2360 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep"); 2361 /* remove the callback. let query continue to write to not cancel 2362 * the stream itself. also keep it as an entry in the tree_by_id, 2363 * in case the answer returns (that we no longer want), but we cannot 2364 * pick the same ID number meanwhile */ 2365 w->cb = NULL; 2366 /* see if can be entered in reuse tree 2367 * for that the FD has to be non-1 */ 2368 if(pend_tcp->c->fd == -1) { 2369 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd"); 2370 return 0; 2371 } 2372 /* if in tree and used by other queries */ 2373 if(pend_tcp->reuse.node.key) { 2374 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries"); 2375 /* do not reset the keepalive timer, for that 2376 * we'd need traffic, and this is where the serviced is 2377 * removed due to state machine internal reasons, 2378 * eg. iterator no longer interested in this query */ 2379 return 1; 2380 } 2381 /* if still open and want to keep it open */ 2382 if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count < 2383 sq->outnet->tcp_reuse_max) { 2384 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open"); 2385 /* set a keepalive timer on it */ 2386 if(!reuse_tcp_insert(sq->outnet, pend_tcp)) { 2387 return 0; 2388 } 2389 reuse_tcp_setup_timeout(pend_tcp); 2390 return 1; 2391 } 2392 return 0; 2393 } 2394 2395 /** cleanup serviced query entry */ 2396 static void 2397 serviced_delete(struct serviced_query* sq) 2398 { 2399 verbose(VERB_CLIENT, "serviced_delete"); 2400 if(sq->pending) { 2401 /* clear up the pending query */ 2402 if(sq->status == serviced_query_UDP_EDNS || 2403 sq->status == serviced_query_UDP || 2404 sq->status == serviced_query_UDP_EDNS_FRAG || 2405 sq->status == serviced_query_UDP_EDNS_fallback) { 2406 struct pending* p = (struct pending*)sq->pending; 2407 verbose(VERB_CLIENT, "serviced_delete: UDP"); 2408 if(p->pc) 2409 portcomm_loweruse(sq->outnet, p->pc); 2410 pending_delete(sq->outnet, p); 2411 /* this call can cause reentrant calls back into the 2412 * mesh */ 2413 outnet_send_wait_udp(sq->outnet); 2414 } else { 2415 struct waiting_tcp* w = (struct waiting_tcp*) 2416 sq->pending; 2417 verbose(VERB_CLIENT, "serviced_delete: TCP"); 2418 /* if on stream-write-waiting list then 2419 * remove from waiting list and waiting_tcp_delete */ 2420 if(w->write_wait_queued) { 2421 struct pending_tcp* pend = 2422 (struct pending_tcp*)w->next_waiting; 2423 verbose(VERB_CLIENT, "serviced_delete: writewait"); 2424 reuse_tree_by_id_delete(&pend->reuse, w); 2425 reuse_write_wait_remove(&pend->reuse, w); 2426 waiting_tcp_delete(w); 2427 } else if(!w->on_tcp_waiting_list) { 2428 struct pending_tcp* pend = 2429 (struct pending_tcp*)w->next_waiting; 2430 verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep"); 2431 if(!reuse_tcp_remove_serviced_keep(w, sq)) { 2432 reuse_cb_and_decommission(sq->outnet, 2433 pend, NETEVENT_CLOSED); 2434 use_free_buffer(sq->outnet); 2435 } 2436 sq->pending = NULL; 2437 } else { 2438 verbose(VERB_CLIENT, "serviced_delete: tcpwait"); 2439 waiting_list_remove(sq->outnet, w); 2440 waiting_tcp_delete(w); 2441 } 2442 } 2443 } 2444 /* does not delete from tree, caller has to do that */ 2445 serviced_node_del(&sq->node, NULL); 2446 } 2447 2448 /** perturb a dname capitalization randomly */ 2449 static void 2450 serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len) 2451 { 2452 uint8_t lablen; 2453 uint8_t* d = qbuf + 10; 2454 long int random = 0; 2455 int bits = 0; 2456 log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */); 2457 (void)len; 2458 lablen = *d++; 2459 while(lablen) { 2460 while(lablen--) { 2461 /* only perturb A-Z, a-z */ 2462 if(isalpha((unsigned char)*d)) { 2463 /* get a random bit */ 2464 if(bits == 0) { 2465 random = ub_random(rnd); 2466 bits = 30; 2467 } 2468 if(random & 0x1) { 2469 *d = (uint8_t)toupper((unsigned char)*d); 2470 } else { 2471 *d = (uint8_t)tolower((unsigned char)*d); 2472 } 2473 random >>= 1; 2474 bits--; 2475 } 2476 d++; 2477 } 2478 lablen = *d++; 2479 } 2480 if(verbosity >= VERB_ALGO) { 2481 char buf[LDNS_MAX_DOMAINLEN+1]; 2482 dname_str(qbuf+10, buf); 2483 verbose(VERB_ALGO, "qname perturbed to %s", buf); 2484 } 2485 } 2486 2487 /** put serviced query into a buffer */ 2488 static void 2489 serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns) 2490 { 2491 /* if we are using 0x20 bits for ID randomness, perturb them */ 2492 if(sq->outnet->use_caps_for_id && !sq->nocaps) { 2493 serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen); 2494 } 2495 /* generate query */ 2496 sldns_buffer_clear(buff); 2497 sldns_buffer_write_u16(buff, 0); /* id placeholder */ 2498 sldns_buffer_write(buff, sq->qbuf, sq->qbuflen); 2499 sldns_buffer_flip(buff); 2500 if(with_edns) { 2501 /* add edns section */ 2502 struct edns_data edns; 2503 struct edns_option padding_option; 2504 edns.edns_present = 1; 2505 edns.ext_rcode = 0; 2506 edns.edns_version = EDNS_ADVERTISED_VERSION; 2507 edns.opt_list = sq->opt_list; 2508 if(sq->status == serviced_query_UDP_EDNS_FRAG) { 2509 if(addr_is_ip6(&sq->addr, sq->addrlen)) { 2510 if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE) 2511 edns.udp_size = EDNS_FRAG_SIZE_IP6; 2512 else edns.udp_size = EDNS_ADVERTISED_SIZE; 2513 } else { 2514 if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE) 2515 edns.udp_size = EDNS_FRAG_SIZE_IP4; 2516 else edns.udp_size = EDNS_ADVERTISED_SIZE; 2517 } 2518 } else { 2519 edns.udp_size = EDNS_ADVERTISED_SIZE; 2520 } 2521 edns.bits = 0; 2522 if(sq->dnssec & EDNS_DO) 2523 edns.bits = EDNS_DO; 2524 if(sq->dnssec & BIT_CD) 2525 LDNS_CD_SET(sldns_buffer_begin(buff)); 2526 if (sq->ssl_upstream && sq->padding_block_size) { 2527 padding_option.opt_code = LDNS_EDNS_PADDING; 2528 padding_option.opt_len = 0; 2529 padding_option.opt_data = NULL; 2530 padding_option.next = edns.opt_list; 2531 edns.opt_list = &padding_option; 2532 edns.padding_block_size = sq->padding_block_size; 2533 } 2534 attach_edns_record(buff, &edns); 2535 } 2536 } 2537 2538 /** 2539 * Perform serviced query UDP sending operation. 2540 * Sends UDP with EDNS, unless infra host marked non EDNS. 2541 * @param sq: query to send. 2542 * @param buff: buffer scratch space. 2543 * @return 0 on error. 2544 */ 2545 static int 2546 serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff) 2547 { 2548 int rtt, vs; 2549 uint8_t edns_lame_known; 2550 time_t now = *sq->outnet->now_secs; 2551 2552 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone, 2553 sq->zonelen, now, &vs, &edns_lame_known, &rtt)) 2554 return 0; 2555 sq->last_rtt = rtt; 2556 verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs); 2557 if(sq->status == serviced_initial) { 2558 if(vs != -1) { 2559 sq->status = serviced_query_UDP_EDNS; 2560 } else { 2561 sq->status = serviced_query_UDP; 2562 } 2563 } 2564 serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) || 2565 (sq->status == serviced_query_UDP_EDNS_FRAG)); 2566 sq->last_sent_time = *sq->outnet->now_tv; 2567 sq->edns_lame_known = (int)edns_lame_known; 2568 verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt); 2569 sq->pending = pending_udp_query(sq, buff, rtt, 2570 serviced_udp_callback, sq); 2571 if(!sq->pending) 2572 return 0; 2573 return 1; 2574 } 2575 2576 /** check that perturbed qname is identical */ 2577 static int 2578 serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen) 2579 { 2580 uint8_t* d1 = sldns_buffer_begin(pkt)+12; 2581 uint8_t* d2 = qbuf+10; 2582 uint8_t len1, len2; 2583 int count = 0; 2584 if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */ 2585 return 0; 2586 log_assert(qbuflen >= 15 /* 10 header, root, type, class */); 2587 len1 = *d1++; 2588 len2 = *d2++; 2589 while(len1 != 0 || len2 != 0) { 2590 if(LABEL_IS_PTR(len1)) { 2591 /* check if we can read *d1 with compression ptr rest */ 2592 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2593 return 0; 2594 d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1); 2595 /* check if we can read the destination *d1 */ 2596 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2597 return 0; 2598 len1 = *d1++; 2599 if(count++ > MAX_COMPRESS_PTRS) 2600 return 0; 2601 continue; 2602 } 2603 if(d2 > qbuf+qbuflen) 2604 return 0; 2605 if(len1 != len2) 2606 return 0; 2607 if(len1 > LDNS_MAX_LABELLEN) 2608 return 0; 2609 /* check len1 + 1(next length) are okay to read */ 2610 if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt))) 2611 return 0; 2612 log_assert(len1 <= LDNS_MAX_LABELLEN); 2613 log_assert(len2 <= LDNS_MAX_LABELLEN); 2614 log_assert(len1 == len2 && len1 != 0); 2615 /* compare the labels - bitwise identical */ 2616 if(memcmp(d1, d2, len1) != 0) 2617 return 0; 2618 d1 += len1; 2619 d2 += len2; 2620 len1 = *d1++; 2621 len2 = *d2++; 2622 } 2623 return 1; 2624 } 2625 2626 /** call the callbacks for a serviced query */ 2627 static void 2628 serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c, 2629 struct comm_reply* rep) 2630 { 2631 struct service_callback* p; 2632 int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/ 2633 uint8_t *backup_p = NULL; 2634 size_t backlen = 0; 2635 #ifdef UNBOUND_DEBUG 2636 rbnode_type* rem = 2637 #else 2638 (void) 2639 #endif 2640 /* remove from tree, and schedule for deletion, so that callbacks 2641 * can safely deregister themselves and even create new serviced 2642 * queries that are identical to this one. */ 2643 rbtree_delete(sq->outnet->serviced, sq); 2644 log_assert(rem); /* should have been present */ 2645 sq->to_be_deleted = 1; 2646 verbose(VERB_ALGO, "svcd callbacks start"); 2647 if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c && 2648 !sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) { 2649 /* for type PTR do not check perturbed name in answer, 2650 * compatibility with cisco dns guard boxes that mess up 2651 * reverse queries 0x20 contents */ 2652 /* noerror and nxdomain must have a qname in reply */ 2653 if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 && 2654 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2655 == LDNS_RCODE_NOERROR || 2656 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2657 == LDNS_RCODE_NXDOMAIN)) { 2658 verbose(VERB_DETAIL, "no qname in reply to check 0x20ID"); 2659 log_addr(VERB_DETAIL, "from server", 2660 &sq->addr, sq->addrlen); 2661 log_buf(VERB_DETAIL, "for packet", c->buffer); 2662 error = NETEVENT_CLOSED; 2663 c = NULL; 2664 } else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 && 2665 !serviced_check_qname(c->buffer, sq->qbuf, 2666 sq->qbuflen)) { 2667 verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname"); 2668 log_addr(VERB_DETAIL, "from server", 2669 &sq->addr, sq->addrlen); 2670 log_buf(VERB_DETAIL, "for packet", c->buffer); 2671 error = NETEVENT_CAPSFAIL; 2672 /* and cleanup too */ 2673 pkt_dname_tolower(c->buffer, 2674 sldns_buffer_at(c->buffer, 12)); 2675 } else { 2676 verbose(VERB_ALGO, "good 0x20-ID in reply qname"); 2677 /* cleanup caps, prettier cache contents. */ 2678 pkt_dname_tolower(c->buffer, 2679 sldns_buffer_at(c->buffer, 12)); 2680 } 2681 } 2682 if(dobackup && c) { 2683 /* make a backup of the query, since the querystate processing 2684 * may send outgoing queries that overwrite the buffer. 2685 * use secondary buffer to store the query. 2686 * This is a data copy, but faster than packet to server */ 2687 backlen = sldns_buffer_limit(c->buffer); 2688 backup_p = memdup(sldns_buffer_begin(c->buffer), backlen); 2689 if(!backup_p) { 2690 log_err("malloc failure in serviced query callbacks"); 2691 error = NETEVENT_CLOSED; 2692 c = NULL; 2693 } 2694 sq->outnet->svcd_overhead = backlen; 2695 } 2696 /* test the actual sq->cblist, because the next elem could be deleted*/ 2697 while((p=sq->cblist) != NULL) { 2698 sq->cblist = p->next; /* remove this element */ 2699 if(dobackup && c) { 2700 sldns_buffer_clear(c->buffer); 2701 sldns_buffer_write(c->buffer, backup_p, backlen); 2702 sldns_buffer_flip(c->buffer); 2703 } 2704 fptr_ok(fptr_whitelist_serviced_query(p->cb)); 2705 (void)(*p->cb)(c, p->cb_arg, error, rep); 2706 free(p); 2707 } 2708 if(backup_p) { 2709 free(backup_p); 2710 sq->outnet->svcd_overhead = 0; 2711 } 2712 verbose(VERB_ALGO, "svcd callbacks end"); 2713 log_assert(sq->cblist == NULL); 2714 serviced_delete(sq); 2715 } 2716 2717 int 2718 serviced_tcp_callback(struct comm_point* c, void* arg, int error, 2719 struct comm_reply* rep) 2720 { 2721 struct serviced_query* sq = (struct serviced_query*)arg; 2722 struct comm_reply r2; 2723 sq->pending = NULL; /* removed after this callback */ 2724 if(error != NETEVENT_NOERROR) 2725 log_addr(VERB_QUERY, "tcp error for address", 2726 &sq->addr, sq->addrlen); 2727 if(error==NETEVENT_NOERROR) 2728 infra_update_tcp_works(sq->outnet->infra, &sq->addr, 2729 sq->addrlen, sq->zone, sq->zonelen); 2730 #ifdef USE_DNSTAP 2731 if(error==NETEVENT_NOERROR && sq->outnet->dtenv && 2732 (sq->outnet->dtenv->log_resolver_response_messages || 2733 sq->outnet->dtenv->log_forwarder_response_messages)) 2734 dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr, 2735 c->type, sq->zone, sq->zonelen, sq->qbuf, sq->qbuflen, 2736 &sq->last_sent_time, sq->outnet->now_tv, c->buffer); 2737 #endif 2738 if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS && 2739 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 2740 LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin( 2741 c->buffer)) == LDNS_RCODE_NOTIMPL) ) { 2742 /* attempt to fallback to nonEDNS */ 2743 sq->status = serviced_query_TCP_EDNS_fallback; 2744 serviced_tcp_initiate(sq, c->buffer); 2745 return 0; 2746 } else if(error==NETEVENT_NOERROR && 2747 sq->status == serviced_query_TCP_EDNS_fallback && 2748 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 2749 LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE( 2750 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN 2751 || LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2752 == LDNS_RCODE_YXDOMAIN)) { 2753 /* the fallback produced a result that looks promising, note 2754 * that this server should be approached without EDNS */ 2755 /* only store noEDNS in cache if domain is noDNSSEC */ 2756 if(!sq->want_dnssec) 2757 if(!infra_edns_update(sq->outnet->infra, &sq->addr, 2758 sq->addrlen, sq->zone, sq->zonelen, -1, 2759 *sq->outnet->now_secs)) 2760 log_err("Out of memory caching no edns for host"); 2761 sq->status = serviced_query_TCP; 2762 } 2763 if(sq->tcp_upstream || sq->ssl_upstream) { 2764 struct timeval now = *sq->outnet->now_tv; 2765 if(error!=NETEVENT_NOERROR) { 2766 if(!infra_rtt_update(sq->outnet->infra, &sq->addr, 2767 sq->addrlen, sq->zone, sq->zonelen, sq->qtype, 2768 -1, sq->last_rtt, (time_t)now.tv_sec)) 2769 log_err("out of memory in TCP exponential backoff."); 2770 } else if(now.tv_sec > sq->last_sent_time.tv_sec || 2771 (now.tv_sec == sq->last_sent_time.tv_sec && 2772 now.tv_usec > sq->last_sent_time.tv_usec)) { 2773 /* convert from microseconds to milliseconds */ 2774 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000 2775 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000; 2776 verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime); 2777 log_assert(roundtime >= 0); 2778 /* only store if less then AUTH_TIMEOUT seconds, it could be 2779 * huge due to system-hibernated and we woke up */ 2780 if(roundtime < 60000) { 2781 if(!infra_rtt_update(sq->outnet->infra, &sq->addr, 2782 sq->addrlen, sq->zone, sq->zonelen, sq->qtype, 2783 roundtime, sq->last_rtt, (time_t)now.tv_sec)) 2784 log_err("out of memory noting rtt."); 2785 } 2786 } 2787 } 2788 /* insert address into reply info */ 2789 if(!rep) { 2790 /* create one if there isn't (on errors) */ 2791 rep = &r2; 2792 r2.c = c; 2793 } 2794 memcpy(&rep->addr, &sq->addr, sq->addrlen); 2795 rep->addrlen = sq->addrlen; 2796 serviced_callbacks(sq, error, c, rep); 2797 return 0; 2798 } 2799 2800 static void 2801 serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff) 2802 { 2803 verbose(VERB_ALGO, "initiate TCP query %s", 2804 sq->status==serviced_query_TCP_EDNS?"EDNS":""); 2805 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS); 2806 sq->last_sent_time = *sq->outnet->now_tv; 2807 sq->pending = pending_tcp_query(sq, buff, TCP_AUTH_QUERY_TIMEOUT, 2808 serviced_tcp_callback, sq); 2809 if(!sq->pending) { 2810 /* delete from tree so that a retry by above layer does not 2811 * clash with this entry */ 2812 verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query"); 2813 serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL); 2814 } 2815 } 2816 2817 /** Send serviced query over TCP return false on initial failure */ 2818 static int 2819 serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff) 2820 { 2821 int vs, rtt, timeout; 2822 uint8_t edns_lame_known; 2823 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone, 2824 sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known, 2825 &rtt)) 2826 return 0; 2827 sq->last_rtt = rtt; 2828 if(vs != -1) 2829 sq->status = serviced_query_TCP_EDNS; 2830 else sq->status = serviced_query_TCP; 2831 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS); 2832 sq->last_sent_time = *sq->outnet->now_tv; 2833 if(sq->tcp_upstream || sq->ssl_upstream) { 2834 timeout = rtt; 2835 if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < TCP_AUTH_QUERY_TIMEOUT) 2836 timeout = TCP_AUTH_QUERY_TIMEOUT; 2837 } else { 2838 timeout = TCP_AUTH_QUERY_TIMEOUT; 2839 } 2840 sq->pending = pending_tcp_query(sq, buff, timeout, 2841 serviced_tcp_callback, sq); 2842 return sq->pending != NULL; 2843 } 2844 2845 /* see if packet is edns malformed; got zeroes at start. 2846 * This is from servers that return malformed packets to EDNS0 queries, 2847 * but they return good packets for nonEDNS0 queries. 2848 * We try to detect their output; without resorting to a full parse or 2849 * check for too many bytes after the end of the packet. */ 2850 static int 2851 packet_edns_malformed(struct sldns_buffer* buf, int qtype) 2852 { 2853 size_t len; 2854 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE) 2855 return 1; /* malformed */ 2856 /* they have NOERROR rcode, 1 answer. */ 2857 if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR) 2858 return 0; 2859 /* one query (to skip) and answer records */ 2860 if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 || 2861 LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0) 2862 return 0; 2863 /* skip qname */ 2864 len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE), 2865 sldns_buffer_limit(buf)-LDNS_HEADER_SIZE); 2866 if(len == 0) 2867 return 0; 2868 if(len == 1 && qtype == 0) 2869 return 0; /* we asked for '.' and type 0 */ 2870 /* and then 4 bytes (type and class of query) */ 2871 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3) 2872 return 0; 2873 2874 /* and start with 11 zeroes as the answer RR */ 2875 /* so check the qtype of the answer record, qname=0, type=0 */ 2876 if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 && 2877 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 && 2878 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0) 2879 return 1; 2880 return 0; 2881 } 2882 2883 int 2884 serviced_udp_callback(struct comm_point* c, void* arg, int error, 2885 struct comm_reply* rep) 2886 { 2887 struct serviced_query* sq = (struct serviced_query*)arg; 2888 struct outside_network* outnet = sq->outnet; 2889 struct timeval now = *sq->outnet->now_tv; 2890 2891 sq->pending = NULL; /* removed after callback */ 2892 if(error == NETEVENT_TIMEOUT) { 2893 if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000) { 2894 /* fallback to 1480/1280 */ 2895 sq->status = serviced_query_UDP_EDNS_FRAG; 2896 log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10, 2897 &sq->addr, sq->addrlen); 2898 if(!serviced_udp_send(sq, c->buffer)) { 2899 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 2900 } 2901 return 0; 2902 } 2903 if(sq->status == serviced_query_UDP_EDNS_FRAG) { 2904 /* fragmentation size did not fix it */ 2905 sq->status = serviced_query_UDP_EDNS; 2906 } 2907 sq->retry++; 2908 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen, 2909 sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt, 2910 (time_t)now.tv_sec)) 2911 log_err("out of memory in UDP exponential backoff"); 2912 if(sq->retry < OUTBOUND_UDP_RETRY) { 2913 log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10, 2914 &sq->addr, sq->addrlen); 2915 if(!serviced_udp_send(sq, c->buffer)) { 2916 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 2917 } 2918 return 0; 2919 } 2920 } 2921 if(error != NETEVENT_NOERROR) { 2922 /* udp returns error (due to no ID or interface available) */ 2923 serviced_callbacks(sq, error, c, rep); 2924 return 0; 2925 } 2926 #ifdef USE_DNSTAP 2927 if(error == NETEVENT_NOERROR && outnet->dtenv && 2928 (outnet->dtenv->log_resolver_response_messages || 2929 outnet->dtenv->log_forwarder_response_messages)) 2930 dt_msg_send_outside_response(outnet->dtenv, &sq->addr, c->type, 2931 sq->zone, sq->zonelen, sq->qbuf, sq->qbuflen, 2932 &sq->last_sent_time, sq->outnet->now_tv, c->buffer); 2933 #endif 2934 if( (sq->status == serviced_query_UDP_EDNS 2935 ||sq->status == serviced_query_UDP_EDNS_FRAG) 2936 && (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) 2937 == LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE( 2938 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL 2939 || packet_edns_malformed(c->buffer, sq->qtype) 2940 )) { 2941 /* try to get an answer by falling back without EDNS */ 2942 verbose(VERB_ALGO, "serviced query: attempt without EDNS"); 2943 sq->status = serviced_query_UDP_EDNS_fallback; 2944 sq->retry = 0; 2945 if(!serviced_udp_send(sq, c->buffer)) { 2946 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep); 2947 } 2948 return 0; 2949 } else if(sq->status == serviced_query_UDP_EDNS && 2950 !sq->edns_lame_known) { 2951 /* now we know that edns queries received answers store that */ 2952 log_addr(VERB_ALGO, "serviced query: EDNS works for", 2953 &sq->addr, sq->addrlen); 2954 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen, 2955 sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) { 2956 log_err("Out of memory caching edns works"); 2957 } 2958 sq->edns_lame_known = 1; 2959 } else if(sq->status == serviced_query_UDP_EDNS_fallback && 2960 !sq->edns_lame_known && (LDNS_RCODE_WIRE( 2961 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR || 2962 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) == 2963 LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin( 2964 c->buffer)) == LDNS_RCODE_YXDOMAIN)) { 2965 /* the fallback produced a result that looks promising, note 2966 * that this server should be approached without EDNS */ 2967 /* only store noEDNS in cache if domain is noDNSSEC */ 2968 if(!sq->want_dnssec) { 2969 log_addr(VERB_ALGO, "serviced query: EDNS fails for", 2970 &sq->addr, sq->addrlen); 2971 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen, 2972 sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) { 2973 log_err("Out of memory caching no edns for host"); 2974 } 2975 } else { 2976 log_addr(VERB_ALGO, "serviced query: EDNS fails, but " 2977 "not stored because need DNSSEC for", &sq->addr, 2978 sq->addrlen); 2979 } 2980 sq->status = serviced_query_UDP; 2981 } 2982 if(now.tv_sec > sq->last_sent_time.tv_sec || 2983 (now.tv_sec == sq->last_sent_time.tv_sec && 2984 now.tv_usec > sq->last_sent_time.tv_usec)) { 2985 /* convert from microseconds to milliseconds */ 2986 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000 2987 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000; 2988 verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime); 2989 log_assert(roundtime >= 0); 2990 /* in case the system hibernated, do not enter a huge value, 2991 * above this value gives trouble with server selection */ 2992 if(roundtime < 60000) { 2993 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen, 2994 sq->zone, sq->zonelen, sq->qtype, roundtime, 2995 sq->last_rtt, (time_t)now.tv_sec)) 2996 log_err("out of memory noting rtt."); 2997 } 2998 } 2999 /* perform TC flag check and TCP fallback after updating our 3000 * cache entries for EDNS status and RTT times */ 3001 if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) { 3002 /* fallback to TCP */ 3003 /* this discards partial UDP contents */ 3004 if(sq->status == serviced_query_UDP_EDNS || 3005 sq->status == serviced_query_UDP_EDNS_FRAG || 3006 sq->status == serviced_query_UDP_EDNS_fallback) 3007 /* if we have unfinished EDNS_fallback, start again */ 3008 sq->status = serviced_query_TCP_EDNS; 3009 else sq->status = serviced_query_TCP; 3010 serviced_tcp_initiate(sq, c->buffer); 3011 return 0; 3012 } 3013 /* yay! an answer */ 3014 serviced_callbacks(sq, error, c, rep); 3015 return 0; 3016 } 3017 3018 struct serviced_query* 3019 outnet_serviced_query(struct outside_network* outnet, 3020 struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec, 3021 int nocaps, int tcp_upstream, int ssl_upstream, char* tls_auth_name, 3022 struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone, 3023 size_t zonelen, struct module_qstate* qstate, 3024 comm_point_callback_type* callback, void* callback_arg, sldns_buffer* buff, 3025 struct module_env* env) 3026 { 3027 struct serviced_query* sq; 3028 struct service_callback* cb; 3029 struct edns_string_addr* client_string_addr; 3030 3031 if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone, zonelen, 3032 qstate, qstate->region)) 3033 return NULL; 3034 3035 if((client_string_addr = edns_string_addr_lookup( 3036 &env->edns_strings->client_strings, addr, addrlen))) { 3037 edns_opt_list_append(&qstate->edns_opts_back_out, 3038 env->edns_strings->client_string_opcode, 3039 client_string_addr->string_len, 3040 client_string_addr->string, qstate->region); 3041 } 3042 3043 serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype, 3044 qinfo->qclass, flags); 3045 sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen, 3046 qstate->edns_opts_back_out); 3047 /* duplicate entries are included in the callback list, because 3048 * there is a counterpart registration by our caller that needs to 3049 * be doubly-removed (with callbacks perhaps). */ 3050 if(!(cb = (struct service_callback*)malloc(sizeof(*cb)))) 3051 return NULL; 3052 if(!sq) { 3053 /* make new serviced query entry */ 3054 sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps, 3055 tcp_upstream, ssl_upstream, tls_auth_name, addr, 3056 addrlen, zone, zonelen, (int)qinfo->qtype, 3057 qstate->edns_opts_back_out, 3058 ( ssl_upstream && env->cfg->pad_queries 3059 ? env->cfg->pad_queries_block_size : 0 )); 3060 if(!sq) { 3061 free(cb); 3062 return NULL; 3063 } 3064 /* perform first network action */ 3065 if(outnet->do_udp && !(tcp_upstream || ssl_upstream)) { 3066 if(!serviced_udp_send(sq, buff)) { 3067 (void)rbtree_delete(outnet->serviced, sq); 3068 serviced_node_del(&sq->node, NULL); 3069 free(cb); 3070 return NULL; 3071 } 3072 } else { 3073 if(!serviced_tcp_send(sq, buff)) { 3074 (void)rbtree_delete(outnet->serviced, sq); 3075 serviced_node_del(&sq->node, NULL); 3076 free(cb); 3077 return NULL; 3078 } 3079 } 3080 } 3081 /* add callback to list of callbacks */ 3082 cb->cb = callback; 3083 cb->cb_arg = callback_arg; 3084 cb->next = sq->cblist; 3085 sq->cblist = cb; 3086 return sq; 3087 } 3088 3089 /** remove callback from list */ 3090 static void 3091 callback_list_remove(struct serviced_query* sq, void* cb_arg) 3092 { 3093 struct service_callback** pp = &sq->cblist; 3094 while(*pp) { 3095 if((*pp)->cb_arg == cb_arg) { 3096 struct service_callback* del = *pp; 3097 *pp = del->next; 3098 free(del); 3099 return; 3100 } 3101 pp = &(*pp)->next; 3102 } 3103 } 3104 3105 void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg) 3106 { 3107 if(!sq) 3108 return; 3109 callback_list_remove(sq, cb_arg); 3110 /* if callbacks() routine scheduled deletion, let it do that */ 3111 if(!sq->cblist && !sq->to_be_deleted) { 3112 (void)rbtree_delete(sq->outnet->serviced, sq); 3113 serviced_delete(sq); 3114 } 3115 } 3116 3117 /** create fd to send to this destination */ 3118 static int 3119 fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr, 3120 socklen_t to_addrlen) 3121 { 3122 struct sockaddr_storage* addr; 3123 socklen_t addrlen; 3124 int i, try, pnum, dscp; 3125 struct port_if* pif; 3126 3127 /* create fd */ 3128 dscp = outnet->ip_dscp; 3129 for(try = 0; try<1000; try++) { 3130 int port = 0; 3131 int freebind = 0; 3132 int noproto = 0; 3133 int inuse = 0; 3134 int fd = -1; 3135 3136 /* select interface */ 3137 if(addr_is_ip6(to_addr, to_addrlen)) { 3138 if(outnet->num_ip6 == 0) { 3139 char to[64]; 3140 addr_to_str(to_addr, to_addrlen, to, sizeof(to)); 3141 verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to); 3142 return -1; 3143 } 3144 i = ub_random_max(outnet->rnd, outnet->num_ip6); 3145 pif = &outnet->ip6_ifs[i]; 3146 } else { 3147 if(outnet->num_ip4 == 0) { 3148 char to[64]; 3149 addr_to_str(to_addr, to_addrlen, to, sizeof(to)); 3150 verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to); 3151 return -1; 3152 } 3153 i = ub_random_max(outnet->rnd, outnet->num_ip4); 3154 pif = &outnet->ip4_ifs[i]; 3155 } 3156 addr = &pif->addr; 3157 addrlen = pif->addrlen; 3158 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 3159 pnum = ub_random_max(outnet->rnd, pif->avail_total); 3160 if(pnum < pif->inuse) { 3161 /* port already open */ 3162 port = pif->out[pnum]->number; 3163 } else { 3164 /* unused ports in start part of array */ 3165 port = pif->avail_ports[pnum - pif->inuse]; 3166 } 3167 #else 3168 pnum = port = 0; 3169 #endif 3170 if(addr_is_ip6(to_addr, to_addrlen)) { 3171 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr; 3172 sa.sin6_port = (in_port_t)htons((uint16_t)port); 3173 fd = create_udp_sock(AF_INET6, SOCK_DGRAM, 3174 (struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto, 3175 0, 0, 0, NULL, 0, freebind, 0, dscp); 3176 } else { 3177 struct sockaddr_in* sa = (struct sockaddr_in*)addr; 3178 sa->sin_port = (in_port_t)htons((uint16_t)port); 3179 fd = create_udp_sock(AF_INET, SOCK_DGRAM, 3180 (struct sockaddr*)addr, addrlen, 1, &inuse, &noproto, 3181 0, 0, 0, NULL, 0, freebind, 0, dscp); 3182 } 3183 if(fd != -1) { 3184 return fd; 3185 } 3186 if(!inuse) { 3187 return -1; 3188 } 3189 } 3190 /* too many tries */ 3191 log_err("cannot send probe, ports are in use"); 3192 return -1; 3193 } 3194 3195 struct comm_point* 3196 outnet_comm_point_for_udp(struct outside_network* outnet, 3197 comm_point_callback_type* cb, void* cb_arg, 3198 struct sockaddr_storage* to_addr, socklen_t to_addrlen) 3199 { 3200 struct comm_point* cp; 3201 int fd = fd_for_dest(outnet, to_addr, to_addrlen); 3202 if(fd == -1) { 3203 return NULL; 3204 } 3205 cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff, 3206 cb, cb_arg); 3207 if(!cp) { 3208 log_err("malloc failure"); 3209 close(fd); 3210 return NULL; 3211 } 3212 return cp; 3213 } 3214 3215 /** setup SSL for comm point */ 3216 static int 3217 setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet, 3218 int fd, char* host) 3219 { 3220 cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd); 3221 if(!cp->ssl) { 3222 log_err("cannot create SSL object"); 3223 return 0; 3224 } 3225 #ifdef USE_WINSOCK 3226 comm_point_tcp_win_bio_cb(cp, cp->ssl); 3227 #endif 3228 cp->ssl_shake_state = comm_ssl_shake_write; 3229 /* https verification */ 3230 #ifdef HAVE_SSL 3231 if(outnet->tls_use_sni) { 3232 (void)SSL_set_tlsext_host_name(cp->ssl, host); 3233 } 3234 #endif 3235 #ifdef HAVE_SSL_SET1_HOST 3236 if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) { 3237 /* because we set SSL_VERIFY_PEER, in netevent in 3238 * ssl_handshake, it'll check if the certificate 3239 * verification has succeeded */ 3240 /* SSL_VERIFY_PEER is set on the sslctx */ 3241 /* and the certificates to verify with are loaded into 3242 * it with SSL_load_verify_locations or 3243 * SSL_CTX_set_default_verify_paths */ 3244 /* setting the hostname makes openssl verify the 3245 * host name in the x509 certificate in the 3246 * SSL connection*/ 3247 if(!SSL_set1_host(cp->ssl, host)) { 3248 log_err("SSL_set1_host failed"); 3249 return 0; 3250 } 3251 } 3252 #elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST) 3253 /* openssl 1.0.2 has this function that can be used for 3254 * set1_host like verification */ 3255 if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) { 3256 X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl); 3257 # ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS 3258 X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS); 3259 # endif 3260 if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) { 3261 log_err("X509_VERIFY_PARAM_set1_host failed"); 3262 return 0; 3263 } 3264 } 3265 #else 3266 (void)host; 3267 #endif /* HAVE_SSL_SET1_HOST */ 3268 return 1; 3269 } 3270 3271 struct comm_point* 3272 outnet_comm_point_for_tcp(struct outside_network* outnet, 3273 comm_point_callback_type* cb, void* cb_arg, 3274 struct sockaddr_storage* to_addr, socklen_t to_addrlen, 3275 sldns_buffer* query, int timeout, int ssl, char* host) 3276 { 3277 struct comm_point* cp; 3278 int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp); 3279 if(fd == -1) { 3280 return 0; 3281 } 3282 fd_set_nonblock(fd); 3283 if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) { 3284 /* outnet_tcp_connect has closed fd on error for us */ 3285 return 0; 3286 } 3287 cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg); 3288 if(!cp) { 3289 log_err("malloc failure"); 3290 close(fd); 3291 return 0; 3292 } 3293 cp->repinfo.addrlen = to_addrlen; 3294 memcpy(&cp->repinfo.addr, to_addr, to_addrlen); 3295 3296 /* setup for SSL (if needed) */ 3297 if(ssl) { 3298 if(!setup_comm_ssl(cp, outnet, fd, host)) { 3299 log_err("cannot setup XoT"); 3300 comm_point_delete(cp); 3301 return NULL; 3302 } 3303 } 3304 3305 /* set timeout on TCP connection */ 3306 comm_point_start_listening(cp, fd, timeout); 3307 /* copy scratch buffer to cp->buffer */ 3308 sldns_buffer_copy(cp->buffer, query); 3309 return cp; 3310 } 3311 3312 /** setup http request headers in buffer for sending query to destination */ 3313 static int 3314 setup_http_request(sldns_buffer* buf, char* host, char* path) 3315 { 3316 sldns_buffer_clear(buf); 3317 sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path); 3318 sldns_buffer_printf(buf, "Host: %s\r\n", host); 3319 sldns_buffer_printf(buf, "User-Agent: unbound/%s\r\n", 3320 PACKAGE_VERSION); 3321 /* We do not really do multiple queries per connection, 3322 * but this header setting is also not needed. 3323 * sldns_buffer_printf(buf, "Connection: close\r\n") */ 3324 sldns_buffer_printf(buf, "\r\n"); 3325 if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf)) 3326 return 0; /* somehow buffer too short, but it is about 60K 3327 and the request is only a couple bytes long. */ 3328 sldns_buffer_flip(buf); 3329 return 1; 3330 } 3331 3332 struct comm_point* 3333 outnet_comm_point_for_http(struct outside_network* outnet, 3334 comm_point_callback_type* cb, void* cb_arg, 3335 struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout, 3336 int ssl, char* host, char* path) 3337 { 3338 /* cp calls cb with err=NETEVENT_DONE when transfer is done */ 3339 struct comm_point* cp; 3340 int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp); 3341 if(fd == -1) { 3342 return 0; 3343 } 3344 fd_set_nonblock(fd); 3345 if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) { 3346 /* outnet_tcp_connect has closed fd on error for us */ 3347 return 0; 3348 } 3349 cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg, 3350 outnet->udp_buff); 3351 if(!cp) { 3352 log_err("malloc failure"); 3353 close(fd); 3354 return 0; 3355 } 3356 cp->repinfo.addrlen = to_addrlen; 3357 memcpy(&cp->repinfo.addr, to_addr, to_addrlen); 3358 3359 /* setup for SSL (if needed) */ 3360 if(ssl) { 3361 if(!setup_comm_ssl(cp, outnet, fd, host)) { 3362 log_err("cannot setup https"); 3363 comm_point_delete(cp); 3364 return NULL; 3365 } 3366 } 3367 3368 /* set timeout on TCP connection */ 3369 comm_point_start_listening(cp, fd, timeout); 3370 3371 /* setup http request in cp->buffer */ 3372 if(!setup_http_request(cp->buffer, host, path)) { 3373 log_err("error setting up http request"); 3374 comm_point_delete(cp); 3375 return NULL; 3376 } 3377 return cp; 3378 } 3379 3380 /** get memory used by waiting tcp entry (in use or not) */ 3381 static size_t 3382 waiting_tcp_get_mem(struct waiting_tcp* w) 3383 { 3384 size_t s; 3385 if(!w) return 0; 3386 s = sizeof(*w) + w->pkt_len; 3387 if(w->timer) 3388 s += comm_timer_get_mem(w->timer); 3389 return s; 3390 } 3391 3392 /** get memory used by port if */ 3393 static size_t 3394 if_get_mem(struct port_if* pif) 3395 { 3396 size_t s; 3397 int i; 3398 s = sizeof(*pif) + 3399 #ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION 3400 sizeof(int)*pif->avail_total + 3401 #endif 3402 sizeof(struct port_comm*)*pif->maxout; 3403 for(i=0; i<pif->inuse; i++) 3404 s += sizeof(*pif->out[i]) + 3405 comm_point_get_mem(pif->out[i]->cp); 3406 return s; 3407 } 3408 3409 /** get memory used by waiting udp */ 3410 static size_t 3411 waiting_udp_get_mem(struct pending* w) 3412 { 3413 size_t s; 3414 s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len; 3415 return s; 3416 } 3417 3418 size_t outnet_get_mem(struct outside_network* outnet) 3419 { 3420 size_t i; 3421 int k; 3422 struct waiting_tcp* w; 3423 struct pending* u; 3424 struct serviced_query* sq; 3425 struct service_callback* sb; 3426 struct port_comm* pc; 3427 size_t s = sizeof(*outnet) + sizeof(*outnet->base) + 3428 sizeof(*outnet->udp_buff) + 3429 sldns_buffer_capacity(outnet->udp_buff); 3430 /* second buffer is not ours */ 3431 for(pc = outnet->unused_fds; pc; pc = pc->next) { 3432 s += sizeof(*pc) + comm_point_get_mem(pc->cp); 3433 } 3434 for(k=0; k<outnet->num_ip4; k++) 3435 s += if_get_mem(&outnet->ip4_ifs[k]); 3436 for(k=0; k<outnet->num_ip6; k++) 3437 s += if_get_mem(&outnet->ip6_ifs[k]); 3438 for(u=outnet->udp_wait_first; u; u=u->next_waiting) 3439 s += waiting_udp_get_mem(u); 3440 3441 s += sizeof(struct pending_tcp*)*outnet->num_tcp; 3442 for(i=0; i<outnet->num_tcp; i++) { 3443 s += sizeof(struct pending_tcp); 3444 s += comm_point_get_mem(outnet->tcp_conns[i]->c); 3445 if(outnet->tcp_conns[i]->query) 3446 s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query); 3447 } 3448 for(w=outnet->tcp_wait_first; w; w = w->next_waiting) 3449 s += waiting_tcp_get_mem(w); 3450 s += sizeof(*outnet->pending); 3451 s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) * 3452 outnet->pending->count; 3453 s += sizeof(*outnet->serviced); 3454 s += outnet->svcd_overhead; 3455 RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) { 3456 s += sizeof(*sq) + sq->qbuflen; 3457 for(sb = sq->cblist; sb; sb = sb->next) 3458 s += sizeof(*sb); 3459 } 3460 return s; 3461 } 3462 3463 size_t 3464 serviced_get_mem(struct serviced_query* sq) 3465 { 3466 struct service_callback* sb; 3467 size_t s; 3468 s = sizeof(*sq) + sq->qbuflen; 3469 for(sb = sq->cblist; sb; sb = sb->next) 3470 s += sizeof(*sb); 3471 if(sq->status == serviced_query_UDP_EDNS || 3472 sq->status == serviced_query_UDP || 3473 sq->status == serviced_query_UDP_EDNS_FRAG || 3474 sq->status == serviced_query_UDP_EDNS_fallback) { 3475 s += sizeof(struct pending); 3476 s += comm_timer_get_mem(NULL); 3477 } else { 3478 /* does not have size of the pkt pointer */ 3479 /* always has a timer except on malloc failures */ 3480 3481 /* these sizes are part of the main outside network mem */ 3482 /* 3483 s += sizeof(struct waiting_tcp); 3484 s += comm_timer_get_mem(NULL); 3485 */ 3486 } 3487 return s; 3488 } 3489 3490