1 /* 2 * services/mesh.c - deal with mesh of query states and handle events for that. 3 * 4 * Copyright (c) 2007, NLnet Labs. All rights reserved. 5 * 6 * This software is open source. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * Redistributions of source code must retain the above copyright notice, 13 * this list of conditions and the following disclaimer. 14 * 15 * Redistributions in binary form must reproduce the above copyright notice, 16 * this list of conditions and the following disclaimer in the documentation 17 * and/or other materials provided with the distribution. 18 * 19 * Neither the name of the NLNET LABS nor the names of its contributors may 20 * be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 34 */ 35 36 /** 37 * \file 38 * 39 * This file contains functions to assist in dealing with a mesh of 40 * query states. This mesh is supposed to be thread-specific. 41 * It consists of query states (per qname, qtype, qclass) and connections 42 * between query states and the super and subquery states, and replies to 43 * send back to clients. 44 */ 45 #include "config.h" 46 #include "services/mesh.h" 47 #include "services/outbound_list.h" 48 #include "services/cache/dns.h" 49 #include "services/cache/rrset.h" 50 #include "util/log.h" 51 #include "util/net_help.h" 52 #include "util/module.h" 53 #include "util/regional.h" 54 #include "util/data/msgencode.h" 55 #include "util/timehist.h" 56 #include "util/fptr_wlist.h" 57 #include "util/alloc.h" 58 #include "util/config_file.h" 59 #include "util/edns.h" 60 #include "sldns/sbuffer.h" 61 #include "sldns/wire2str.h" 62 #include "services/localzone.h" 63 #include "util/data/dname.h" 64 #include "respip/respip.h" 65 #include "services/listen_dnsport.h" 66 67 /** subtract timers and the values do not overflow or become negative */ 68 static void 69 timeval_subtract(struct timeval* d, const struct timeval* end, const struct timeval* start) 70 { 71 #ifndef S_SPLINT_S 72 time_t end_usec = end->tv_usec; 73 d->tv_sec = end->tv_sec - start->tv_sec; 74 if(end_usec < start->tv_usec) { 75 end_usec += 1000000; 76 d->tv_sec--; 77 } 78 d->tv_usec = end_usec - start->tv_usec; 79 #endif 80 } 81 82 /** add timers and the values do not overflow or become negative */ 83 static void 84 timeval_add(struct timeval* d, const struct timeval* add) 85 { 86 #ifndef S_SPLINT_S 87 d->tv_sec += add->tv_sec; 88 d->tv_usec += add->tv_usec; 89 if(d->tv_usec >= 1000000 ) { 90 d->tv_usec -= 1000000; 91 d->tv_sec++; 92 } 93 #endif 94 } 95 96 /** divide sum of timers to get average */ 97 static void 98 timeval_divide(struct timeval* avg, const struct timeval* sum, size_t d) 99 { 100 #ifndef S_SPLINT_S 101 size_t leftover; 102 if(d == 0) { 103 avg->tv_sec = 0; 104 avg->tv_usec = 0; 105 return; 106 } 107 avg->tv_sec = sum->tv_sec / d; 108 avg->tv_usec = sum->tv_usec / d; 109 /* handle fraction from seconds divide */ 110 leftover = sum->tv_sec - avg->tv_sec*d; 111 avg->tv_usec += (leftover*1000000)/d; 112 #endif 113 } 114 115 /** histogram compare of time values */ 116 static int 117 timeval_smaller(const struct timeval* x, const struct timeval* y) 118 { 119 #ifndef S_SPLINT_S 120 if(x->tv_sec < y->tv_sec) 121 return 1; 122 else if(x->tv_sec == y->tv_sec) { 123 if(x->tv_usec <= y->tv_usec) 124 return 1; 125 else return 0; 126 } 127 else return 0; 128 #endif 129 } 130 131 /** 132 * Compare two response-ip client info entries for the purpose of mesh state 133 * compare. It returns 0 if ci_a and ci_b are considered equal; otherwise 134 * 1 or -1 (they mean 'ci_a is larger/smaller than ci_b', respectively, but 135 * in practice it should be only used to mean they are different). 136 * We cannot share the mesh state for two queries if different response-ip 137 * actions can apply in the end, even if those queries are otherwise identical. 138 * For this purpose we compare tag lists and tag action lists; they should be 139 * identical to share the same state. 140 * For tag data, we don't look into the data content, as it can be 141 * expensive; unless tag data are not defined for both or they point to the 142 * exact same data in memory (i.e., they come from the same ACL entry), we 143 * consider these data different. 144 * Likewise, if the client info is associated with views, we don't look into 145 * the views. They are considered different unless they are exactly the same 146 * even if the views only differ in the names. 147 */ 148 static int 149 client_info_compare(const struct respip_client_info* ci_a, 150 const struct respip_client_info* ci_b) 151 { 152 int cmp; 153 154 if(!ci_a && !ci_b) 155 return 0; 156 if(ci_a && !ci_b) 157 return -1; 158 if(!ci_a && ci_b) 159 return 1; 160 if(ci_a->taglen != ci_b->taglen) 161 return (ci_a->taglen < ci_b->taglen) ? -1 : 1; 162 if(ci_a->taglist && !ci_b->taglist) 163 return -1; 164 if(!ci_a->taglist && ci_b->taglist) 165 return 1; 166 if(ci_a->taglist && ci_b->taglist) { 167 cmp = memcmp(ci_a->taglist, ci_b->taglist, ci_a->taglen); 168 if(cmp != 0) 169 return cmp; 170 } 171 if(ci_a->tag_actions_size != ci_b->tag_actions_size) 172 return (ci_a->tag_actions_size < ci_b->tag_actions_size) ? 173 -1 : 1; 174 if(ci_a->tag_actions && !ci_b->tag_actions) 175 return -1; 176 if(!ci_a->tag_actions && ci_b->tag_actions) 177 return 1; 178 if(ci_a->tag_actions && ci_b->tag_actions) { 179 cmp = memcmp(ci_a->tag_actions, ci_b->tag_actions, 180 ci_a->tag_actions_size); 181 if(cmp != 0) 182 return cmp; 183 } 184 if(ci_a->tag_datas != ci_b->tag_datas) 185 return ci_a->tag_datas < ci_b->tag_datas ? -1 : 1; 186 if(ci_a->view != ci_b->view) 187 return ci_a->view < ci_b->view ? -1 : 1; 188 /* For the unbound daemon these should be non-NULL and identical, 189 * but we check that just in case. */ 190 if(ci_a->respip_set != ci_b->respip_set) 191 return ci_a->respip_set < ci_b->respip_set ? -1 : 1; 192 return 0; 193 } 194 195 int 196 mesh_state_compare(const void* ap, const void* bp) 197 { 198 struct mesh_state* a = (struct mesh_state*)ap; 199 struct mesh_state* b = (struct mesh_state*)bp; 200 int cmp; 201 202 if(a->unique < b->unique) 203 return -1; 204 if(a->unique > b->unique) 205 return 1; 206 207 if(a->s.is_priming && !b->s.is_priming) 208 return -1; 209 if(!a->s.is_priming && b->s.is_priming) 210 return 1; 211 212 if(a->s.is_valrec && !b->s.is_valrec) 213 return -1; 214 if(!a->s.is_valrec && b->s.is_valrec) 215 return 1; 216 217 if((a->s.query_flags&BIT_RD) && !(b->s.query_flags&BIT_RD)) 218 return -1; 219 if(!(a->s.query_flags&BIT_RD) && (b->s.query_flags&BIT_RD)) 220 return 1; 221 222 if((a->s.query_flags&BIT_CD) && !(b->s.query_flags&BIT_CD)) 223 return -1; 224 if(!(a->s.query_flags&BIT_CD) && (b->s.query_flags&BIT_CD)) 225 return 1; 226 227 cmp = query_info_compare(&a->s.qinfo, &b->s.qinfo); 228 if(cmp != 0) 229 return cmp; 230 return client_info_compare(a->s.client_info, b->s.client_info); 231 } 232 233 int 234 mesh_state_ref_compare(const void* ap, const void* bp) 235 { 236 struct mesh_state_ref* a = (struct mesh_state_ref*)ap; 237 struct mesh_state_ref* b = (struct mesh_state_ref*)bp; 238 return mesh_state_compare(a->s, b->s); 239 } 240 241 struct mesh_area* 242 mesh_create(struct module_stack* stack, struct module_env* env) 243 { 244 struct mesh_area* mesh = calloc(1, sizeof(struct mesh_area)); 245 if(!mesh) { 246 log_err("mesh area alloc: out of memory"); 247 return NULL; 248 } 249 mesh->histogram = timehist_setup(); 250 mesh->qbuf_bak = sldns_buffer_new(env->cfg->msg_buffer_size); 251 if(!mesh->histogram || !mesh->qbuf_bak) { 252 free(mesh); 253 log_err("mesh area alloc: out of memory"); 254 return NULL; 255 } 256 mesh->mods = *stack; 257 mesh->env = env; 258 rbtree_init(&mesh->run, &mesh_state_compare); 259 rbtree_init(&mesh->all, &mesh_state_compare); 260 mesh->num_reply_addrs = 0; 261 mesh->num_reply_states = 0; 262 mesh->num_detached_states = 0; 263 mesh->num_forever_states = 0; 264 mesh->stats_jostled = 0; 265 mesh->stats_dropped = 0; 266 mesh->ans_expired = 0; 267 mesh->max_reply_states = env->cfg->num_queries_per_thread; 268 mesh->max_forever_states = (mesh->max_reply_states+1)/2; 269 #ifndef S_SPLINT_S 270 mesh->jostle_max.tv_sec = (time_t)(env->cfg->jostle_time / 1000); 271 mesh->jostle_max.tv_usec = (time_t)((env->cfg->jostle_time % 1000) 272 *1000); 273 #endif 274 return mesh; 275 } 276 277 /** help mesh delete delete mesh states */ 278 static void 279 mesh_delete_helper(rbnode_type* n) 280 { 281 struct mesh_state* mstate = (struct mesh_state*)n->key; 282 /* perform a full delete, not only 'cleanup' routine, 283 * because other callbacks expect a clean state in the mesh. 284 * For 're-entrant' calls */ 285 mesh_state_delete(&mstate->s); 286 /* but because these delete the items from the tree, postorder 287 * traversal and rbtree rebalancing do not work together */ 288 } 289 290 void 291 mesh_delete(struct mesh_area* mesh) 292 { 293 if(!mesh) 294 return; 295 /* free all query states */ 296 while(mesh->all.count) 297 mesh_delete_helper(mesh->all.root); 298 timehist_delete(mesh->histogram); 299 sldns_buffer_free(mesh->qbuf_bak); 300 free(mesh); 301 } 302 303 void 304 mesh_delete_all(struct mesh_area* mesh) 305 { 306 /* free all query states */ 307 while(mesh->all.count) 308 mesh_delete_helper(mesh->all.root); 309 mesh->stats_dropped += mesh->num_reply_addrs; 310 /* clear mesh area references */ 311 rbtree_init(&mesh->run, &mesh_state_compare); 312 rbtree_init(&mesh->all, &mesh_state_compare); 313 mesh->num_reply_addrs = 0; 314 mesh->num_reply_states = 0; 315 mesh->num_detached_states = 0; 316 mesh->num_forever_states = 0; 317 mesh->forever_first = NULL; 318 mesh->forever_last = NULL; 319 mesh->jostle_first = NULL; 320 mesh->jostle_last = NULL; 321 } 322 323 int mesh_make_new_space(struct mesh_area* mesh, sldns_buffer* qbuf) 324 { 325 struct mesh_state* m = mesh->jostle_first; 326 /* free space is available */ 327 if(mesh->num_reply_states < mesh->max_reply_states) 328 return 1; 329 /* try to kick out a jostle-list item */ 330 if(m && m->reply_list && m->list_select == mesh_jostle_list) { 331 /* how old is it? */ 332 struct timeval age; 333 timeval_subtract(&age, mesh->env->now_tv, 334 &m->reply_list->start_time); 335 if(timeval_smaller(&mesh->jostle_max, &age)) { 336 /* its a goner */ 337 log_nametypeclass(VERB_ALGO, "query jostled out to " 338 "make space for a new one", 339 m->s.qinfo.qname, m->s.qinfo.qtype, 340 m->s.qinfo.qclass); 341 /* backup the query */ 342 if(qbuf) sldns_buffer_copy(mesh->qbuf_bak, qbuf); 343 /* notify supers */ 344 if(m->super_set.count > 0) { 345 verbose(VERB_ALGO, "notify supers of failure"); 346 m->s.return_msg = NULL; 347 m->s.return_rcode = LDNS_RCODE_SERVFAIL; 348 mesh_walk_supers(mesh, m); 349 } 350 mesh->stats_jostled ++; 351 mesh_state_delete(&m->s); 352 /* restore the query - note that the qinfo ptr to 353 * the querybuffer is then correct again. */ 354 if(qbuf) sldns_buffer_copy(qbuf, mesh->qbuf_bak); 355 return 1; 356 } 357 } 358 /* no space for new item */ 359 return 0; 360 } 361 362 struct dns_msg* 363 mesh_serve_expired_lookup(struct module_qstate* qstate, 364 struct query_info* lookup_qinfo) 365 { 366 hashvalue_type h; 367 struct lruhash_entry* e; 368 struct dns_msg* msg; 369 struct reply_info* data; 370 struct msgreply_entry* key; 371 time_t timenow = *qstate->env->now; 372 int must_validate = (!(qstate->query_flags&BIT_CD) 373 || qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate; 374 /* Lookup cache */ 375 h = query_info_hash(lookup_qinfo, qstate->query_flags); 376 e = slabhash_lookup(qstate->env->msg_cache, h, lookup_qinfo, 0); 377 if(!e) return NULL; 378 379 key = (struct msgreply_entry*)e->key; 380 data = (struct reply_info*)e->data; 381 msg = tomsg(qstate->env, &key->key, data, qstate->region, timenow, 382 qstate->env->cfg->serve_expired, qstate->env->scratch); 383 if(!msg) 384 goto bail_out; 385 386 /* Check CNAME chain (if any) 387 * This is part of tomsg above; no need to check now. */ 388 389 /* Check security status of the cached answer. 390 * tomsg above has a subset of these checks, so we are leaving 391 * these as is. 392 * In case of bogus or revalidation we don't care to reply here. */ 393 if(must_validate && (msg->rep->security == sec_status_bogus || 394 msg->rep->security == sec_status_secure_sentinel_fail)) { 395 verbose(VERB_ALGO, "Serve expired: bogus answer found in cache"); 396 goto bail_out; 397 } else if(msg->rep->security == sec_status_unchecked && must_validate) { 398 verbose(VERB_ALGO, "Serve expired: unchecked entry needs " 399 "validation"); 400 goto bail_out; /* need to validate cache entry first */ 401 } else if(msg->rep->security == sec_status_secure && 402 !reply_all_rrsets_secure(msg->rep) && must_validate) { 403 verbose(VERB_ALGO, "Serve expired: secure entry" 404 " changed status"); 405 goto bail_out; /* rrset changed, re-verify */ 406 } 407 408 lock_rw_unlock(&e->lock); 409 return msg; 410 411 bail_out: 412 lock_rw_unlock(&e->lock); 413 return NULL; 414 } 415 416 417 /** Init the serve expired data structure */ 418 static int 419 mesh_serve_expired_init(struct mesh_state* mstate, int timeout) 420 { 421 struct timeval t; 422 423 /* Create serve_expired_data if not there yet */ 424 if(!mstate->s.serve_expired_data) { 425 mstate->s.serve_expired_data = (struct serve_expired_data*) 426 regional_alloc_zero( 427 mstate->s.region, sizeof(struct serve_expired_data)); 428 if(!mstate->s.serve_expired_data) 429 return 0; 430 } 431 432 /* Don't overwrite the function if already set */ 433 mstate->s.serve_expired_data->get_cached_answer = 434 mstate->s.serve_expired_data->get_cached_answer? 435 mstate->s.serve_expired_data->get_cached_answer: 436 mesh_serve_expired_lookup; 437 438 /* In case this timer already popped, start it again */ 439 if(!mstate->s.serve_expired_data->timer) { 440 mstate->s.serve_expired_data->timer = comm_timer_create( 441 mstate->s.env->worker_base, mesh_serve_expired_callback, mstate); 442 if(!mstate->s.serve_expired_data->timer) 443 return 0; 444 #ifndef S_SPLINT_S 445 t.tv_sec = timeout/1000; 446 t.tv_usec = (timeout%1000)*1000; 447 #endif 448 comm_timer_set(mstate->s.serve_expired_data->timer, &t); 449 } 450 return 1; 451 } 452 453 void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo, 454 struct respip_client_info* cinfo, uint16_t qflags, 455 struct edns_data* edns, struct comm_reply* rep, uint16_t qid) 456 { 457 struct mesh_state* s = NULL; 458 int unique = unique_mesh_state(edns->opt_list, mesh->env); 459 int was_detached = 0; 460 int was_noreply = 0; 461 int added = 0; 462 int timeout = mesh->env->cfg->serve_expired? 463 mesh->env->cfg->serve_expired_client_timeout:0; 464 struct sldns_buffer* r_buffer = rep->c->buffer; 465 if(rep->c->tcp_req_info) { 466 r_buffer = rep->c->tcp_req_info->spool_buffer; 467 } 468 if(!unique) 469 s = mesh_area_find(mesh, cinfo, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0); 470 /* does this create a new reply state? */ 471 if(!s || s->list_select == mesh_no_list) { 472 if(!mesh_make_new_space(mesh, rep->c->buffer)) { 473 verbose(VERB_ALGO, "Too many queries. dropping " 474 "incoming query."); 475 comm_point_drop_reply(rep); 476 mesh->stats_dropped++; 477 return; 478 } 479 /* for this new reply state, the reply address is free, 480 * so the limit of reply addresses does not stop reply states*/ 481 } else { 482 /* protect our memory usage from storing reply addresses */ 483 if(mesh->num_reply_addrs > mesh->max_reply_states*16) { 484 verbose(VERB_ALGO, "Too many requests queued. " 485 "dropping incoming query."); 486 comm_point_drop_reply(rep); 487 mesh->stats_dropped++; 488 return; 489 } 490 } 491 /* see if it already exists, if not, create one */ 492 if(!s) { 493 #ifdef UNBOUND_DEBUG 494 struct rbnode_type* n; 495 #endif 496 s = mesh_state_create(mesh->env, qinfo, cinfo, 497 qflags&(BIT_RD|BIT_CD), 0, 0); 498 if(!s) { 499 log_err("mesh_state_create: out of memory; SERVFAIL"); 500 if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL, NULL, 501 LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch)) 502 edns->opt_list = NULL; 503 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 504 qinfo, qid, qflags, edns); 505 comm_point_send_reply(rep); 506 return; 507 } 508 if(unique) 509 mesh_state_make_unique(s); 510 /* copy the edns options we got from the front */ 511 if(edns->opt_list) { 512 s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list, 513 s->s.region); 514 if(!s->s.edns_opts_front_in) { 515 log_err("mesh_state_create: out of memory; SERVFAIL"); 516 if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL, 517 NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch)) 518 edns->opt_list = NULL; 519 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 520 qinfo, qid, qflags, edns); 521 comm_point_send_reply(rep); 522 return; 523 } 524 } 525 526 #ifdef UNBOUND_DEBUG 527 n = 528 #else 529 (void) 530 #endif 531 rbtree_insert(&mesh->all, &s->node); 532 log_assert(n != NULL); 533 /* set detached (it is now) */ 534 mesh->num_detached_states++; 535 added = 1; 536 } 537 if(!s->reply_list && !s->cb_list) { 538 was_noreply = 1; 539 if(s->super_set.count == 0) { 540 was_detached = 1; 541 } 542 } 543 /* add reply to s */ 544 if(!mesh_state_add_reply(s, edns, rep, qid, qflags, qinfo)) { 545 log_err("mesh_new_client: out of memory; SERVFAIL"); 546 goto servfail_mem; 547 } 548 if(rep->c->tcp_req_info) { 549 if(!tcp_req_info_add_meshstate(rep->c->tcp_req_info, mesh, s)) { 550 log_err("mesh_new_client: out of memory add tcpreqinfo"); 551 goto servfail_mem; 552 } 553 } 554 if(rep->c->use_h2) { 555 http2_stream_add_meshstate(rep->c->h2_stream, mesh, s); 556 } 557 /* add serve expired timer if required and not already there */ 558 if(timeout && !mesh_serve_expired_init(s, timeout)) { 559 log_err("mesh_new_client: out of memory initializing serve expired"); 560 goto servfail_mem; 561 } 562 /* update statistics */ 563 if(was_detached) { 564 log_assert(mesh->num_detached_states > 0); 565 mesh->num_detached_states--; 566 } 567 if(was_noreply) { 568 mesh->num_reply_states ++; 569 } 570 mesh->num_reply_addrs++; 571 if(s->list_select == mesh_no_list) { 572 /* move to either the forever or the jostle_list */ 573 if(mesh->num_forever_states < mesh->max_forever_states) { 574 mesh->num_forever_states ++; 575 mesh_list_insert(s, &mesh->forever_first, 576 &mesh->forever_last); 577 s->list_select = mesh_forever_list; 578 } else { 579 mesh_list_insert(s, &mesh->jostle_first, 580 &mesh->jostle_last); 581 s->list_select = mesh_jostle_list; 582 } 583 } 584 if(added) 585 mesh_run(mesh, s, module_event_new, NULL); 586 return; 587 588 servfail_mem: 589 if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, &s->s, 590 NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch)) 591 edns->opt_list = NULL; 592 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 593 qinfo, qid, qflags, edns); 594 comm_point_send_reply(rep); 595 if(added) 596 mesh_state_delete(&s->s); 597 return; 598 } 599 600 int 601 mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo, 602 uint16_t qflags, struct edns_data* edns, sldns_buffer* buf, 603 uint16_t qid, mesh_cb_func_type cb, void* cb_arg) 604 { 605 struct mesh_state* s = NULL; 606 int unique = unique_mesh_state(edns->opt_list, mesh->env); 607 int timeout = mesh->env->cfg->serve_expired? 608 mesh->env->cfg->serve_expired_client_timeout:0; 609 int was_detached = 0; 610 int was_noreply = 0; 611 int added = 0; 612 if(!unique) 613 s = mesh_area_find(mesh, NULL, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0); 614 615 /* there are no limits on the number of callbacks */ 616 617 /* see if it already exists, if not, create one */ 618 if(!s) { 619 #ifdef UNBOUND_DEBUG 620 struct rbnode_type* n; 621 #endif 622 s = mesh_state_create(mesh->env, qinfo, NULL, 623 qflags&(BIT_RD|BIT_CD), 0, 0); 624 if(!s) { 625 return 0; 626 } 627 if(unique) 628 mesh_state_make_unique(s); 629 if(edns->opt_list) { 630 s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list, 631 s->s.region); 632 if(!s->s.edns_opts_front_in) { 633 return 0; 634 } 635 } 636 #ifdef UNBOUND_DEBUG 637 n = 638 #else 639 (void) 640 #endif 641 rbtree_insert(&mesh->all, &s->node); 642 log_assert(n != NULL); 643 /* set detached (it is now) */ 644 mesh->num_detached_states++; 645 added = 1; 646 } 647 if(!s->reply_list && !s->cb_list) { 648 was_noreply = 1; 649 if(s->super_set.count == 0) { 650 was_detached = 1; 651 } 652 } 653 /* add reply to s */ 654 if(!mesh_state_add_cb(s, edns, buf, cb, cb_arg, qid, qflags)) { 655 if(added) 656 mesh_state_delete(&s->s); 657 return 0; 658 } 659 /* add serve expired timer if not already there */ 660 if(timeout && !mesh_serve_expired_init(s, timeout)) { 661 return 0; 662 } 663 /* update statistics */ 664 if(was_detached) { 665 log_assert(mesh->num_detached_states > 0); 666 mesh->num_detached_states--; 667 } 668 if(was_noreply) { 669 mesh->num_reply_states ++; 670 } 671 mesh->num_reply_addrs++; 672 if(added) 673 mesh_run(mesh, s, module_event_new, NULL); 674 return 1; 675 } 676 677 /* Internal backend routine of mesh_new_prefetch(). It takes one additional 678 * parameter, 'run', which controls whether to run the prefetch state 679 * immediately. When this function is called internally 'run' could be 680 * 0 (false), in which case the new state is only made runnable so it 681 * will not be run recursively on top of the current state. */ 682 static void mesh_schedule_prefetch(struct mesh_area* mesh, 683 struct query_info* qinfo, uint16_t qflags, time_t leeway, int run) 684 { 685 struct mesh_state* s = mesh_area_find(mesh, NULL, qinfo, 686 qflags&(BIT_RD|BIT_CD), 0, 0); 687 #ifdef UNBOUND_DEBUG 688 struct rbnode_type* n; 689 #endif 690 /* already exists, and for a different purpose perhaps. 691 * if mesh_no_list, keep it that way. */ 692 if(s) { 693 /* make it ignore the cache from now on */ 694 if(!s->s.blacklist) 695 sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region); 696 if(s->s.prefetch_leeway < leeway) 697 s->s.prefetch_leeway = leeway; 698 return; 699 } 700 if(!mesh_make_new_space(mesh, NULL)) { 701 verbose(VERB_ALGO, "Too many queries. dropped prefetch."); 702 mesh->stats_dropped ++; 703 return; 704 } 705 706 s = mesh_state_create(mesh->env, qinfo, NULL, 707 qflags&(BIT_RD|BIT_CD), 0, 0); 708 if(!s) { 709 log_err("prefetch mesh_state_create: out of memory"); 710 return; 711 } 712 #ifdef UNBOUND_DEBUG 713 n = 714 #else 715 (void) 716 #endif 717 rbtree_insert(&mesh->all, &s->node); 718 log_assert(n != NULL); 719 /* set detached (it is now) */ 720 mesh->num_detached_states++; 721 /* make it ignore the cache */ 722 sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region); 723 s->s.prefetch_leeway = leeway; 724 725 if(s->list_select == mesh_no_list) { 726 /* move to either the forever or the jostle_list */ 727 if(mesh->num_forever_states < mesh->max_forever_states) { 728 mesh->num_forever_states ++; 729 mesh_list_insert(s, &mesh->forever_first, 730 &mesh->forever_last); 731 s->list_select = mesh_forever_list; 732 } else { 733 mesh_list_insert(s, &mesh->jostle_first, 734 &mesh->jostle_last); 735 s->list_select = mesh_jostle_list; 736 } 737 } 738 739 if(!run) { 740 #ifdef UNBOUND_DEBUG 741 n = 742 #else 743 (void) 744 #endif 745 rbtree_insert(&mesh->run, &s->run_node); 746 log_assert(n != NULL); 747 return; 748 } 749 750 mesh_run(mesh, s, module_event_new, NULL); 751 } 752 753 void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo, 754 uint16_t qflags, time_t leeway) 755 { 756 mesh_schedule_prefetch(mesh, qinfo, qflags, leeway, 1); 757 } 758 759 void mesh_report_reply(struct mesh_area* mesh, struct outbound_entry* e, 760 struct comm_reply* reply, int what) 761 { 762 enum module_ev event = module_event_reply; 763 e->qstate->reply = reply; 764 if(what != NETEVENT_NOERROR) { 765 event = module_event_noreply; 766 if(what == NETEVENT_CAPSFAIL) 767 event = module_event_capsfail; 768 } 769 mesh_run(mesh, e->qstate->mesh_info, event, e); 770 } 771 772 struct mesh_state* 773 mesh_state_create(struct module_env* env, struct query_info* qinfo, 774 struct respip_client_info* cinfo, uint16_t qflags, int prime, 775 int valrec) 776 { 777 struct regional* region = alloc_reg_obtain(env->alloc); 778 struct mesh_state* mstate; 779 int i; 780 if(!region) 781 return NULL; 782 mstate = (struct mesh_state*)regional_alloc(region, 783 sizeof(struct mesh_state)); 784 if(!mstate) { 785 alloc_reg_release(env->alloc, region); 786 return NULL; 787 } 788 memset(mstate, 0, sizeof(*mstate)); 789 mstate->node = *RBTREE_NULL; 790 mstate->run_node = *RBTREE_NULL; 791 mstate->node.key = mstate; 792 mstate->run_node.key = mstate; 793 mstate->reply_list = NULL; 794 mstate->list_select = mesh_no_list; 795 mstate->replies_sent = 0; 796 rbtree_init(&mstate->super_set, &mesh_state_ref_compare); 797 rbtree_init(&mstate->sub_set, &mesh_state_ref_compare); 798 mstate->num_activated = 0; 799 mstate->unique = NULL; 800 /* init module qstate */ 801 mstate->s.qinfo.qtype = qinfo->qtype; 802 mstate->s.qinfo.qclass = qinfo->qclass; 803 mstate->s.qinfo.local_alias = NULL; 804 mstate->s.qinfo.qname_len = qinfo->qname_len; 805 mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname, 806 qinfo->qname_len); 807 if(!mstate->s.qinfo.qname) { 808 alloc_reg_release(env->alloc, region); 809 return NULL; 810 } 811 if(cinfo) { 812 mstate->s.client_info = regional_alloc_init(region, cinfo, 813 sizeof(*cinfo)); 814 if(!mstate->s.client_info) { 815 alloc_reg_release(env->alloc, region); 816 return NULL; 817 } 818 } 819 /* remove all weird bits from qflags */ 820 mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD)); 821 mstate->s.is_priming = prime; 822 mstate->s.is_valrec = valrec; 823 mstate->s.reply = NULL; 824 mstate->s.region = region; 825 mstate->s.curmod = 0; 826 mstate->s.return_msg = 0; 827 mstate->s.return_rcode = LDNS_RCODE_NOERROR; 828 mstate->s.env = env; 829 mstate->s.mesh_info = mstate; 830 mstate->s.prefetch_leeway = 0; 831 mstate->s.serve_expired_data = NULL; 832 mstate->s.no_cache_lookup = 0; 833 mstate->s.no_cache_store = 0; 834 mstate->s.need_refetch = 0; 835 mstate->s.was_ratelimited = 0; 836 837 /* init modules */ 838 for(i=0; i<env->mesh->mods.num; i++) { 839 mstate->s.minfo[i] = NULL; 840 mstate->s.ext_state[i] = module_state_initial; 841 } 842 /* init edns option lists */ 843 mstate->s.edns_opts_front_in = NULL; 844 mstate->s.edns_opts_back_out = NULL; 845 mstate->s.edns_opts_back_in = NULL; 846 mstate->s.edns_opts_front_out = NULL; 847 848 return mstate; 849 } 850 851 int 852 mesh_state_is_unique(struct mesh_state* mstate) 853 { 854 return mstate->unique != NULL; 855 } 856 857 void 858 mesh_state_make_unique(struct mesh_state* mstate) 859 { 860 mstate->unique = mstate; 861 } 862 863 void 864 mesh_state_cleanup(struct mesh_state* mstate) 865 { 866 struct mesh_area* mesh; 867 int i; 868 if(!mstate) 869 return; 870 mesh = mstate->s.env->mesh; 871 /* Stop and delete the serve expired timer */ 872 if(mstate->s.serve_expired_data && mstate->s.serve_expired_data->timer) { 873 comm_timer_delete(mstate->s.serve_expired_data->timer); 874 mstate->s.serve_expired_data->timer = NULL; 875 } 876 /* drop unsent replies */ 877 if(!mstate->replies_sent) { 878 struct mesh_reply* rep = mstate->reply_list; 879 struct mesh_cb* cb; 880 /* in tcp_req_info, the mstates linked are removed, but 881 * the reply_list is now NULL, so the remove-from-empty-list 882 * takes no time and also it does not do the mesh accounting */ 883 mstate->reply_list = NULL; 884 for(; rep; rep=rep->next) { 885 comm_point_drop_reply(&rep->query_reply); 886 log_assert(mesh->num_reply_addrs > 0); 887 mesh->num_reply_addrs--; 888 } 889 while((cb = mstate->cb_list)!=NULL) { 890 mstate->cb_list = cb->next; 891 fptr_ok(fptr_whitelist_mesh_cb(cb->cb)); 892 (*cb->cb)(cb->cb_arg, LDNS_RCODE_SERVFAIL, NULL, 893 sec_status_unchecked, NULL, 0); 894 log_assert(mesh->num_reply_addrs > 0); 895 mesh->num_reply_addrs--; 896 } 897 } 898 899 /* de-init modules */ 900 for(i=0; i<mesh->mods.num; i++) { 901 fptr_ok(fptr_whitelist_mod_clear(mesh->mods.mod[i]->clear)); 902 (*mesh->mods.mod[i]->clear)(&mstate->s, i); 903 mstate->s.minfo[i] = NULL; 904 mstate->s.ext_state[i] = module_finished; 905 } 906 alloc_reg_release(mstate->s.env->alloc, mstate->s.region); 907 } 908 909 void 910 mesh_state_delete(struct module_qstate* qstate) 911 { 912 struct mesh_area* mesh; 913 struct mesh_state_ref* super, ref; 914 struct mesh_state* mstate; 915 if(!qstate) 916 return; 917 mstate = qstate->mesh_info; 918 mesh = mstate->s.env->mesh; 919 mesh_detach_subs(&mstate->s); 920 if(mstate->list_select == mesh_forever_list) { 921 mesh->num_forever_states --; 922 mesh_list_remove(mstate, &mesh->forever_first, 923 &mesh->forever_last); 924 } else if(mstate->list_select == mesh_jostle_list) { 925 mesh_list_remove(mstate, &mesh->jostle_first, 926 &mesh->jostle_last); 927 } 928 if(!mstate->reply_list && !mstate->cb_list 929 && mstate->super_set.count == 0) { 930 log_assert(mesh->num_detached_states > 0); 931 mesh->num_detached_states--; 932 } 933 if(mstate->reply_list || mstate->cb_list) { 934 log_assert(mesh->num_reply_states > 0); 935 mesh->num_reply_states--; 936 } 937 ref.node.key = &ref; 938 ref.s = mstate; 939 RBTREE_FOR(super, struct mesh_state_ref*, &mstate->super_set) { 940 (void)rbtree_delete(&super->s->sub_set, &ref); 941 } 942 (void)rbtree_delete(&mesh->run, mstate); 943 (void)rbtree_delete(&mesh->all, mstate); 944 mesh_state_cleanup(mstate); 945 } 946 947 /** helper recursive rbtree find routine */ 948 static int 949 find_in_subsub(struct mesh_state* m, struct mesh_state* tofind, size_t *c) 950 { 951 struct mesh_state_ref* r; 952 if((*c)++ > MESH_MAX_SUBSUB) 953 return 1; 954 RBTREE_FOR(r, struct mesh_state_ref*, &m->sub_set) { 955 if(r->s == tofind || find_in_subsub(r->s, tofind, c)) 956 return 1; 957 } 958 return 0; 959 } 960 961 /** find cycle for already looked up mesh_state */ 962 static int 963 mesh_detect_cycle_found(struct module_qstate* qstate, struct mesh_state* dep_m) 964 { 965 struct mesh_state* cyc_m = qstate->mesh_info; 966 size_t counter = 0; 967 if(!dep_m) 968 return 0; 969 if(dep_m == cyc_m || find_in_subsub(dep_m, cyc_m, &counter)) { 970 if(counter > MESH_MAX_SUBSUB) 971 return 2; 972 return 1; 973 } 974 return 0; 975 } 976 977 void mesh_detach_subs(struct module_qstate* qstate) 978 { 979 struct mesh_area* mesh = qstate->env->mesh; 980 struct mesh_state_ref* ref, lookup; 981 #ifdef UNBOUND_DEBUG 982 struct rbnode_type* n; 983 #endif 984 lookup.node.key = &lookup; 985 lookup.s = qstate->mesh_info; 986 RBTREE_FOR(ref, struct mesh_state_ref*, &qstate->mesh_info->sub_set) { 987 #ifdef UNBOUND_DEBUG 988 n = 989 #else 990 (void) 991 #endif 992 rbtree_delete(&ref->s->super_set, &lookup); 993 log_assert(n != NULL); /* must have been present */ 994 if(!ref->s->reply_list && !ref->s->cb_list 995 && ref->s->super_set.count == 0) { 996 mesh->num_detached_states++; 997 log_assert(mesh->num_detached_states + 998 mesh->num_reply_states <= mesh->all.count); 999 } 1000 } 1001 rbtree_init(&qstate->mesh_info->sub_set, &mesh_state_ref_compare); 1002 } 1003 1004 int mesh_add_sub(struct module_qstate* qstate, struct query_info* qinfo, 1005 uint16_t qflags, int prime, int valrec, struct module_qstate** newq, 1006 struct mesh_state** sub) 1007 { 1008 /* find it, if not, create it */ 1009 struct mesh_area* mesh = qstate->env->mesh; 1010 *sub = mesh_area_find(mesh, NULL, qinfo, qflags, 1011 prime, valrec); 1012 if(mesh_detect_cycle_found(qstate, *sub)) { 1013 verbose(VERB_ALGO, "attach failed, cycle detected"); 1014 return 0; 1015 } 1016 if(!*sub) { 1017 #ifdef UNBOUND_DEBUG 1018 struct rbnode_type* n; 1019 #endif 1020 /* create a new one */ 1021 *sub = mesh_state_create(qstate->env, qinfo, NULL, qflags, prime, 1022 valrec); 1023 if(!*sub) { 1024 log_err("mesh_attach_sub: out of memory"); 1025 return 0; 1026 } 1027 #ifdef UNBOUND_DEBUG 1028 n = 1029 #else 1030 (void) 1031 #endif 1032 rbtree_insert(&mesh->all, &(*sub)->node); 1033 log_assert(n != NULL); 1034 /* set detached (it is now) */ 1035 mesh->num_detached_states++; 1036 /* set new query state to run */ 1037 #ifdef UNBOUND_DEBUG 1038 n = 1039 #else 1040 (void) 1041 #endif 1042 rbtree_insert(&mesh->run, &(*sub)->run_node); 1043 log_assert(n != NULL); 1044 *newq = &(*sub)->s; 1045 } else 1046 *newq = NULL; 1047 return 1; 1048 } 1049 1050 int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo, 1051 uint16_t qflags, int prime, int valrec, struct module_qstate** newq) 1052 { 1053 struct mesh_area* mesh = qstate->env->mesh; 1054 struct mesh_state* sub = NULL; 1055 int was_detached; 1056 if(!mesh_add_sub(qstate, qinfo, qflags, prime, valrec, newq, &sub)) 1057 return 0; 1058 was_detached = (sub->super_set.count == 0); 1059 if(!mesh_state_attachment(qstate->mesh_info, sub)) 1060 return 0; 1061 /* if it was a duplicate attachment, the count was not zero before */ 1062 if(!sub->reply_list && !sub->cb_list && was_detached && 1063 sub->super_set.count == 1) { 1064 /* it used to be detached, before this one got added */ 1065 log_assert(mesh->num_detached_states > 0); 1066 mesh->num_detached_states--; 1067 } 1068 /* *newq will be run when inited after the current module stops */ 1069 return 1; 1070 } 1071 1072 int mesh_state_attachment(struct mesh_state* super, struct mesh_state* sub) 1073 { 1074 #ifdef UNBOUND_DEBUG 1075 struct rbnode_type* n; 1076 #endif 1077 struct mesh_state_ref* subref; /* points to sub, inserted in super */ 1078 struct mesh_state_ref* superref; /* points to super, inserted in sub */ 1079 if( !(subref = regional_alloc(super->s.region, 1080 sizeof(struct mesh_state_ref))) || 1081 !(superref = regional_alloc(sub->s.region, 1082 sizeof(struct mesh_state_ref))) ) { 1083 log_err("mesh_state_attachment: out of memory"); 1084 return 0; 1085 } 1086 superref->node.key = superref; 1087 superref->s = super; 1088 subref->node.key = subref; 1089 subref->s = sub; 1090 if(!rbtree_insert(&sub->super_set, &superref->node)) { 1091 /* this should not happen, iterator and validator do not 1092 * attach subqueries that are identical. */ 1093 /* already attached, we are done, nothing todo. 1094 * since superref and subref already allocated in region, 1095 * we cannot free them */ 1096 return 1; 1097 } 1098 #ifdef UNBOUND_DEBUG 1099 n = 1100 #else 1101 (void) 1102 #endif 1103 rbtree_insert(&super->sub_set, &subref->node); 1104 log_assert(n != NULL); /* we checked above if statement, the reverse 1105 administration should not fail now, unless they are out of sync */ 1106 return 1; 1107 } 1108 1109 /** 1110 * callback results to mesh cb entry 1111 * @param m: mesh state to send it for. 1112 * @param rcode: if not 0, error code. 1113 * @param rep: reply to send (or NULL if rcode is set). 1114 * @param r: callback entry 1115 */ 1116 static void 1117 mesh_do_callback(struct mesh_state* m, int rcode, struct reply_info* rep, 1118 struct mesh_cb* r) 1119 { 1120 int secure; 1121 char* reason = NULL; 1122 int was_ratelimited = m->s.was_ratelimited; 1123 /* bogus messages are not made into servfail, sec_status passed 1124 * to the callback function */ 1125 if(rep && rep->security == sec_status_secure) 1126 secure = 1; 1127 else secure = 0; 1128 if(!rep && rcode == LDNS_RCODE_NOERROR) 1129 rcode = LDNS_RCODE_SERVFAIL; 1130 if(!rcode && (rep->security == sec_status_bogus || 1131 rep->security == sec_status_secure_sentinel_fail)) { 1132 if(!(reason = errinf_to_str_bogus(&m->s))) 1133 rcode = LDNS_RCODE_SERVFAIL; 1134 } 1135 /* send the reply */ 1136 if(rcode) { 1137 if(rcode == LDNS_RCODE_SERVFAIL) { 1138 if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s, 1139 rep, rcode, &r->edns, NULL, m->s.region)) 1140 r->edns.opt_list = NULL; 1141 } else { 1142 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode, 1143 &r->edns, NULL, m->s.region)) 1144 r->edns.opt_list = NULL; 1145 } 1146 fptr_ok(fptr_whitelist_mesh_cb(r->cb)); 1147 (*r->cb)(r->cb_arg, rcode, r->buf, sec_status_unchecked, NULL, 1148 was_ratelimited); 1149 } else { 1150 size_t udp_size = r->edns.udp_size; 1151 sldns_buffer_clear(r->buf); 1152 r->edns.edns_version = EDNS_ADVERTISED_VERSION; 1153 r->edns.udp_size = EDNS_ADVERTISED_SIZE; 1154 r->edns.ext_rcode = 0; 1155 r->edns.bits &= EDNS_DO; 1156 1157 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, 1158 LDNS_RCODE_NOERROR, &r->edns, NULL, m->s.region) || 1159 !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 1160 r->qflags, r->buf, 0, 1, 1161 m->s.env->scratch, udp_size, &r->edns, 1162 (int)(r->edns.bits & EDNS_DO), secure)) 1163 { 1164 fptr_ok(fptr_whitelist_mesh_cb(r->cb)); 1165 (*r->cb)(r->cb_arg, LDNS_RCODE_SERVFAIL, r->buf, 1166 sec_status_unchecked, NULL, 0); 1167 } else { 1168 fptr_ok(fptr_whitelist_mesh_cb(r->cb)); 1169 (*r->cb)(r->cb_arg, LDNS_RCODE_NOERROR, r->buf, 1170 rep->security, reason, was_ratelimited); 1171 } 1172 } 1173 free(reason); 1174 log_assert(m->s.env->mesh->num_reply_addrs > 0); 1175 m->s.env->mesh->num_reply_addrs--; 1176 } 1177 1178 /** 1179 * Send reply to mesh reply entry 1180 * @param m: mesh state to send it for. 1181 * @param rcode: if not 0, error code. 1182 * @param rep: reply to send (or NULL if rcode is set). 1183 * @param r: reply entry 1184 * @param r_buffer: buffer to use for reply entry. 1185 * @param prev: previous reply, already has its answer encoded in buffer. 1186 * @param prev_buffer: buffer for previous reply. 1187 */ 1188 static void 1189 mesh_send_reply(struct mesh_state* m, int rcode, struct reply_info* rep, 1190 struct mesh_reply* r, struct sldns_buffer* r_buffer, 1191 struct mesh_reply* prev, struct sldns_buffer* prev_buffer) 1192 { 1193 struct timeval end_time; 1194 struct timeval duration; 1195 int secure; 1196 /* Copy the client's EDNS for later restore, to make sure the edns 1197 * compare is with the correct edns options. */ 1198 struct edns_data edns_bak = r->edns; 1199 /* examine security status */ 1200 if(m->s.env->need_to_validate && (!(r->qflags&BIT_CD) || 1201 m->s.env->cfg->ignore_cd) && rep && 1202 (rep->security <= sec_status_bogus || 1203 rep->security == sec_status_secure_sentinel_fail)) { 1204 rcode = LDNS_RCODE_SERVFAIL; 1205 if(m->s.env->cfg->stat_extended) 1206 m->s.env->mesh->ans_bogus++; 1207 } 1208 if(rep && rep->security == sec_status_secure) 1209 secure = 1; 1210 else secure = 0; 1211 if(!rep && rcode == LDNS_RCODE_NOERROR) 1212 rcode = LDNS_RCODE_SERVFAIL; 1213 if(r->query_reply.c->use_h2) { 1214 r->query_reply.c->h2_stream = r->h2_stream; 1215 /* Mesh reply won't exist for long anymore. Make it impossible 1216 * for HTTP/2 stream to refer to mesh state, in case 1217 * connection gets cleanup before HTTP/2 stream close. */ 1218 r->h2_stream->mesh_state = NULL; 1219 } 1220 /* send the reply */ 1221 /* We don't reuse the encoded answer if either the previous or current 1222 * response has a local alias. We could compare the alias records 1223 * and still reuse the previous answer if they are the same, but that 1224 * would be complicated and error prone for the relatively minor case. 1225 * So we err on the side of safety. */ 1226 if(prev && prev_buffer && prev->qflags == r->qflags && 1227 !prev->local_alias && !r->local_alias && 1228 prev->edns.edns_present == r->edns.edns_present && 1229 prev->edns.bits == r->edns.bits && 1230 prev->edns.udp_size == r->edns.udp_size && 1231 edns_opt_list_compare(prev->edns.opt_list, r->edns.opt_list) 1232 == 0) { 1233 /* if the previous reply is identical to this one, fix ID */ 1234 if(prev_buffer != r_buffer) 1235 sldns_buffer_copy(r_buffer, prev_buffer); 1236 sldns_buffer_write_at(r_buffer, 0, &r->qid, sizeof(uint16_t)); 1237 sldns_buffer_write_at(r_buffer, 12, r->qname, 1238 m->s.qinfo.qname_len); 1239 comm_point_send_reply(&r->query_reply); 1240 } else if(rcode) { 1241 m->s.qinfo.qname = r->qname; 1242 m->s.qinfo.local_alias = r->local_alias; 1243 if(rcode == LDNS_RCODE_SERVFAIL) { 1244 if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s, 1245 rep, rcode, &r->edns, NULL, m->s.region)) 1246 r->edns.opt_list = NULL; 1247 } else { 1248 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode, 1249 &r->edns, NULL, m->s.region)) 1250 r->edns.opt_list = NULL; 1251 } 1252 error_encode(r_buffer, rcode, &m->s.qinfo, r->qid, 1253 r->qflags, &r->edns); 1254 comm_point_send_reply(&r->query_reply); 1255 } else { 1256 size_t udp_size = r->edns.udp_size; 1257 r->edns.edns_version = EDNS_ADVERTISED_VERSION; 1258 r->edns.udp_size = EDNS_ADVERTISED_SIZE; 1259 r->edns.ext_rcode = 0; 1260 r->edns.bits &= EDNS_DO; 1261 m->s.qinfo.qname = r->qname; 1262 m->s.qinfo.local_alias = r->local_alias; 1263 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, 1264 LDNS_RCODE_NOERROR, &r->edns, NULL, m->s.region) || 1265 !apply_edns_options(&r->edns, &edns_bak, 1266 m->s.env->cfg, r->query_reply.c, 1267 m->s.region) || 1268 !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 1269 r->qflags, r_buffer, 0, 1, m->s.env->scratch, 1270 udp_size, &r->edns, (int)(r->edns.bits & EDNS_DO), 1271 secure)) 1272 { 1273 if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s, 1274 rep, LDNS_RCODE_SERVFAIL, &r->edns, NULL, m->s.region)) 1275 r->edns.opt_list = NULL; 1276 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 1277 &m->s.qinfo, r->qid, r->qflags, &r->edns); 1278 } 1279 r->edns = edns_bak; 1280 comm_point_send_reply(&r->query_reply); 1281 } 1282 /* account */ 1283 log_assert(m->s.env->mesh->num_reply_addrs > 0); 1284 m->s.env->mesh->num_reply_addrs--; 1285 end_time = *m->s.env->now_tv; 1286 timeval_subtract(&duration, &end_time, &r->start_time); 1287 verbose(VERB_ALGO, "query took " ARG_LL "d.%6.6d sec", 1288 (long long)duration.tv_sec, (int)duration.tv_usec); 1289 m->s.env->mesh->replies_sent++; 1290 timeval_add(&m->s.env->mesh->replies_sum_wait, &duration); 1291 timehist_insert(m->s.env->mesh->histogram, &duration); 1292 if(m->s.env->cfg->stat_extended) { 1293 uint16_t rc = FLAGS_GET_RCODE(sldns_buffer_read_u16_at( 1294 r_buffer, 2)); 1295 if(secure) m->s.env->mesh->ans_secure++; 1296 m->s.env->mesh->ans_rcode[ rc ] ++; 1297 if(rc == 0 && LDNS_ANCOUNT(sldns_buffer_begin(r_buffer)) == 0) 1298 m->s.env->mesh->ans_nodata++; 1299 } 1300 /* Log reply sent */ 1301 if(m->s.env->cfg->log_replies) { 1302 log_reply_info(NO_VERBOSE, &m->s.qinfo, &r->query_reply.addr, 1303 r->query_reply.addrlen, duration, 0, r_buffer); 1304 } 1305 } 1306 1307 void mesh_query_done(struct mesh_state* mstate) 1308 { 1309 struct mesh_reply* r; 1310 struct mesh_reply* prev = NULL; 1311 struct sldns_buffer* prev_buffer = NULL; 1312 struct mesh_cb* c; 1313 struct reply_info* rep = (mstate->s.return_msg? 1314 mstate->s.return_msg->rep:NULL); 1315 /* No need for the serve expired timer anymore; we are going to reply. */ 1316 if(mstate->s.serve_expired_data) { 1317 comm_timer_delete(mstate->s.serve_expired_data->timer); 1318 mstate->s.serve_expired_data->timer = NULL; 1319 } 1320 if(mstate->s.return_rcode == LDNS_RCODE_SERVFAIL || 1321 (rep && FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_SERVFAIL)) { 1322 /* we are SERVFAILing; check for expired asnwer here */ 1323 mesh_serve_expired_callback(mstate); 1324 if((mstate->reply_list || mstate->cb_list) 1325 && mstate->s.env->cfg->log_servfail 1326 && !mstate->s.env->cfg->val_log_squelch) { 1327 char* err = errinf_to_str_servfail(&mstate->s); 1328 if(err) 1329 log_err("%s", err); 1330 free(err); 1331 } 1332 } 1333 for(r = mstate->reply_list; r; r = r->next) { 1334 /* if a response-ip address block has been stored the 1335 * information should be logged for each client. */ 1336 if(mstate->s.respip_action_info && 1337 mstate->s.respip_action_info->addrinfo) { 1338 respip_inform_print(mstate->s.respip_action_info, 1339 r->qname, mstate->s.qinfo.qtype, 1340 mstate->s.qinfo.qclass, r->local_alias, 1341 &r->query_reply); 1342 if(mstate->s.env->cfg->stat_extended && 1343 mstate->s.respip_action_info->rpz_used) { 1344 if(mstate->s.respip_action_info->rpz_disabled) 1345 mstate->s.env->mesh->rpz_action[RPZ_DISABLED_ACTION]++; 1346 if(mstate->s.respip_action_info->rpz_cname_override) 1347 mstate->s.env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION]++; 1348 else 1349 mstate->s.env->mesh->rpz_action[respip_action_to_rpz_action( 1350 mstate->s.respip_action_info->action)]++; 1351 } 1352 } 1353 1354 /* if this query is determined to be dropped during the 1355 * mesh processing, this is the point to take that action. */ 1356 if(mstate->s.is_drop) { 1357 /* briefly set the reply_list to NULL, so that the 1358 * tcp req info cleanup routine that calls the mesh 1359 * to deregister the meshstate for it is not done 1360 * because the list is NULL and also accounting is not 1361 * done there, but instead we do that here. */ 1362 struct mesh_reply* reply_list = mstate->reply_list; 1363 mstate->reply_list = NULL; 1364 comm_point_drop_reply(&r->query_reply); 1365 mstate->reply_list = reply_list; 1366 } else { 1367 struct sldns_buffer* r_buffer = r->query_reply.c->buffer; 1368 struct mesh_reply* rlist = mstate->reply_list; 1369 if(r->query_reply.c->tcp_req_info) { 1370 r_buffer = r->query_reply.c->tcp_req_info->spool_buffer; 1371 prev_buffer = NULL; 1372 } 1373 /* briefly set the replylist to null in case the 1374 * meshsendreply calls tcpreqinfo sendreply that 1375 * comm_point_drops because of size, and then the 1376 * null stops the mesh state remove and thus 1377 * reply_list modification and accounting */ 1378 mstate->reply_list = NULL; 1379 mesh_send_reply(mstate, mstate->s.return_rcode, rep, 1380 r, r_buffer, prev, prev_buffer); 1381 mstate->reply_list = rlist; 1382 if(r->query_reply.c->tcp_req_info) { 1383 tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate); 1384 r_buffer = NULL; 1385 } 1386 prev = r; 1387 prev_buffer = r_buffer; 1388 } 1389 } 1390 if(mstate->reply_list) { 1391 mstate->reply_list = NULL; 1392 if(!mstate->reply_list && !mstate->cb_list) { 1393 /* was a reply state, not anymore */ 1394 log_assert(mstate->s.env->mesh->num_reply_states > 0); 1395 mstate->s.env->mesh->num_reply_states--; 1396 } 1397 if(!mstate->reply_list && !mstate->cb_list && 1398 mstate->super_set.count == 0) 1399 mstate->s.env->mesh->num_detached_states++; 1400 } 1401 mstate->replies_sent = 1; 1402 while((c = mstate->cb_list) != NULL) { 1403 /* take this cb off the list; so that the list can be 1404 * changed, eg. by adds from the callback routine */ 1405 if(!mstate->reply_list && mstate->cb_list && !c->next) { 1406 /* was a reply state, not anymore */ 1407 log_assert(mstate->s.env->mesh->num_reply_states > 0); 1408 mstate->s.env->mesh->num_reply_states--; 1409 } 1410 mstate->cb_list = c->next; 1411 if(!mstate->reply_list && !mstate->cb_list && 1412 mstate->super_set.count == 0) 1413 mstate->s.env->mesh->num_detached_states++; 1414 mesh_do_callback(mstate, mstate->s.return_rcode, rep, c); 1415 } 1416 } 1417 1418 void mesh_walk_supers(struct mesh_area* mesh, struct mesh_state* mstate) 1419 { 1420 struct mesh_state_ref* ref; 1421 RBTREE_FOR(ref, struct mesh_state_ref*, &mstate->super_set) 1422 { 1423 /* make super runnable */ 1424 (void)rbtree_insert(&mesh->run, &ref->s->run_node); 1425 /* callback the function to inform super of result */ 1426 fptr_ok(fptr_whitelist_mod_inform_super( 1427 mesh->mods.mod[ref->s->s.curmod]->inform_super)); 1428 (*mesh->mods.mod[ref->s->s.curmod]->inform_super)(&mstate->s, 1429 ref->s->s.curmod, &ref->s->s); 1430 /* copy state that is always relevant to super */ 1431 copy_state_to_super(&mstate->s, ref->s->s.curmod, &ref->s->s); 1432 } 1433 } 1434 1435 struct mesh_state* mesh_area_find(struct mesh_area* mesh, 1436 struct respip_client_info* cinfo, struct query_info* qinfo, 1437 uint16_t qflags, int prime, int valrec) 1438 { 1439 struct mesh_state key; 1440 struct mesh_state* result; 1441 1442 key.node.key = &key; 1443 key.s.is_priming = prime; 1444 key.s.is_valrec = valrec; 1445 key.s.qinfo = *qinfo; 1446 key.s.query_flags = qflags; 1447 /* We are searching for a similar mesh state when we DO want to 1448 * aggregate the state. Thus unique is set to NULL. (default when we 1449 * desire aggregation).*/ 1450 key.unique = NULL; 1451 key.s.client_info = cinfo; 1452 1453 result = (struct mesh_state*)rbtree_search(&mesh->all, &key); 1454 return result; 1455 } 1456 1457 int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns, 1458 sldns_buffer* buf, mesh_cb_func_type cb, void* cb_arg, 1459 uint16_t qid, uint16_t qflags) 1460 { 1461 struct mesh_cb* r = regional_alloc(s->s.region, 1462 sizeof(struct mesh_cb)); 1463 if(!r) 1464 return 0; 1465 r->buf = buf; 1466 log_assert(fptr_whitelist_mesh_cb(cb)); /* early failure ifmissing*/ 1467 r->cb = cb; 1468 r->cb_arg = cb_arg; 1469 r->edns = *edns; 1470 if(edns->opt_list) { 1471 r->edns.opt_list = edns_opt_copy_region(edns->opt_list, 1472 s->s.region); 1473 if(!r->edns.opt_list) 1474 return 0; 1475 } 1476 r->qid = qid; 1477 r->qflags = qflags; 1478 r->next = s->cb_list; 1479 s->cb_list = r; 1480 return 1; 1481 1482 } 1483 1484 int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns, 1485 struct comm_reply* rep, uint16_t qid, uint16_t qflags, 1486 const struct query_info* qinfo) 1487 { 1488 struct mesh_reply* r = regional_alloc(s->s.region, 1489 sizeof(struct mesh_reply)); 1490 if(!r) 1491 return 0; 1492 r->query_reply = *rep; 1493 r->edns = *edns; 1494 if(edns->opt_list) { 1495 r->edns.opt_list = edns_opt_copy_region(edns->opt_list, 1496 s->s.region); 1497 if(!r->edns.opt_list) 1498 return 0; 1499 } 1500 r->qid = qid; 1501 r->qflags = qflags; 1502 r->start_time = *s->s.env->now_tv; 1503 r->next = s->reply_list; 1504 r->qname = regional_alloc_init(s->s.region, qinfo->qname, 1505 s->s.qinfo.qname_len); 1506 if(!r->qname) 1507 return 0; 1508 if(rep->c->use_h2) 1509 r->h2_stream = rep->c->h2_stream; 1510 1511 /* Data related to local alias stored in 'qinfo' (if any) is ephemeral 1512 * and can be different for different original queries (even if the 1513 * replaced query name is the same). So we need to make a deep copy 1514 * and store the copy for each reply info. */ 1515 if(qinfo->local_alias) { 1516 struct packed_rrset_data* d; 1517 struct packed_rrset_data* dsrc; 1518 r->local_alias = regional_alloc_zero(s->s.region, 1519 sizeof(*qinfo->local_alias)); 1520 if(!r->local_alias) 1521 return 0; 1522 r->local_alias->rrset = regional_alloc_init(s->s.region, 1523 qinfo->local_alias->rrset, 1524 sizeof(*qinfo->local_alias->rrset)); 1525 if(!r->local_alias->rrset) 1526 return 0; 1527 dsrc = qinfo->local_alias->rrset->entry.data; 1528 1529 /* In the current implementation, a local alias must be 1530 * a single CNAME RR (see worker_handle_request()). */ 1531 log_assert(!qinfo->local_alias->next && dsrc->count == 1 && 1532 qinfo->local_alias->rrset->rk.type == 1533 htons(LDNS_RR_TYPE_CNAME)); 1534 /* we should make a local copy for the owner name of 1535 * the RRset */ 1536 r->local_alias->rrset->rk.dname_len = 1537 qinfo->local_alias->rrset->rk.dname_len; 1538 r->local_alias->rrset->rk.dname = regional_alloc_init( 1539 s->s.region, qinfo->local_alias->rrset->rk.dname, 1540 qinfo->local_alias->rrset->rk.dname_len); 1541 if(!r->local_alias->rrset->rk.dname) 1542 return 0; 1543 1544 /* the rrset is not packed, like in the cache, but it is 1545 * individualy allocated with an allocator from localzone. */ 1546 d = regional_alloc_zero(s->s.region, sizeof(*d)); 1547 if(!d) 1548 return 0; 1549 r->local_alias->rrset->entry.data = d; 1550 if(!rrset_insert_rr(s->s.region, d, dsrc->rr_data[0], 1551 dsrc->rr_len[0], dsrc->rr_ttl[0], "CNAME local alias")) 1552 return 0; 1553 } else 1554 r->local_alias = NULL; 1555 1556 s->reply_list = r; 1557 return 1; 1558 } 1559 1560 /* Extract the query info and flags from 'mstate' into '*qinfop' and '*qflags'. 1561 * Since this is only used for internal refetch of otherwise-expired answer, 1562 * we simply ignore the rare failure mode when memory allocation fails. */ 1563 static void 1564 mesh_copy_qinfo(struct mesh_state* mstate, struct query_info** qinfop, 1565 uint16_t* qflags) 1566 { 1567 struct regional* region = mstate->s.env->scratch; 1568 struct query_info* qinfo; 1569 1570 qinfo = regional_alloc_init(region, &mstate->s.qinfo, sizeof(*qinfo)); 1571 if(!qinfo) 1572 return; 1573 qinfo->qname = regional_alloc_init(region, qinfo->qname, 1574 qinfo->qname_len); 1575 if(!qinfo->qname) 1576 return; 1577 *qinfop = qinfo; 1578 *qflags = mstate->s.query_flags; 1579 } 1580 1581 /** 1582 * Continue processing the mesh state at another module. 1583 * Handles module to modules transfer of control. 1584 * Handles module finished. 1585 * @param mesh: the mesh area. 1586 * @param mstate: currently active mesh state. 1587 * Deleted if finished, calls _done and _supers to 1588 * send replies to clients and inform other mesh states. 1589 * This in turn may create additional runnable mesh states. 1590 * @param s: state at which the current module exited. 1591 * @param ev: the event sent to the module. 1592 * returned is the event to send to the next module. 1593 * @return true if continue processing at the new module. 1594 * false if not continued processing is needed. 1595 */ 1596 static int 1597 mesh_continue(struct mesh_area* mesh, struct mesh_state* mstate, 1598 enum module_ext_state s, enum module_ev* ev) 1599 { 1600 mstate->num_activated++; 1601 if(mstate->num_activated > MESH_MAX_ACTIVATION) { 1602 /* module is looping. Stop it. */ 1603 log_err("internal error: looping module (%s) stopped", 1604 mesh->mods.mod[mstate->s.curmod]->name); 1605 log_query_info(NO_VERBOSE, "pass error for qstate", 1606 &mstate->s.qinfo); 1607 s = module_error; 1608 } 1609 if(s == module_wait_module || s == module_restart_next) { 1610 /* start next module */ 1611 mstate->s.curmod++; 1612 if(mesh->mods.num == mstate->s.curmod) { 1613 log_err("Cannot pass to next module; at last module"); 1614 log_query_info(VERB_QUERY, "pass error for qstate", 1615 &mstate->s.qinfo); 1616 mstate->s.curmod--; 1617 return mesh_continue(mesh, mstate, module_error, ev); 1618 } 1619 if(s == module_restart_next) { 1620 int curmod = mstate->s.curmod; 1621 for(; mstate->s.curmod < mesh->mods.num; 1622 mstate->s.curmod++) { 1623 fptr_ok(fptr_whitelist_mod_clear( 1624 mesh->mods.mod[mstate->s.curmod]->clear)); 1625 (*mesh->mods.mod[mstate->s.curmod]->clear) 1626 (&mstate->s, mstate->s.curmod); 1627 mstate->s.minfo[mstate->s.curmod] = NULL; 1628 } 1629 mstate->s.curmod = curmod; 1630 } 1631 *ev = module_event_pass; 1632 return 1; 1633 } 1634 if(s == module_wait_subquery && mstate->sub_set.count == 0) { 1635 log_err("module cannot wait for subquery, subquery list empty"); 1636 log_query_info(VERB_QUERY, "pass error for qstate", 1637 &mstate->s.qinfo); 1638 s = module_error; 1639 } 1640 if(s == module_error && mstate->s.return_rcode == LDNS_RCODE_NOERROR) { 1641 /* error is bad, handle pass back up below */ 1642 mstate->s.return_rcode = LDNS_RCODE_SERVFAIL; 1643 } 1644 if(s == module_error) { 1645 mesh_query_done(mstate); 1646 mesh_walk_supers(mesh, mstate); 1647 mesh_state_delete(&mstate->s); 1648 return 0; 1649 } 1650 if(s == module_finished) { 1651 if(mstate->s.curmod == 0) { 1652 struct query_info* qinfo = NULL; 1653 uint16_t qflags; 1654 1655 mesh_query_done(mstate); 1656 mesh_walk_supers(mesh, mstate); 1657 1658 /* If the answer to the query needs to be refetched 1659 * from an external DNS server, we'll need to schedule 1660 * a prefetch after removing the current state, so 1661 * we need to make a copy of the query info here. */ 1662 if(mstate->s.need_refetch) 1663 mesh_copy_qinfo(mstate, &qinfo, &qflags); 1664 1665 mesh_state_delete(&mstate->s); 1666 if(qinfo) { 1667 mesh_schedule_prefetch(mesh, qinfo, qflags, 1668 0, 1); 1669 } 1670 return 0; 1671 } 1672 /* pass along the locus of control */ 1673 mstate->s.curmod --; 1674 *ev = module_event_moddone; 1675 return 1; 1676 } 1677 return 0; 1678 } 1679 1680 void mesh_run(struct mesh_area* mesh, struct mesh_state* mstate, 1681 enum module_ev ev, struct outbound_entry* e) 1682 { 1683 enum module_ext_state s; 1684 verbose(VERB_ALGO, "mesh_run: start"); 1685 while(mstate) { 1686 /* run the module */ 1687 fptr_ok(fptr_whitelist_mod_operate( 1688 mesh->mods.mod[mstate->s.curmod]->operate)); 1689 (*mesh->mods.mod[mstate->s.curmod]->operate) 1690 (&mstate->s, ev, mstate->s.curmod, e); 1691 1692 /* examine results */ 1693 mstate->s.reply = NULL; 1694 regional_free_all(mstate->s.env->scratch); 1695 s = mstate->s.ext_state[mstate->s.curmod]; 1696 verbose(VERB_ALGO, "mesh_run: %s module exit state is %s", 1697 mesh->mods.mod[mstate->s.curmod]->name, strextstate(s)); 1698 e = NULL; 1699 if(mesh_continue(mesh, mstate, s, &ev)) 1700 continue; 1701 1702 /* run more modules */ 1703 ev = module_event_pass; 1704 if(mesh->run.count > 0) { 1705 /* pop random element off the runnable tree */ 1706 mstate = (struct mesh_state*)mesh->run.root->key; 1707 (void)rbtree_delete(&mesh->run, mstate); 1708 } else mstate = NULL; 1709 } 1710 if(verbosity >= VERB_ALGO) { 1711 mesh_stats(mesh, "mesh_run: end"); 1712 mesh_log_list(mesh); 1713 } 1714 } 1715 1716 void 1717 mesh_log_list(struct mesh_area* mesh) 1718 { 1719 char buf[30]; 1720 struct mesh_state* m; 1721 int num = 0; 1722 RBTREE_FOR(m, struct mesh_state*, &mesh->all) { 1723 snprintf(buf, sizeof(buf), "%d%s%s%s%s%s%s mod%d %s%s", 1724 num++, (m->s.is_priming)?"p":"", /* prime */ 1725 (m->s.is_valrec)?"v":"", /* prime */ 1726 (m->s.query_flags&BIT_RD)?"RD":"", 1727 (m->s.query_flags&BIT_CD)?"CD":"", 1728 (m->super_set.count==0)?"d":"", /* detached */ 1729 (m->sub_set.count!=0)?"c":"", /* children */ 1730 m->s.curmod, (m->reply_list)?"rep":"", /*hasreply*/ 1731 (m->cb_list)?"cb":"" /* callbacks */ 1732 ); 1733 log_query_info(VERB_ALGO, buf, &m->s.qinfo); 1734 } 1735 } 1736 1737 void 1738 mesh_stats(struct mesh_area* mesh, const char* str) 1739 { 1740 verbose(VERB_DETAIL, "%s %u recursion states (%u with reply, " 1741 "%u detached), %u waiting replies, %u recursion replies " 1742 "sent, %d replies dropped, %d states jostled out", 1743 str, (unsigned)mesh->all.count, 1744 (unsigned)mesh->num_reply_states, 1745 (unsigned)mesh->num_detached_states, 1746 (unsigned)mesh->num_reply_addrs, 1747 (unsigned)mesh->replies_sent, 1748 (unsigned)mesh->stats_dropped, 1749 (unsigned)mesh->stats_jostled); 1750 if(mesh->replies_sent > 0) { 1751 struct timeval avg; 1752 timeval_divide(&avg, &mesh->replies_sum_wait, 1753 mesh->replies_sent); 1754 log_info("average recursion processing time " 1755 ARG_LL "d.%6.6d sec", 1756 (long long)avg.tv_sec, (int)avg.tv_usec); 1757 log_info("histogram of recursion processing times"); 1758 timehist_log(mesh->histogram, "recursions"); 1759 } 1760 } 1761 1762 void 1763 mesh_stats_clear(struct mesh_area* mesh) 1764 { 1765 if(!mesh) 1766 return; 1767 mesh->replies_sent = 0; 1768 mesh->replies_sum_wait.tv_sec = 0; 1769 mesh->replies_sum_wait.tv_usec = 0; 1770 mesh->stats_jostled = 0; 1771 mesh->stats_dropped = 0; 1772 timehist_clear(mesh->histogram); 1773 mesh->ans_secure = 0; 1774 mesh->ans_bogus = 0; 1775 mesh->ans_expired = 0; 1776 memset(&mesh->ans_rcode[0], 0, sizeof(size_t)*UB_STATS_RCODE_NUM); 1777 memset(&mesh->rpz_action[0], 0, sizeof(size_t)*UB_STATS_RPZ_ACTION_NUM); 1778 mesh->ans_nodata = 0; 1779 } 1780 1781 size_t 1782 mesh_get_mem(struct mesh_area* mesh) 1783 { 1784 struct mesh_state* m; 1785 size_t s = sizeof(*mesh) + sizeof(struct timehist) + 1786 sizeof(struct th_buck)*mesh->histogram->num + 1787 sizeof(sldns_buffer) + sldns_buffer_capacity(mesh->qbuf_bak); 1788 RBTREE_FOR(m, struct mesh_state*, &mesh->all) { 1789 /* all, including m itself allocated in qstate region */ 1790 s += regional_get_mem(m->s.region); 1791 } 1792 return s; 1793 } 1794 1795 int 1796 mesh_detect_cycle(struct module_qstate* qstate, struct query_info* qinfo, 1797 uint16_t flags, int prime, int valrec) 1798 { 1799 struct mesh_area* mesh = qstate->env->mesh; 1800 struct mesh_state* dep_m = NULL; 1801 if(!mesh_state_is_unique(qstate->mesh_info)) 1802 dep_m = mesh_area_find(mesh, NULL, qinfo, flags, prime, valrec); 1803 return mesh_detect_cycle_found(qstate, dep_m); 1804 } 1805 1806 void mesh_list_insert(struct mesh_state* m, struct mesh_state** fp, 1807 struct mesh_state** lp) 1808 { 1809 /* insert as last element */ 1810 m->prev = *lp; 1811 m->next = NULL; 1812 if(*lp) 1813 (*lp)->next = m; 1814 else *fp = m; 1815 *lp = m; 1816 } 1817 1818 void mesh_list_remove(struct mesh_state* m, struct mesh_state** fp, 1819 struct mesh_state** lp) 1820 { 1821 if(m->next) 1822 m->next->prev = m->prev; 1823 else *lp = m->prev; 1824 if(m->prev) 1825 m->prev->next = m->next; 1826 else *fp = m->next; 1827 } 1828 1829 void mesh_state_remove_reply(struct mesh_area* mesh, struct mesh_state* m, 1830 struct comm_point* cp) 1831 { 1832 struct mesh_reply* n, *prev = NULL; 1833 n = m->reply_list; 1834 /* when in mesh_cleanup, it sets the reply_list to NULL, so that 1835 * there is no accounting twice */ 1836 if(!n) return; /* nothing to remove, also no accounting needed */ 1837 while(n) { 1838 if(n->query_reply.c == cp) { 1839 /* unlink it */ 1840 if(prev) prev->next = n->next; 1841 else m->reply_list = n->next; 1842 /* delete it, but allocated in m region */ 1843 log_assert(mesh->num_reply_addrs > 0); 1844 mesh->num_reply_addrs--; 1845 1846 /* prev = prev; */ 1847 n = n->next; 1848 continue; 1849 } 1850 prev = n; 1851 n = n->next; 1852 } 1853 /* it was not detached (because it had a reply list), could be now */ 1854 if(!m->reply_list && !m->cb_list 1855 && m->super_set.count == 0) { 1856 mesh->num_detached_states++; 1857 } 1858 /* if not replies any more in mstate, it is no longer a reply_state */ 1859 if(!m->reply_list && !m->cb_list) { 1860 log_assert(mesh->num_reply_states > 0); 1861 mesh->num_reply_states--; 1862 } 1863 } 1864 1865 1866 static int 1867 apply_respip_action(struct module_qstate* qstate, 1868 const struct query_info* qinfo, struct respip_client_info* cinfo, 1869 struct respip_action_info* actinfo, struct reply_info* rep, 1870 struct ub_packed_rrset_key** alias_rrset, 1871 struct reply_info** encode_repp, struct auth_zones* az) 1872 { 1873 if(qinfo->qtype != LDNS_RR_TYPE_A && 1874 qinfo->qtype != LDNS_RR_TYPE_AAAA && 1875 qinfo->qtype != LDNS_RR_TYPE_ANY) 1876 return 1; 1877 1878 if(!respip_rewrite_reply(qinfo, cinfo, rep, encode_repp, actinfo, 1879 alias_rrset, 0, qstate->region, az)) 1880 return 0; 1881 1882 /* xxx_deny actions mean dropping the reply, unless the original reply 1883 * was redirected to response-ip data. */ 1884 if((actinfo->action == respip_deny || 1885 actinfo->action == respip_inform_deny) && 1886 *encode_repp == rep) 1887 *encode_repp = NULL; 1888 1889 return 1; 1890 } 1891 1892 void 1893 mesh_serve_expired_callback(void* arg) 1894 { 1895 struct mesh_state* mstate = (struct mesh_state*) arg; 1896 struct module_qstate* qstate = &mstate->s; 1897 struct mesh_reply* r, *rlist; 1898 struct mesh_area* mesh = qstate->env->mesh; 1899 struct dns_msg* msg; 1900 struct mesh_cb* c; 1901 struct mesh_reply* prev = NULL; 1902 struct sldns_buffer* prev_buffer = NULL; 1903 struct sldns_buffer* r_buffer = NULL; 1904 struct reply_info* partial_rep = NULL; 1905 struct ub_packed_rrset_key* alias_rrset = NULL; 1906 struct reply_info* encode_rep = NULL; 1907 struct respip_action_info actinfo; 1908 struct query_info* lookup_qinfo = &qstate->qinfo; 1909 struct query_info qinfo_tmp; 1910 int must_validate = (!(qstate->query_flags&BIT_CD) 1911 || qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate; 1912 if(!qstate->serve_expired_data) return; 1913 verbose(VERB_ALGO, "Serve expired: Trying to reply with expired data"); 1914 comm_timer_delete(qstate->serve_expired_data->timer); 1915 qstate->serve_expired_data->timer = NULL; 1916 if(qstate->blacklist || qstate->no_cache_lookup || qstate->is_drop) { 1917 verbose(VERB_ALGO, 1918 "Serve expired: Not allowed to look into cache for stale"); 1919 return; 1920 } 1921 /* The following while is used instead of the `goto lookup_cache` 1922 * like in the worker. */ 1923 while(1) { 1924 fptr_ok(fptr_whitelist_serve_expired_lookup( 1925 qstate->serve_expired_data->get_cached_answer)); 1926 msg = qstate->serve_expired_data->get_cached_answer(qstate, 1927 lookup_qinfo); 1928 if(!msg) 1929 return; 1930 /* Reset these in case we pass a second time from here. */ 1931 encode_rep = msg->rep; 1932 memset(&actinfo, 0, sizeof(actinfo)); 1933 actinfo.action = respip_none; 1934 alias_rrset = NULL; 1935 if((mesh->use_response_ip || mesh->use_rpz) && 1936 !partial_rep && !apply_respip_action(qstate, &qstate->qinfo, 1937 qstate->client_info, &actinfo, msg->rep, &alias_rrset, &encode_rep, 1938 qstate->env->auth_zones)) { 1939 return; 1940 } else if(partial_rep && 1941 !respip_merge_cname(partial_rep, &qstate->qinfo, msg->rep, 1942 qstate->client_info, must_validate, &encode_rep, qstate->region, 1943 qstate->env->auth_zones)) { 1944 return; 1945 } 1946 if(!encode_rep || alias_rrset) { 1947 if(!encode_rep) { 1948 /* Needs drop */ 1949 return; 1950 } else { 1951 /* A partial CNAME chain is found. */ 1952 partial_rep = encode_rep; 1953 } 1954 } 1955 /* We've found a partial reply ending with an 1956 * alias. Replace the lookup qinfo for the 1957 * alias target and lookup the cache again to 1958 * (possibly) complete the reply. As we're 1959 * passing the "base" reply, there will be no 1960 * more alias chasing. */ 1961 if(partial_rep) { 1962 memset(&qinfo_tmp, 0, sizeof(qinfo_tmp)); 1963 get_cname_target(alias_rrset, &qinfo_tmp.qname, 1964 &qinfo_tmp.qname_len); 1965 if(!qinfo_tmp.qname) { 1966 log_err("Serve expired: unexpected: invalid answer alias"); 1967 return; 1968 } 1969 qinfo_tmp.qtype = qstate->qinfo.qtype; 1970 qinfo_tmp.qclass = qstate->qinfo.qclass; 1971 lookup_qinfo = &qinfo_tmp; 1972 continue; 1973 } 1974 break; 1975 } 1976 1977 if(verbosity >= VERB_ALGO) 1978 log_dns_msg("Serve expired lookup", &qstate->qinfo, msg->rep); 1979 1980 for(r = mstate->reply_list; r; r = r->next) { 1981 /* If address info is returned, it means the action should be an 1982 * 'inform' variant and the information should be logged. */ 1983 if(actinfo.addrinfo) { 1984 respip_inform_print(&actinfo, r->qname, 1985 qstate->qinfo.qtype, qstate->qinfo.qclass, 1986 r->local_alias, &r->query_reply); 1987 1988 if(qstate->env->cfg->stat_extended && actinfo.rpz_used) { 1989 if(actinfo.rpz_disabled) 1990 qstate->env->mesh->rpz_action[RPZ_DISABLED_ACTION]++; 1991 if(actinfo.rpz_cname_override) 1992 qstate->env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION]++; 1993 else 1994 qstate->env->mesh->rpz_action[ 1995 respip_action_to_rpz_action(actinfo.action)]++; 1996 } 1997 } 1998 1999 r_buffer = r->query_reply.c->buffer; 2000 if(r->query_reply.c->tcp_req_info) 2001 r_buffer = r->query_reply.c->tcp_req_info->spool_buffer; 2002 /* briefly set the replylist to null in case the meshsendreply 2003 * calls tcpreqinfo sendreply that comm_point_drops because 2004 * of size, and then the null stops the mesh state remove and 2005 * thus reply_list modification and accounting */ 2006 rlist = mstate->reply_list; 2007 mstate->reply_list = NULL; 2008 mesh_send_reply(mstate, LDNS_RCODE_NOERROR, msg->rep, 2009 r, r_buffer, prev, prev_buffer); 2010 mstate->reply_list = rlist; 2011 if(r->query_reply.c->tcp_req_info) 2012 tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate); 2013 prev = r; 2014 prev_buffer = r_buffer; 2015 2016 /* Account for each reply sent. */ 2017 mesh->ans_expired++; 2018 2019 } 2020 if(mstate->reply_list) { 2021 mstate->reply_list = NULL; 2022 if(!mstate->reply_list && !mstate->cb_list) { 2023 log_assert(mesh->num_reply_states > 0); 2024 mesh->num_reply_states--; 2025 if(mstate->super_set.count == 0) { 2026 mesh->num_detached_states++; 2027 } 2028 } 2029 } 2030 while((c = mstate->cb_list) != NULL) { 2031 /* take this cb off the list; so that the list can be 2032 * changed, eg. by adds from the callback routine */ 2033 if(!mstate->reply_list && mstate->cb_list && !c->next) { 2034 /* was a reply state, not anymore */ 2035 log_assert(qstate->env->mesh->num_reply_states > 0); 2036 qstate->env->mesh->num_reply_states--; 2037 } 2038 mstate->cb_list = c->next; 2039 if(!mstate->reply_list && !mstate->cb_list && 2040 mstate->super_set.count == 0) 2041 qstate->env->mesh->num_detached_states++; 2042 mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c); 2043 } 2044 } 2045