1 /* 2 * services/mesh.c - deal with mesh of query states and handle events for that. 3 * 4 * Copyright (c) 2007, NLnet Labs. All rights reserved. 5 * 6 * This software is open source. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * Redistributions of source code must retain the above copyright notice, 13 * this list of conditions and the following disclaimer. 14 * 15 * Redistributions in binary form must reproduce the above copyright notice, 16 * this list of conditions and the following disclaimer in the documentation 17 * and/or other materials provided with the distribution. 18 * 19 * Neither the name of the NLNET LABS nor the names of its contributors may 20 * be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 34 */ 35 36 /** 37 * \file 38 * 39 * This file contains functions to assist in dealing with a mesh of 40 * query states. This mesh is supposed to be thread-specific. 
41 * It consists of query states (per qname, qtype, qclass) and connections 42 * between query states and the super and subquery states, and replies to 43 * send back to clients. 44 */ 45 #include "config.h" 46 #include "services/mesh.h" 47 #include "services/outbound_list.h" 48 #include "services/cache/dns.h" 49 #include "services/cache/rrset.h" 50 #include "util/log.h" 51 #include "util/net_help.h" 52 #include "util/module.h" 53 #include "util/regional.h" 54 #include "util/data/msgencode.h" 55 #include "util/timehist.h" 56 #include "util/fptr_wlist.h" 57 #include "util/alloc.h" 58 #include "util/config_file.h" 59 #include "util/edns.h" 60 #include "sldns/sbuffer.h" 61 #include "sldns/wire2str.h" 62 #include "services/localzone.h" 63 #include "util/data/dname.h" 64 #include "respip/respip.h" 65 #include "services/listen_dnsport.h" 66 #include "util/timeval_func.h" 67 68 #ifdef CLIENT_SUBNET 69 #include "edns-subnet/subnetmod.h" 70 #include "edns-subnet/edns-subnet.h" 71 #endif 72 73 /** 74 * Compare two response-ip client info entries for the purpose of mesh state 75 * compare. It returns 0 if ci_a and ci_b are considered equal; otherwise 76 * 1 or -1 (they mean 'ci_a is larger/smaller than ci_b', respectively, but 77 * in practice it should be only used to mean they are different). 78 * We cannot share the mesh state for two queries if different response-ip 79 * actions can apply in the end, even if those queries are otherwise identical. 80 * For this purpose we compare tag lists and tag action lists; they should be 81 * identical to share the same state. 82 * For tag data, we don't look into the data content, as it can be 83 * expensive; unless tag data are not defined for both or they point to the 84 * exact same data in memory (i.e., they come from the same ACL entry), we 85 * consider these data different. 86 * Likewise, if the client info is associated with views, we don't look into 87 * the views. 
They are considered different unless they are exactly the same 88 * even if the views only differ in the names. 89 */ 90 static int 91 client_info_compare(const struct respip_client_info* ci_a, 92 const struct respip_client_info* ci_b) 93 { 94 int cmp; 95 96 if(!ci_a && !ci_b) 97 return 0; 98 if(ci_a && !ci_b) 99 return -1; 100 if(!ci_a && ci_b) 101 return 1; 102 if(ci_a->taglen != ci_b->taglen) 103 return (ci_a->taglen < ci_b->taglen) ? -1 : 1; 104 if(ci_a->taglist && !ci_b->taglist) 105 return -1; 106 if(!ci_a->taglist && ci_b->taglist) 107 return 1; 108 if(ci_a->taglist && ci_b->taglist) { 109 cmp = memcmp(ci_a->taglist, ci_b->taglist, ci_a->taglen); 110 if(cmp != 0) 111 return cmp; 112 } 113 if(ci_a->tag_actions_size != ci_b->tag_actions_size) 114 return (ci_a->tag_actions_size < ci_b->tag_actions_size) ? 115 -1 : 1; 116 if(ci_a->tag_actions && !ci_b->tag_actions) 117 return -1; 118 if(!ci_a->tag_actions && ci_b->tag_actions) 119 return 1; 120 if(ci_a->tag_actions && ci_b->tag_actions) { 121 cmp = memcmp(ci_a->tag_actions, ci_b->tag_actions, 122 ci_a->tag_actions_size); 123 if(cmp != 0) 124 return cmp; 125 } 126 if(ci_a->tag_datas != ci_b->tag_datas) 127 return ci_a->tag_datas < ci_b->tag_datas ? -1 : 1; 128 if(ci_a->view != ci_b->view) 129 return ci_a->view < ci_b->view ? -1 : 1; 130 /* For the unbound daemon these should be non-NULL and identical, 131 * but we check that just in case. */ 132 if(ci_a->respip_set != ci_b->respip_set) 133 return ci_a->respip_set < ci_b->respip_set ? 
-1 : 1; 134 return 0; 135 } 136 137 int 138 mesh_state_compare(const void* ap, const void* bp) 139 { 140 struct mesh_state* a = (struct mesh_state*)ap; 141 struct mesh_state* b = (struct mesh_state*)bp; 142 int cmp; 143 144 if(a->unique < b->unique) 145 return -1; 146 if(a->unique > b->unique) 147 return 1; 148 149 if(a->s.is_priming && !b->s.is_priming) 150 return -1; 151 if(!a->s.is_priming && b->s.is_priming) 152 return 1; 153 154 if(a->s.is_valrec && !b->s.is_valrec) 155 return -1; 156 if(!a->s.is_valrec && b->s.is_valrec) 157 return 1; 158 159 if((a->s.query_flags&BIT_RD) && !(b->s.query_flags&BIT_RD)) 160 return -1; 161 if(!(a->s.query_flags&BIT_RD) && (b->s.query_flags&BIT_RD)) 162 return 1; 163 164 if((a->s.query_flags&BIT_CD) && !(b->s.query_flags&BIT_CD)) 165 return -1; 166 if(!(a->s.query_flags&BIT_CD) && (b->s.query_flags&BIT_CD)) 167 return 1; 168 169 cmp = query_info_compare(&a->s.qinfo, &b->s.qinfo); 170 if(cmp != 0) 171 return cmp; 172 return client_info_compare(a->s.client_info, b->s.client_info); 173 } 174 175 int 176 mesh_state_ref_compare(const void* ap, const void* bp) 177 { 178 struct mesh_state_ref* a = (struct mesh_state_ref*)ap; 179 struct mesh_state_ref* b = (struct mesh_state_ref*)bp; 180 return mesh_state_compare(a->s, b->s); 181 } 182 183 struct mesh_area* 184 mesh_create(struct module_stack* stack, struct module_env* env) 185 { 186 struct mesh_area* mesh = calloc(1, sizeof(struct mesh_area)); 187 if(!mesh) { 188 log_err("mesh area alloc: out of memory"); 189 return NULL; 190 } 191 mesh->histogram = timehist_setup(); 192 mesh->qbuf_bak = sldns_buffer_new(env->cfg->msg_buffer_size); 193 if(!mesh->histogram || !mesh->qbuf_bak) { 194 free(mesh); 195 log_err("mesh area alloc: out of memory"); 196 return NULL; 197 } 198 mesh->mods = *stack; 199 mesh->env = env; 200 rbtree_init(&mesh->run, &mesh_state_compare); 201 rbtree_init(&mesh->all, &mesh_state_compare); 202 mesh->num_reply_addrs = 0; 203 mesh->num_reply_states = 0; 204 
	mesh->num_detached_states = 0;
	mesh->num_forever_states = 0;
	mesh->stats_jostled = 0;
	mesh->stats_dropped = 0;
	mesh->ans_expired = 0;
	mesh->ans_cachedb = 0;
	mesh->max_reply_states = env->cfg->num_queries_per_thread;
	/* half (rounded up) of the reply states may be on the forever list */
	mesh->max_forever_states = (mesh->max_reply_states+1)/2;
#ifndef S_SPLINT_S
	mesh->jostle_max.tv_sec = (time_t)(env->cfg->jostle_time / 1000);
	mesh->jostle_max.tv_usec = (time_t)((env->cfg->jostle_time % 1000)
		*1000);
#endif
	return mesh;
}

/** help mesh delete delete mesh states */
static void
mesh_delete_helper(rbnode_type* n)
{
	struct mesh_state* mstate = (struct mesh_state*)n->key;
	/* perform a full delete, not only 'cleanup' routine,
	 * because other callbacks expect a clean state in the mesh.
	 * For 're-entrant' calls */
	mesh_state_delete(&mstate->s);
	/* but because these delete the items from the tree, postorder
	 * traversal and rbtree rebalancing do not work together */
}

/** Delete the mesh area: all query states in it, the histogram and the
 * query backup buffer.  NULL is allowed. */
void
mesh_delete(struct mesh_area* mesh)
{
	if(!mesh)
		return;
	/* free all query states; deleting the root repeatedly because
	 * mesh_state_delete removes nodes from the tree (see helper) */
	while(mesh->all.count)
		mesh_delete_helper(mesh->all.root);
	timehist_delete(mesh->histogram);
	sldns_buffer_free(mesh->qbuf_bak);
	free(mesh);
}

/** Delete all query states but keep the mesh area itself; resets the
 * trees, counters and the forever/jostle lists.  Dropped replies are
 * accounted in stats_dropped. */
void
mesh_delete_all(struct mesh_area* mesh)
{
	/* free all query states */
	while(mesh->all.count)
		mesh_delete_helper(mesh->all.root);
	mesh->stats_dropped += mesh->num_reply_addrs;
	/* clear mesh area references */
	rbtree_init(&mesh->run, &mesh_state_compare);
	rbtree_init(&mesh->all, &mesh_state_compare);
	mesh->num_reply_addrs = 0;
	mesh->num_reply_states = 0;
	mesh->num_detached_states = 0;
	mesh->num_forever_states = 0;
	mesh->forever_first = NULL;
	mesh->forever_last = NULL;
	mesh->jostle_first = NULL;
	mesh->jostle_last = NULL;
}

/** Make space for a new query state: returns 1 if there is room (either
 * free capacity, or a sufficiently old jostle-list entry was evicted),
 * 0 if the new query must be dropped.  qbuf (may be NULL) is preserved
 * across the eviction via the backup buffer. */
int mesh_make_new_space(struct mesh_area* mesh, sldns_buffer* qbuf)
{
	/* oldest jostle-list entry is the eviction candidate */
	struct mesh_state* m = mesh->jostle_first;
	/* free space is available */
	if(mesh->num_reply_states < mesh->max_reply_states)
		return 1;
	/* try to kick out a jostle-list item */
	if(m && m->reply_list && m->list_select == mesh_jostle_list) {
		/* how old is it? */
		struct timeval age;
		timeval_subtract(&age, mesh->env->now_tv,
			&m->reply_list->start_time);
		if(timeval_smaller(&mesh->jostle_max, &age)) {
			/* its a goner */
			log_nametypeclass(VERB_ALGO, "query jostled out to "
				"make space for a new one",
				m->s.qinfo.qname, m->s.qinfo.qtype,
				m->s.qinfo.qclass);
			/* backup the query */
			if(qbuf) sldns_buffer_copy(mesh->qbuf_bak, qbuf);
			/* notify supers */
			if(m->super_set.count > 0) {
				verbose(VERB_ALGO, "notify supers of failure");
				m->s.return_msg = NULL;
				m->s.return_rcode = LDNS_RCODE_SERVFAIL;
				mesh_walk_supers(mesh, m);
			}
			mesh->stats_jostled ++;
			mesh_state_delete(&m->s);
			/* restore the query - note that the qinfo ptr to
			 * the querybuffer is then correct again. */
			if(qbuf) sldns_buffer_copy(qbuf, mesh->qbuf_bak);
			return 1;
		}
	}
	/* no space for new item */
	return 0;
}

/** Look up an (expired) answer in the message cache for serve-expired
 * processing.  Returns a message allocated in qstate's region, or NULL
 * if there is no usable entry (absent, bogus, or needing validation). */
struct dns_msg*
mesh_serve_expired_lookup(struct module_qstate* qstate,
	struct query_info* lookup_qinfo)
{
	hashvalue_type h;
	struct lruhash_entry* e;
	struct dns_msg* msg;
	struct reply_info* data;
	struct msgreply_entry* key;
	time_t timenow = *qstate->env->now;
	/* validation is required unless CD was set (and not ignored) */
	int must_validate = (!(qstate->query_flags&BIT_CD)
		|| qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate;
	/* Lookup cache */
	h = query_info_hash(lookup_qinfo, qstate->query_flags);
	e = slabhash_lookup(qstate->env->msg_cache, h, lookup_qinfo, 0);
	if(!e) return NULL;

	key = (struct msgreply_entry*)e->key;
	data = (struct reply_info*)e->data;
	msg = tomsg(qstate->env, &key->key, data, qstate->region, timenow,
		qstate->env->cfg->serve_expired, qstate->env->scratch);
	if(!msg)
		goto bail_out;

	/* Check CNAME chain (if any)
	 * This is part of tomsg above; no need to check now. */

	/* Check security status of the cached answer.
	 * tomsg above has a subset of these checks, so we are leaving
	 * these as is.
	 * In case of bogus or revalidation we don't care to reply here.
	 */
	if(must_validate && (msg->rep->security == sec_status_bogus ||
		msg->rep->security == sec_status_secure_sentinel_fail)) {
		verbose(VERB_ALGO, "Serve expired: bogus answer found in cache");
		goto bail_out;
	} else if(msg->rep->security == sec_status_unchecked && must_validate) {
		verbose(VERB_ALGO, "Serve expired: unchecked entry needs "
			"validation");
		goto bail_out; /* need to validate cache entry first */
	} else if(msg->rep->security == sec_status_secure &&
		!reply_all_rrsets_secure(msg->rep) && must_validate) {
		verbose(VERB_ALGO, "Serve expired: secure entry"
			" changed status");
		goto bail_out; /* rrset changed, re-verify */
	}

	lock_rw_unlock(&e->lock);
	return msg;

bail_out:
	lock_rw_unlock(&e->lock);
	return NULL;
}


/** Init the serve expired data structure */
static int
mesh_serve_expired_init(struct mesh_state* mstate, int timeout)
{
	struct timeval t;

	/* Create serve_expired_data if not there yet */
	if(!mstate->s.serve_expired_data) {
		mstate->s.serve_expired_data = (struct serve_expired_data*)
			regional_alloc_zero(
				mstate->s.region, sizeof(struct serve_expired_data));
		if(!mstate->s.serve_expired_data)
			return 0;
	}

	/* Don't overwrite the function if already set */
	mstate->s.serve_expired_data->get_cached_answer =
		mstate->s.serve_expired_data->get_cached_answer?
		mstate->s.serve_expired_data->get_cached_answer:
		&mesh_serve_expired_lookup;

	/* In case this timer already popped, start it again */
	if(!mstate->s.serve_expired_data->timer) {
		mstate->s.serve_expired_data->timer = comm_timer_create(
			mstate->s.env->worker_base, mesh_serve_expired_callback, mstate);
		if(!mstate->s.serve_expired_data->timer)
			return 0;
#ifndef S_SPLINT_S
		/* timeout is in msec; split into seconds and microseconds */
		t.tv_sec = timeout/1000;
		t.tv_usec = (timeout%1000)*1000;
#endif
		comm_timer_set(mstate->s.serve_expired_data->timer, &t);
	}
	return 1;
}

/** Handle a new query from a client: find or create the mesh state for
 * it, attach the reply address, start the serve-expired timer if
 * configured, and run the state if it was newly created.  On failure a
 * SERVFAIL is encoded and sent back; on overload the query is dropped. */
void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo,
	struct respip_client_info* cinfo, uint16_t qflags,
	struct edns_data* edns, struct comm_reply* rep, uint16_t qid,
	int rpz_passthru)
{
	struct mesh_state* s = NULL;
	int unique = unique_mesh_state(edns->opt_list_in, mesh->env);
	int was_detached = 0;
	int was_noreply = 0;
	int added = 0;
	int timeout = mesh->env->cfg->serve_expired?
		mesh->env->cfg->serve_expired_client_timeout:0;
	struct sldns_buffer* r_buffer = rep->c->buffer;
	if(rep->c->tcp_req_info) {
		r_buffer = rep->c->tcp_req_info->spool_buffer;
	}
	/* unique states are never shared, so only look up shareable ones */
	if(!unique)
		s = mesh_area_find(mesh, cinfo, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);
	/* does this create a new reply state? */
	if(!s || s->list_select == mesh_no_list) {
		if(!mesh_make_new_space(mesh, rep->c->buffer)) {
			verbose(VERB_ALGO, "Too many queries. dropping "
				"incoming query.");
			comm_point_drop_reply(rep);
			mesh->stats_dropped++;
			return;
		}
		/* for this new reply state, the reply address is free,
		 * so the limit of reply addresses does not stop reply states*/
	} else {
		/* protect our memory usage from storing reply addresses */
		if(mesh->num_reply_addrs > mesh->max_reply_states*16) {
			verbose(VERB_ALGO, "Too many requests queued. "
				"dropping incoming query.");
			comm_point_drop_reply(rep);
			mesh->stats_dropped++;
			return;
		}
	}
	/* see if it already exists, if not, create one */
	if(!s) {
#ifdef UNBOUND_DEBUG
		struct rbnode_type* n;
#endif
		s = mesh_state_create(mesh->env, qinfo, cinfo,
			qflags&(BIT_RD|BIT_CD), 0, 0);
		if(!s) {
			log_err("mesh_state_create: out of memory; SERVFAIL");
			if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL, NULL,
				LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
					edns->opt_list_inplace_cb_out = NULL;
			error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
				qinfo, qid, qflags, edns);
			comm_point_send_reply(rep);
			return;
		}
		/* set detached (it is now) */
		mesh->num_detached_states++;
		if(unique)
			mesh_state_make_unique(s);
		s->s.rpz_passthru = rpz_passthru;
		/* copy the edns options we got from the front */
		if(edns->opt_list_in) {
			s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list_in,
				s->s.region);
			if(!s->s.edns_opts_front_in) {
				log_err("edns_opt_copy_region: out of memory; SERVFAIL");
				if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL,
					NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
						edns->opt_list_inplace_cb_out = NULL;
				error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
					qinfo, qid, qflags, edns);
				comm_point_send_reply(rep);
				mesh_state_delete(&s->s);
				return;
			}
		}

#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->all, &s->node);
		log_assert(n != NULL);
		added = 1;
	}
	if(!s->reply_list && !s->cb_list) {
		was_noreply = 1;
		if(s->super_set.count == 0) {
			was_detached = 1;
		}
	}
	/* add reply to s */
	if(!mesh_state_add_reply(s, edns, rep, qid, qflags, qinfo)) {
		log_err("mesh_new_client: out of memory; SERVFAIL");
		goto servfail_mem;
	}
	if(rep->c->tcp_req_info) {
		if(!tcp_req_info_add_meshstate(rep->c->tcp_req_info, mesh, s)) {
			log_err("mesh_new_client: out of memory add tcpreqinfo");
			goto servfail_mem;
		}
	}
	if(rep->c->use_h2) {
		http2_stream_add_meshstate(rep->c->h2_stream, mesh, s);
	}
	/* add serve expired timer if required and not already there */
	if(timeout && !mesh_serve_expired_init(s, timeout)) {
		log_err("mesh_new_client: out of memory initializing serve expired");
		goto servfail_mem;
	}
	/* update statistics */
	if(was_detached) {
		log_assert(mesh->num_detached_states > 0);
		mesh->num_detached_states--;
	}
	if(was_noreply) {
		mesh->num_reply_states ++;
	}
	mesh->num_reply_addrs++;
	if(s->list_select == mesh_no_list) {
		/* move to either the forever or the jostle_list */
		if(mesh->num_forever_states < mesh->max_forever_states) {
			mesh->num_forever_states ++;
			mesh_list_insert(s, &mesh->forever_first,
				&mesh->forever_last);
			s->list_select = mesh_forever_list;
		} else {
			mesh_list_insert(s, &mesh->jostle_first,
				&mesh->jostle_last);
			s->list_select = mesh_jostle_list;
		}
	}
	if(added)
		mesh_run(mesh, s, module_event_new, NULL);
	return;

servfail_mem:
	/* out-of-memory exit: answer SERVFAIL and delete the state if it
	 * was created by this call */
	if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, &s->s,
		NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv))
			edns->opt_list_inplace_cb_out = NULL;
	error_encode(r_buffer, LDNS_RCODE_SERVFAIL,
		qinfo, qid, qflags, edns);
	comm_point_send_reply(rep);
	if(added)
		mesh_state_delete(&s->s);
	return;
}

/** Handle a new query that is answered by callback instead of a client
 * reply address (e.g. for libunbound or internal requesters).  Returns
 * 1 on success, 0 on out of memory. */
int
mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo,
	uint16_t qflags, struct edns_data* edns, sldns_buffer* buf,
	uint16_t qid, mesh_cb_func_type cb, void* cb_arg, int rpz_passthru)
{
	struct mesh_state* s = NULL;
	int unique = unique_mesh_state(edns->opt_list_in, mesh->env);
	int timeout = mesh->env->cfg->serve_expired?
		mesh->env->cfg->serve_expired_client_timeout:0;
	int was_detached = 0;
	int was_noreply = 0;
	int added = 0;
	if(!unique)
		s = mesh_area_find(mesh, NULL, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0);

	/* there are no limits on the number of callbacks */

	/* see if it already exists, if not, create one */
	if(!s) {
#ifdef UNBOUND_DEBUG
		struct rbnode_type* n;
#endif
		s = mesh_state_create(mesh->env, qinfo, NULL,
			qflags&(BIT_RD|BIT_CD), 0, 0);
		if(!s) {
			return 0;
		}
		/* set detached (it is now) */
		mesh->num_detached_states++;
		if(unique)
			mesh_state_make_unique(s);
		s->s.rpz_passthru = rpz_passthru;
		if(edns->opt_list_in) {
			s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list_in,
				s->s.region);
			if(!s->s.edns_opts_front_in) {
				mesh_state_delete(&s->s);
				return 0;
			}
		}
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->all, &s->node);
		log_assert(n != NULL);
		added = 1;
	}
	if(!s->reply_list && !s->cb_list) {
		was_noreply = 1;
		if(s->super_set.count == 0) {
			was_detached = 1;
		}
	}
	/* add reply to s */
	if(!mesh_state_add_cb(s, edns, buf, cb, cb_arg, qid, qflags)) {
		/* only delete the state if this call created it */
		if(added)
			mesh_state_delete(&s->s);
		return 0;
	}
	/* add serve expired timer if not already there */
	if(timeout && !mesh_serve_expired_init(s, timeout)) {
		if(added)
			mesh_state_delete(&s->s);
		return 0;
	}
	/* update statistics */
	if(was_detached) {
		log_assert(mesh->num_detached_states > 0);
		mesh->num_detached_states--;
	}
	if(was_noreply) {
		mesh->num_reply_states ++;
	}
	mesh->num_reply_addrs++;
	if(added)
		mesh_run(mesh, s, module_event_new, NULL);
	return 1;
}

/* Internal backend routine of mesh_new_prefetch().  It takes one additional
 * parameter, 'run', which controls whether to run the prefetch state
 * immediately.  When this function is called internally 'run' could be
 * 0 (false), in which case the new state is only made runnable so it
 * will not be run recursively on top of the current state. */
static void mesh_schedule_prefetch(struct mesh_area* mesh,
	struct query_info* qinfo, uint16_t qflags, time_t leeway, int run,
	int rpz_passthru)
{
	struct mesh_state* s = mesh_area_find(mesh, NULL, qinfo,
		qflags&(BIT_RD|BIT_CD), 0, 0);
#ifdef UNBOUND_DEBUG
	struct rbnode_type* n;
#endif
	/* already exists, and for a different purpose perhaps.
	 * if mesh_no_list, keep it that way. */
	if(s) {
		/* make it ignore the cache from now on */
		if(!s->s.blacklist)
			sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
		if(s->s.prefetch_leeway < leeway)
			s->s.prefetch_leeway = leeway;
		return;
	}
	if(!mesh_make_new_space(mesh, NULL)) {
		verbose(VERB_ALGO, "Too many queries. dropped prefetch.");
		mesh->stats_dropped ++;
		return;
	}

	s = mesh_state_create(mesh->env, qinfo, NULL,
		qflags&(BIT_RD|BIT_CD), 0, 0);
	if(!s) {
		log_err("prefetch mesh_state_create: out of memory");
		return;
	}
#ifdef UNBOUND_DEBUG
	n =
#else
	(void)
#endif
	rbtree_insert(&mesh->all, &s->node);
	log_assert(n != NULL);
	/* set detached (it is now) */
	mesh->num_detached_states++;
	/* make it ignore the cache */
	sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
	s->s.prefetch_leeway = leeway;

	if(s->list_select == mesh_no_list) {
		/* move to either the forever or the jostle_list */
		if(mesh->num_forever_states < mesh->max_forever_states) {
			mesh->num_forever_states ++;
			mesh_list_insert(s, &mesh->forever_first,
				&mesh->forever_last);
			s->list_select = mesh_forever_list;
		} else {
			mesh_list_insert(s, &mesh->jostle_first,
				&mesh->jostle_last);
			s->list_select = mesh_jostle_list;
		}
	}
	s->s.rpz_passthru = rpz_passthru;

	if(!run) {
		/* only make it runnable; do not recurse into mesh_run
		 * on top of the currently running state */
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->run, &s->run_node);
		log_assert(n != NULL);
		return;
	}

	mesh_run(mesh, s, module_event_new, NULL);
}

#ifdef CLIENT_SUBNET
/* Same logic as mesh_schedule_prefetch but tailored to the subnet module logic
 * like passing along the comm_reply info.  This will be faked into an EDNS
 * option for processing by the subnet module if the client has not already
 * attached its own ECS data. */
static void mesh_schedule_prefetch_subnet(struct mesh_area* mesh,
	struct query_info* qinfo, uint16_t qflags, time_t leeway, int run,
	int rpz_passthru, struct sockaddr_storage* addr, struct edns_option* edns_list)
{
	struct mesh_state* s = NULL;
	struct edns_option* opt = NULL;
#ifdef UNBOUND_DEBUG
	struct rbnode_type* n;
#endif
	if(!mesh_make_new_space(mesh, NULL)) {
		verbose(VERB_ALGO, "Too many queries. dropped prefetch.");
		mesh->stats_dropped ++;
		return;
	}

	s = mesh_state_create(mesh->env, qinfo, NULL,
		qflags&(BIT_RD|BIT_CD), 0, 0);
	if(!s) {
		log_err("prefetch_subnet mesh_state_create: out of memory");
		return;
	}
	/* subnet-tagged states are never shared with other queries */
	mesh_state_make_unique(s);

	opt = edns_opt_list_find(edns_list, mesh->env->cfg->client_subnet_opcode);
	if(opt) {
		/* Use the client's ECS data */
		if(!edns_opt_list_append(&s->s.edns_opts_front_in, opt->opt_code,
			opt->opt_len, opt->opt_data, s->s.region)) {
			log_err("prefetch_subnet edns_opt_list_append: out of memory");
			return;
		}
	} else {
		/* Store the client's address.  Later in the subnet module,
		 * it is decided whether to include an ECS option or not.
		 */
		s->s.client_addr = *addr;
	}
#ifdef UNBOUND_DEBUG
	n =
#else
	(void)
#endif
	rbtree_insert(&mesh->all, &s->node);
	log_assert(n != NULL);
	/* set detached (it is now) */
	mesh->num_detached_states++;
	/* make it ignore the cache */
	sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region);
	s->s.prefetch_leeway = leeway;

	if(s->list_select == mesh_no_list) {
		/* move to either the forever or the jostle_list */
		if(mesh->num_forever_states < mesh->max_forever_states) {
			mesh->num_forever_states ++;
			mesh_list_insert(s, &mesh->forever_first,
				&mesh->forever_last);
			s->list_select = mesh_forever_list;
		} else {
			mesh_list_insert(s, &mesh->jostle_first,
				&mesh->jostle_last);
			s->list_select = mesh_jostle_list;
		}
	}
	s->s.rpz_passthru = rpz_passthru;

	if(!run) {
		/* only make it runnable; do not run recursively */
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->run, &s->run_node);
		log_assert(n != NULL);
		return;
	}

	mesh_run(mesh, s, module_event_new, NULL);
}
#endif /* CLIENT_SUBNET */

/** Schedule a prefetch for qinfo; dispatches to the subnet-aware variant
 * when compiled with CLIENT_SUBNET and a client address is given. */
void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
	uint16_t qflags, time_t leeway, int rpz_passthru,
	struct sockaddr_storage* addr, struct edns_option* opt_list)
{
	/* unused when built without CLIENT_SUBNET */
	(void)addr;
	(void)opt_list;
#ifdef CLIENT_SUBNET
	if(addr)
		mesh_schedule_prefetch_subnet(mesh, qinfo, qflags, leeway, 1,
			rpz_passthru, addr, opt_list);
	else
#endif
		mesh_schedule_prefetch(mesh, qinfo, qflags, leeway, 1,
			rpz_passthru);
}

/** Deliver a network event (reply, timeout, capsforid failure) for an
 * outbound query to the mesh state that sent it. */
void mesh_report_reply(struct mesh_area* mesh, struct outbound_entry* e,
	struct comm_reply* reply, int what)
{
	enum module_ev event = module_event_reply;
	e->qstate->reply = reply;
	if(what != NETEVENT_NOERROR) {
		event = module_event_noreply;
		if(what == NETEVENT_CAPSFAIL)
			event = module_event_capsfail;
	}
	mesh_run(mesh, e->qstate->mesh_info, event, e);
}

/** Create a new mesh state and its module qstate, allocated in a region
 * obtained from the alloc cache.  Returns NULL on allocation failure;
 * on failure the region is released again. */
struct mesh_state*
mesh_state_create(struct module_env* env, struct query_info* qinfo,
	struct respip_client_info* cinfo, uint16_t qflags, int prime,
	int valrec)
{
	struct regional* region = alloc_reg_obtain(env->alloc);
	struct mesh_state* mstate;
	int i;
	if(!region)
		return NULL;
	mstate = (struct mesh_state*)regional_alloc(region,
		sizeof(struct mesh_state));
	if(!mstate) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	memset(mstate, 0, sizeof(*mstate));
	mstate->node = *RBTREE_NULL;
	mstate->run_node = *RBTREE_NULL;
	mstate->node.key = mstate;
	mstate->run_node.key = mstate;
	mstate->reply_list = NULL;
	mstate->list_select = mesh_no_list;
	mstate->replies_sent = 0;
	rbtree_init(&mstate->super_set, &mesh_state_ref_compare);
	rbtree_init(&mstate->sub_set, &mesh_state_ref_compare);
	mstate->num_activated = 0;
	mstate->unique = NULL;
	/* init module qstate */
	mstate->s.qinfo.qtype = qinfo->qtype;
	mstate->s.qinfo.qclass = qinfo->qclass;
	mstate->s.qinfo.local_alias = NULL;
	mstate->s.qinfo.qname_len = qinfo->qname_len;
	/* qname is copied into the region so the state owns its storage */
	mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname,
		qinfo->qname_len);
	if(!mstate->s.qinfo.qname) {
		alloc_reg_release(env->alloc, region);
		return NULL;
	}
	if(cinfo) {
		mstate->s.client_info = regional_alloc_init(region, cinfo,
			sizeof(*cinfo));
		if(!mstate->s.client_info) {
			alloc_reg_release(env->alloc, region);
			return NULL;
		}
	}
	/* remove all weird bits from qflags */
	mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD));
	mstate->s.is_priming = prime;
	mstate->s.is_valrec = valrec;
	mstate->s.reply = NULL;
	mstate->s.region = region;
	mstate->s.curmod = 0;
	mstate->s.return_msg = 0;
	mstate->s.return_rcode = LDNS_RCODE_NOERROR;
	mstate->s.env = env;
	mstate->s.mesh_info = mstate;
	mstate->s.prefetch_leeway = 0;
	mstate->s.serve_expired_data = NULL;
	mstate->s.no_cache_lookup = 0;
	mstate->s.no_cache_store = 0;
	mstate->s.need_refetch = 0;
	mstate->s.was_ratelimited = 0;
	mstate->s.qstarttime = *env->now;

	/* init modules */
	for(i=0; i<env->mesh->mods.num; i++) {
		mstate->s.minfo[i] = NULL;
		mstate->s.ext_state[i] = module_state_initial;
	}
	/* init edns option lists */
	mstate->s.edns_opts_front_in = NULL;
	mstate->s.edns_opts_back_out = NULL;
	mstate->s.edns_opts_back_in = NULL;
	mstate->s.edns_opts_front_out = NULL;

	return mstate;
}

/** Mark a mesh state as unique: it will never compare equal to (and thus
 * never be shared with) another state in the rbtree. */
void
mesh_state_make_unique(struct mesh_state* mstate)
{
	mstate->unique = mstate;
}

/** Clean up a mesh state: stop its timer, drop unsent replies and invoke
 * pending callbacks with SERVFAIL, de-init the modules and release the
 * region.  Does not remove the state from the mesh trees; see
 * mesh_state_delete for the full removal. */
void
mesh_state_cleanup(struct mesh_state* mstate)
{
	struct mesh_area* mesh;
	int i;
	if(!mstate)
		return;
	mesh = mstate->s.env->mesh;
	/* Stop and delete the serve expired timer */
	if(mstate->s.serve_expired_data && mstate->s.serve_expired_data->timer) {
		comm_timer_delete(mstate->s.serve_expired_data->timer);
		mstate->s.serve_expired_data->timer = NULL;
	}
	/* drop unsent replies */
	if(!mstate->replies_sent) {
		struct mesh_reply* rep = mstate->reply_list;
		struct mesh_cb* cb;
		/* in tcp_req_info, the mstates linked are removed, but
		 * the reply_list is now NULL, so the remove-from-empty-list
		 * takes no time and also it does not do the mesh accounting */
		mstate->reply_list = NULL;
		for(; rep; rep=rep->next) {
			comm_point_drop_reply(&rep->query_reply);
			log_assert(mesh->num_reply_addrs > 0);
			mesh->num_reply_addrs--;
		}
		while((cb = mstate->cb_list)!=NULL) {
			mstate->cb_list = cb->next;
			fptr_ok(fptr_whitelist_mesh_cb(cb->cb));
			(*cb->cb)(cb->cb_arg, LDNS_RCODE_SERVFAIL, NULL,
				sec_status_unchecked, NULL, 0);
			log_assert(mesh->num_reply_addrs > 0);
			mesh->num_reply_addrs--;
		}
	}

	/* de-init modules */
	for(i=0; i<mesh->mods.num; i++) {
fptr_ok(fptr_whitelist_mod_clear(mesh->mods.mod[i]->clear)); 944 (*mesh->mods.mod[i]->clear)(&mstate->s, i); 945 mstate->s.minfo[i] = NULL; 946 mstate->s.ext_state[i] = module_finished; 947 } 948 alloc_reg_release(mstate->s.env->alloc, mstate->s.region); 949 } 950 951 void 952 mesh_state_delete(struct module_qstate* qstate) 953 { 954 struct mesh_area* mesh; 955 struct mesh_state_ref* super, ref; 956 struct mesh_state* mstate; 957 if(!qstate) 958 return; 959 mstate = qstate->mesh_info; 960 mesh = mstate->s.env->mesh; 961 mesh_detach_subs(&mstate->s); 962 if(mstate->list_select == mesh_forever_list) { 963 mesh->num_forever_states --; 964 mesh_list_remove(mstate, &mesh->forever_first, 965 &mesh->forever_last); 966 } else if(mstate->list_select == mesh_jostle_list) { 967 mesh_list_remove(mstate, &mesh->jostle_first, 968 &mesh->jostle_last); 969 } 970 if(!mstate->reply_list && !mstate->cb_list 971 && mstate->super_set.count == 0) { 972 log_assert(mesh->num_detached_states > 0); 973 mesh->num_detached_states--; 974 } 975 if(mstate->reply_list || mstate->cb_list) { 976 log_assert(mesh->num_reply_states > 0); 977 mesh->num_reply_states--; 978 } 979 ref.node.key = &ref; 980 ref.s = mstate; 981 RBTREE_FOR(super, struct mesh_state_ref*, &mstate->super_set) { 982 (void)rbtree_delete(&super->s->sub_set, &ref); 983 } 984 (void)rbtree_delete(&mesh->run, mstate); 985 (void)rbtree_delete(&mesh->all, mstate); 986 mesh_state_cleanup(mstate); 987 } 988 989 /** helper recursive rbtree find routine */ 990 static int 991 find_in_subsub(struct mesh_state* m, struct mesh_state* tofind, size_t *c) 992 { 993 struct mesh_state_ref* r; 994 if((*c)++ > MESH_MAX_SUBSUB) 995 return 1; 996 RBTREE_FOR(r, struct mesh_state_ref*, &m->sub_set) { 997 if(r->s == tofind || find_in_subsub(r->s, tofind, c)) 998 return 1; 999 } 1000 return 0; 1001 } 1002 1003 /** find cycle for already looked up mesh_state */ 1004 static int 1005 mesh_detect_cycle_found(struct module_qstate* qstate, struct mesh_state* 
	dep_m)
{
	struct mesh_state* cyc_m = qstate->mesh_info;
	size_t counter = 0;
	if(!dep_m)
		return 0;
	/* a cycle exists if the dependency equals us, or can reach us
	 * through its (transitive) subqueries; return 2 when the search
	 * was cut off by the MESH_MAX_SUBSUB cap. */
	if(dep_m == cyc_m || find_in_subsub(dep_m, cyc_m, &counter)) {
		if(counter > MESH_MAX_SUBSUB)
			return 2;
		return 1;
	}
	return 0;
}

/** Detach all subqueries: remove our backlink from each sub's super_set
 * and empty our sub_set, updating detached-state accounting. */
void mesh_detach_subs(struct module_qstate* qstate)
{
	struct mesh_area* mesh = qstate->env->mesh;
	struct mesh_state_ref* ref, lookup;
#ifdef UNBOUND_DEBUG
	struct rbnode_type* n;
#endif
	lookup.node.key = &lookup;
	lookup.s = qstate->mesh_info;
	RBTREE_FOR(ref, struct mesh_state_ref*, &qstate->mesh_info->sub_set) {
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_delete(&ref->s->super_set, &lookup);
		log_assert(n != NULL); /* must have been present */
		if(!ref->s->reply_list && !ref->s->cb_list
			&& ref->s->super_set.count == 0) {
			mesh->num_detached_states++;
			log_assert(mesh->num_detached_states +
				mesh->num_reply_states <= mesh->all.count);
		}
	}
	rbtree_init(&qstate->mesh_info->sub_set, &mesh_state_ref_compare);
}

/** Find (or create) the mesh state for a subquery; detects cycles first.
 * On success *sub is the (possibly new) state and *newq is set to the new
 * module_qstate when one was created, NULL when an existing state is reused. */
int mesh_add_sub(struct module_qstate* qstate, struct query_info* qinfo,
	uint16_t qflags, int prime, int valrec, struct module_qstate** newq,
	struct mesh_state** sub)
{
	/* find it, if not, create it */
	struct mesh_area* mesh = qstate->env->mesh;
	*sub = mesh_area_find(mesh, NULL, qinfo, qflags,
		prime, valrec);
	if(mesh_detect_cycle_found(qstate, *sub)) {
		verbose(VERB_ALGO, "attach failed, cycle detected");
		return 0;
	}
	if(!*sub) {
#ifdef UNBOUND_DEBUG
		struct rbnode_type* n;
#endif
		/* create a new one */
		*sub = mesh_state_create(qstate->env, qinfo, NULL, qflags, prime,
			valrec);
		if(!*sub) {
			log_err("mesh_attach_sub: out of memory");
			return 0;
		}
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->all, &(*sub)->node);
		log_assert(n != NULL);
		/* set detached (it is now) */
		mesh->num_detached_states++;
		/* set new query state to run */
#ifdef UNBOUND_DEBUG
		n =
#else
		(void)
#endif
		rbtree_insert(&mesh->run, &(*sub)->run_node);
		log_assert(n != NULL);
		*newq = &(*sub)->s;
	} else
		*newq = NULL;
	return 1;
}

/** Attach a subquery to the current state, creating it if needed and
 * keeping the detached-states counter correct. */
int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo,
	uint16_t qflags, int prime, int valrec, struct module_qstate** newq)
{
	struct mesh_area* mesh = qstate->env->mesh;
	struct mesh_state* sub = NULL;
	int was_detached;
	if(!mesh_add_sub(qstate, qinfo, qflags, prime, valrec, newq, &sub))
		return 0;
	was_detached = (sub->super_set.count == 0);
	if(!mesh_state_attachment(qstate->mesh_info, sub))
		return 0;
	/* if it was a duplicate attachment, the count was not zero before */
	if(!sub->reply_list && !sub->cb_list && was_detached &&
		sub->super_set.count == 1) {
		/* it used to be detached, before this one got added */
		log_assert(mesh->num_detached_states > 0);
		mesh->num_detached_states--;
	}
	/* *newq will be run when inited after the current module stops */
	return 1;
}

/** Create the two-way super/sub link between two mesh states; both link
 * records are region-allocated in the respective opposite state's region. */
int mesh_state_attachment(struct mesh_state* super, struct mesh_state* sub)
{
#ifdef UNBOUND_DEBUG
	struct rbnode_type* n;
#endif
	struct mesh_state_ref* subref; /* points to sub, inserted in super */
	struct mesh_state_ref* superref; /* points to super, inserted in sub */
	if( !(subref = regional_alloc(super->s.region,
		sizeof(struct mesh_state_ref))) ||
		!(superref = regional_alloc(sub->s.region,
		sizeof(struct mesh_state_ref))) ) {
		log_err("mesh_state_attachment: out of memory");
		return 0;
	}
	superref->node.key = superref;
	superref->s = super;
	subref->node.key = subref;
	subref->s = sub;
	if(!rbtree_insert(&sub->super_set, &superref->node)) {
		/* this should not happen, iterator and validator do not
		 * attach subqueries that are identical. */
		/* already attached, we are done, nothing todo.
		 * since superref and subref already allocated in region,
		 * we cannot free them */
		return 1;
	}
#ifdef UNBOUND_DEBUG
	n =
#else
	(void)
#endif
	rbtree_insert(&super->sub_set, &subref->node);
	log_assert(n != NULL); /* we checked above if statement, the reverse
		administration should not fail now, unless they are out of sync */
	return 1;
}

/**
 * callback results to mesh cb entry
 * @param m: mesh state to send it for.
 * @param rcode: if not 0, error code.
 * @param rep: reply to send (or NULL if rcode is set).
 * @param r: callback entry
 * @param start_time: the time to pass to callback functions, it is 0 or
 *	a value from one of the packets if the mesh state had packets.
 */
static void
mesh_do_callback(struct mesh_state* m, int rcode, struct reply_info* rep,
	struct mesh_cb* r, struct timeval* start_time)
{
	int secure;
	char* reason = NULL;
	int was_ratelimited = m->s.was_ratelimited;
	/* bogus messages are not made into servfail, sec_status passed
	 * to the callback function */
	if(rep && rep->security == sec_status_secure)
		secure = 1;
	else	secure = 0;
	if(!rep && rcode == LDNS_RCODE_NOERROR)
		rcode = LDNS_RCODE_SERVFAIL;
	/* for bogus answers, produce a textual reason; a failure to
	 * allocate the reason string downgrades to SERVFAIL */
	if(!rcode && rep && (rep->security == sec_status_bogus ||
		rep->security == sec_status_secure_sentinel_fail)) {
		if(!(reason = errinf_to_str_bogus(&m->s)))
			rcode = LDNS_RCODE_SERVFAIL;
	}
	/* send the reply */
	if(rcode) {
		if(rcode == LDNS_RCODE_SERVFAIL) {
			if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s,
				rep, rcode, &r->edns, NULL, m->s.region, start_time))
					r->edns.opt_list_inplace_cb_out = NULL;
		} else {
			if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode,
				&r->edns, NULL, m->s.region, start_time))
					r->edns.opt_list_inplace_cb_out = NULL;
		}
		fptr_ok(fptr_whitelist_mesh_cb(r->cb));
		(*r->cb)(r->cb_arg, rcode, r->buf, sec_status_unchecked, NULL,
			was_ratelimited);
	} else {
		size_t udp_size = r->edns.udp_size;
		sldns_buffer_clear(r->buf);
		r->edns.edns_version = EDNS_ADVERTISED_VERSION;
		r->edns.udp_size = EDNS_ADVERTISED_SIZE;
		r->edns.ext_rcode = 0;
		r->edns.bits &= EDNS_DO;

		if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep,
			LDNS_RCODE_NOERROR, &r->edns, NULL, m->s.region, start_time) ||
			!reply_info_answer_encode(&m->s.qinfo, rep, r->qid,
			r->qflags, r->buf, 0, 1,
			m->s.env->scratch, udp_size, &r->edns,
			(int)(r->edns.bits & EDNS_DO), secure))
		{
			/* encoding failed (likely out of memory) */
			fptr_ok(fptr_whitelist_mesh_cb(r->cb));
			(*r->cb)(r->cb_arg, LDNS_RCODE_SERVFAIL, r->buf,
				sec_status_unchecked, NULL, 0);
		} else {
			fptr_ok(fptr_whitelist_mesh_cb(r->cb));
			(*r->cb)(r->cb_arg, LDNS_RCODE_NOERROR, r->buf,
				(rep?rep->security:sec_status_unchecked),
				reason, was_ratelimited);
		}
	}
	free(reason);
	log_assert(m->s.env->mesh->num_reply_addrs > 0);
	m->s.env->mesh->num_reply_addrs--;
}

/** true if an rpz response-ip action applies that truncates for TCP-only */
static inline int
mesh_is_rpz_respip_tcponly_action(struct mesh_state const* m)
{
	struct respip_action_info const* respip_info = m->s.respip_action_info;
	return respip_info == NULL
		? 0
		: (respip_info->rpz_used
		&& !respip_info->rpz_disabled
		&& respip_info->action == respip_truncate);
}

/** true if the reply's comm point is a UDP comm point */
static inline int
mesh_is_udp(struct mesh_reply const* r)
{
	return r->query_reply.c->type == comm_udp;
}

/** Append the EDE option (and optional reason text) cached in the
 * reply_info to the reply's outgoing edns option list. */
static inline void
mesh_find_and_attach_ede_and_reason(struct mesh_state* m,
	struct reply_info* rep, struct mesh_reply* r)
{
	/* OLD note:
	 * During validation the EDE code can be received via two
	 * code paths. One code path fills the reply_info EDE, and
	 * the other fills it in the errinf_strlist. These paths
	 * intersect at some points, but where is opaque due to
	 * the complexity of the validator. At the time of writing
	 * we make the choice to prefer the EDE from errinf_strlist
	 * but a compelling reason to do otherwise is just as valid
	 * NEW note:
	 * The compelling reason is that with caching support, the value
	 * in the reply_info is cached.
	 * The reason members of the reply_info struct should be
	 * updated as they are already cached. No reason to
	 * try and find the EDE information in errinf anymore.
1258 */ 1259 if(rep->reason_bogus != LDNS_EDE_NONE) { 1260 edns_opt_list_append_ede(&r->edns.opt_list_out, 1261 m->s.region, rep->reason_bogus, rep->reason_bogus_str); 1262 } 1263 } 1264 1265 /** 1266 * Send reply to mesh reply entry 1267 * @param m: mesh state to send it for. 1268 * @param rcode: if not 0, error code. 1269 * @param rep: reply to send (or NULL if rcode is set). 1270 * @param r: reply entry 1271 * @param r_buffer: buffer to use for reply entry. 1272 * @param prev: previous reply, already has its answer encoded in buffer. 1273 * @param prev_buffer: buffer for previous reply. 1274 */ 1275 static void 1276 mesh_send_reply(struct mesh_state* m, int rcode, struct reply_info* rep, 1277 struct mesh_reply* r, struct sldns_buffer* r_buffer, 1278 struct mesh_reply* prev, struct sldns_buffer* prev_buffer) 1279 { 1280 struct timeval end_time; 1281 struct timeval duration; 1282 int secure; 1283 /* briefly set the replylist to null in case the 1284 * meshsendreply calls tcpreqinfo sendreply that 1285 * comm_point_drops because of size, and then the 1286 * null stops the mesh state remove and thus 1287 * reply_list modification and accounting */ 1288 struct mesh_reply* rlist = m->reply_list; 1289 1290 /* rpz: apply actions */ 1291 rcode = mesh_is_udp(r) && mesh_is_rpz_respip_tcponly_action(m) 1292 ? 
(rcode|BIT_TC) : rcode; 1293 1294 /* examine security status */ 1295 if(m->s.env->need_to_validate && (!(r->qflags&BIT_CD) || 1296 m->s.env->cfg->ignore_cd) && rep && 1297 (rep->security <= sec_status_bogus || 1298 rep->security == sec_status_secure_sentinel_fail)) { 1299 rcode = LDNS_RCODE_SERVFAIL; 1300 if(m->s.env->cfg->stat_extended) 1301 m->s.env->mesh->ans_bogus++; 1302 } 1303 if(rep && rep->security == sec_status_secure) 1304 secure = 1; 1305 else secure = 0; 1306 if(!rep && rcode == LDNS_RCODE_NOERROR) 1307 rcode = LDNS_RCODE_SERVFAIL; 1308 if(r->query_reply.c->use_h2) { 1309 r->query_reply.c->h2_stream = r->h2_stream; 1310 /* Mesh reply won't exist for long anymore. Make it impossible 1311 * for HTTP/2 stream to refer to mesh state, in case 1312 * connection gets cleanup before HTTP/2 stream close. */ 1313 r->h2_stream->mesh_state = NULL; 1314 } 1315 /* send the reply */ 1316 /* We don't reuse the encoded answer if: 1317 * - either the previous or current response has a local alias. We could 1318 * compare the alias records and still reuse the previous answer if they 1319 * are the same, but that would be complicated and error prone for the 1320 * relatively minor case. So we err on the side of safety. 1321 * - there are registered callback functions for the given rcode, as these 1322 * need to be called for each reply. 
*/ 1323 if(((rcode != LDNS_RCODE_SERVFAIL && 1324 !m->s.env->inplace_cb_lists[inplace_cb_reply]) || 1325 (rcode == LDNS_RCODE_SERVFAIL && 1326 !m->s.env->inplace_cb_lists[inplace_cb_reply_servfail])) && 1327 prev && prev_buffer && prev->qflags == r->qflags && 1328 !prev->local_alias && !r->local_alias && 1329 prev->edns.edns_present == r->edns.edns_present && 1330 prev->edns.bits == r->edns.bits && 1331 prev->edns.udp_size == r->edns.udp_size && 1332 edns_opt_list_compare(prev->edns.opt_list_out, r->edns.opt_list_out) == 0 && 1333 edns_opt_list_compare(prev->edns.opt_list_inplace_cb_out, r->edns.opt_list_inplace_cb_out) == 0 1334 ) { 1335 /* if the previous reply is identical to this one, fix ID */ 1336 if(prev_buffer != r_buffer) 1337 sldns_buffer_copy(r_buffer, prev_buffer); 1338 sldns_buffer_write_at(r_buffer, 0, &r->qid, sizeof(uint16_t)); 1339 sldns_buffer_write_at(r_buffer, 12, r->qname, 1340 m->s.qinfo.qname_len); 1341 m->reply_list = NULL; 1342 comm_point_send_reply(&r->query_reply); 1343 m->reply_list = rlist; 1344 } else if(rcode) { 1345 m->s.qinfo.qname = r->qname; 1346 m->s.qinfo.local_alias = r->local_alias; 1347 if(rcode == LDNS_RCODE_SERVFAIL) { 1348 if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s, 1349 rep, rcode, &r->edns, &r->query_reply, m->s.region, &r->start_time)) 1350 r->edns.opt_list_inplace_cb_out = NULL; 1351 } else { 1352 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode, 1353 &r->edns, &r->query_reply, m->s.region, &r->start_time)) 1354 r->edns.opt_list_inplace_cb_out = NULL; 1355 } 1356 /* Send along EDE EDNS0 option when SERVFAILing; usually 1357 * DNSSEC validation failures */ 1358 /* Since we are SERVFAILing here, CD bit and rep->security 1359 * is already handled. 
*/ 1360 if(m->s.env->cfg->ede && rep) { 1361 mesh_find_and_attach_ede_and_reason(m, rep, r); 1362 } 1363 error_encode(r_buffer, rcode, &m->s.qinfo, r->qid, 1364 r->qflags, &r->edns); 1365 m->reply_list = NULL; 1366 comm_point_send_reply(&r->query_reply); 1367 m->reply_list = rlist; 1368 } else { 1369 size_t udp_size = r->edns.udp_size; 1370 r->edns.edns_version = EDNS_ADVERTISED_VERSION; 1371 r->edns.udp_size = EDNS_ADVERTISED_SIZE; 1372 r->edns.ext_rcode = 0; 1373 r->edns.bits &= EDNS_DO; 1374 m->s.qinfo.qname = r->qname; 1375 m->s.qinfo.local_alias = r->local_alias; 1376 1377 /* Attach EDE without SERVFAIL if the validation failed. 1378 * Need to explicitly check for rep->security otherwise failed 1379 * validation paths may attach to a secure answer. */ 1380 if(m->s.env->cfg->ede && rep && 1381 (rep->security <= sec_status_bogus || 1382 rep->security == sec_status_secure_sentinel_fail)) { 1383 mesh_find_and_attach_ede_and_reason(m, rep, r); 1384 } 1385 1386 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, 1387 LDNS_RCODE_NOERROR, &r->edns, &r->query_reply, m->s.region, &r->start_time) || 1388 !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 1389 r->qflags, r_buffer, 0, 1, m->s.env->scratch, 1390 udp_size, &r->edns, (int)(r->edns.bits & EDNS_DO), 1391 secure)) 1392 { 1393 if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s, 1394 rep, LDNS_RCODE_SERVFAIL, &r->edns, &r->query_reply, m->s.region, &r->start_time)) 1395 r->edns.opt_list_inplace_cb_out = NULL; 1396 /* internal server error (probably malloc failure) so no 1397 * EDE (RFC8914) needed */ 1398 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 1399 &m->s.qinfo, r->qid, r->qflags, &r->edns); 1400 } 1401 m->reply_list = NULL; 1402 comm_point_send_reply(&r->query_reply); 1403 m->reply_list = rlist; 1404 } 1405 /* account */ 1406 log_assert(m->s.env->mesh->num_reply_addrs > 0); 1407 m->s.env->mesh->num_reply_addrs--; 1408 end_time = *m->s.env->now_tv; 1409 timeval_subtract(&duration, 
		&end_time, &r->start_time);
	verbose(VERB_ALGO, "query took " ARG_LL "d.%6.6d sec",
		(long long)duration.tv_sec, (int)duration.tv_usec);
	m->s.env->mesh->replies_sent++;
	timeval_add(&m->s.env->mesh->replies_sum_wait, &duration);
	timehist_insert(m->s.env->mesh->histogram, &duration);
	if(m->s.env->cfg->stat_extended) {
		/* the rcode is read back out of the encoded packet */
		uint16_t rc = FLAGS_GET_RCODE(sldns_buffer_read_u16_at(
			r_buffer, 2));
		if(secure) m->s.env->mesh->ans_secure++;
		m->s.env->mesh->ans_rcode[ rc ] ++;
		if(rc == 0 && LDNS_ANCOUNT(sldns_buffer_begin(r_buffer)) == 0)
			m->s.env->mesh->ans_nodata++;
	}
	/* Log reply sent */
	if(m->s.env->cfg->log_replies) {
		log_reply_info(NO_VERBOSE, &m->s.qinfo,
			&r->query_reply.client_addr,
			r->query_reply.client_addrlen, duration, 0, r_buffer);
	}
}

/** The mesh state is done: send replies to all clients and run all
 * registered callbacks, with accounting for the mesh counters. */
void mesh_query_done(struct mesh_state* mstate)
{
	struct mesh_reply* r;
	struct mesh_reply* prev = NULL;
	struct sldns_buffer* prev_buffer = NULL;
	struct mesh_cb* c;
	struct reply_info* rep = (mstate->s.return_msg?
		mstate->s.return_msg->rep:NULL);
	struct timeval tv = {0, 0};
	int i = 0;
	/* No need for the serve expired timer anymore; we are going to reply. */
	if(mstate->s.serve_expired_data) {
		comm_timer_delete(mstate->s.serve_expired_data->timer);
		mstate->s.serve_expired_data->timer = NULL;
	}
	if(mstate->s.return_rcode == LDNS_RCODE_SERVFAIL ||
		(rep && FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_SERVFAIL)) {
		/* we are SERVFAILing; check for expired answer here */
		mesh_serve_expired_callback(mstate);
		if((mstate->reply_list || mstate->cb_list)
			&& mstate->s.env->cfg->log_servfail
			&& !mstate->s.env->cfg->val_log_squelch) {
			char* err = errinf_to_str_servfail(&mstate->s);
			if(err)
				log_err("%s", err);
			free(err);
		}
	}
	for(r = mstate->reply_list; r; r = r->next) {
		i++;
		tv = r->start_time;

		/* if a response-ip address block has been stored the
		 * information should be logged for each client. */
		if(mstate->s.respip_action_info &&
			mstate->s.respip_action_info->addrinfo) {
			respip_inform_print(mstate->s.respip_action_info,
				r->qname, mstate->s.qinfo.qtype,
				mstate->s.qinfo.qclass, r->local_alias,
				&r->query_reply.client_addr,
				r->query_reply.client_addrlen);
		}

		/* if this query is determined to be dropped during the
		 * mesh processing, this is the point to take that action. */
		if(mstate->s.is_drop) {
			/* briefly set the reply_list to NULL, so that the
			 * tcp req info cleanup routine that calls the mesh
			 * to deregister the meshstate for it is not done
			 * because the list is NULL and also accounting is not
			 * done there, but instead we do that here. */
			struct mesh_reply* reply_list = mstate->reply_list;
			mstate->reply_list = NULL;
			comm_point_drop_reply(&r->query_reply);
			mstate->reply_list = reply_list;
		} else {
			struct sldns_buffer* r_buffer = r->query_reply.c->buffer;
			if(r->query_reply.c->tcp_req_info) {
				r_buffer = r->query_reply.c->tcp_req_info->spool_buffer;
				/* the spool buffer is per-request, do not
				 * reuse the previous encoded answer */
				prev_buffer = NULL;
			}
			mesh_send_reply(mstate, mstate->s.return_rcode, rep,
				r, r_buffer, prev, prev_buffer);
			if(r->query_reply.c->tcp_req_info) {
				tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate);
				r_buffer = NULL;
			}
			prev = r;
			prev_buffer = r_buffer;
		}
	}
	/* Account for each reply sent. */
	if(i > 0 && mstate->s.respip_action_info &&
		mstate->s.respip_action_info->addrinfo &&
		mstate->s.env->cfg->stat_extended &&
		mstate->s.respip_action_info->rpz_used) {
		if(mstate->s.respip_action_info->rpz_disabled)
			mstate->s.env->mesh->rpz_action[RPZ_DISABLED_ACTION] += i;
		if(mstate->s.respip_action_info->rpz_cname_override)
			mstate->s.env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION] += i;
		else
			mstate->s.env->mesh->rpz_action[respip_action_to_rpz_action(
				mstate->s.respip_action_info->action)] += i;
	}
	if(!mstate->s.is_drop && i > 0) {
		if(mstate->s.env->cfg->stat_extended
			&& mstate->s.is_cachedb_answer) {
			mstate->s.env->mesh->ans_cachedb += i;
		}
	}

	/* Mesh area accounting */
	if(mstate->reply_list) {
		mstate->reply_list = NULL;
		if(!mstate->reply_list && !mstate->cb_list) {
			/* was a reply state, not anymore */
			log_assert(mstate->s.env->mesh->num_reply_states > 0);
			mstate->s.env->mesh->num_reply_states--;
		}
		if(!mstate->reply_list && !mstate->cb_list &&
			mstate->super_set.count == 0)
			mstate->s.env->mesh->num_detached_states++;
	}
	mstate->replies_sent = 1;

	while((c = mstate->cb_list) != NULL) {
		/* take this cb off the list; so that the list can be
		 * changed, eg. by adds from the callback routine */
		if(!mstate->reply_list && mstate->cb_list && !c->next) {
			/* was a reply state, not anymore */
			log_assert(mstate->s.env->mesh->num_reply_states > 0);
			mstate->s.env->mesh->num_reply_states--;
		}
		mstate->cb_list = c->next;
		if(!mstate->reply_list && !mstate->cb_list &&
			mstate->super_set.count == 0)
			mstate->s.env->mesh->num_detached_states++;
		mesh_do_callback(mstate, mstate->s.return_rcode, rep, c, &tv);
	}
}

/** Walk all super states of mstate, make them runnable and inform them
 * of the result via the current module's inform_super function. */
void mesh_walk_supers(struct mesh_area* mesh, struct mesh_state* mstate)
{
	struct mesh_state_ref* ref;
	RBTREE_FOR(ref, struct mesh_state_ref*, &mstate->super_set)
	{
		/* make super runnable */
		(void)rbtree_insert(&mesh->run, &ref->s->run_node);
		/* callback the function to inform super of result */
		fptr_ok(fptr_whitelist_mod_inform_super(
			mesh->mods.mod[ref->s->s.curmod]->inform_super));
		(*mesh->mods.mod[ref->s->s.curmod]->inform_super)(&mstate->s,
			ref->s->s.curmod, &ref->s->s);
		/* copy state that is always relevant to super */
		copy_state_to_super(&mstate->s, ref->s->s.curmod, &ref->s->s);
	}
}

/** Look up an existing mesh state by query info, flags, prime and valrec;
 * returns NULL when no matching state exists. */
struct mesh_state* mesh_area_find(struct mesh_area* mesh,
	struct respip_client_info* cinfo, struct query_info* qinfo,
	uint16_t qflags, int prime, int valrec)
{
	struct mesh_state key;
	struct mesh_state* result;

	key.node.key = &key;
	key.s.is_priming = prime;
	key.s.is_valrec = valrec;
	key.s.qinfo = *qinfo;
	key.s.query_flags = qflags;
	/* We are searching for a similar mesh state when we DO want to
	 * aggregate the state. Thus unique is set to NULL.
	 * (default when we desire aggregation).*/
	key.unique = NULL;
	key.s.client_info = cinfo;

	result = (struct mesh_state*)rbtree_search(&mesh->all, &key);
	return result;
}

/** Register a callback on a mesh state; the callback entry and its edns
 * option list copies are allocated in the state's region. Returns 0 on
 * allocation failure. */
int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns,
	sldns_buffer* buf, mesh_cb_func_type cb, void* cb_arg,
	uint16_t qid, uint16_t qflags)
{
	struct mesh_cb* r = regional_alloc(s->s.region,
		sizeof(struct mesh_cb));
	if(!r)
		return 0;
	r->buf = buf;
	log_assert(fptr_whitelist_mesh_cb(cb)); /* early failure ifmissing*/
	r->cb = cb;
	r->cb_arg = cb_arg;
	r->edns = *edns;
	if(edns->opt_list_in && !(r->edns.opt_list_in =
		edns_opt_copy_region(edns->opt_list_in, s->s.region)))
		return 0;
	if(edns->opt_list_out && !(r->edns.opt_list_out =
		edns_opt_copy_region(edns->opt_list_out, s->s.region)))
		return 0;
	if(edns->opt_list_inplace_cb_out && !(r->edns.opt_list_inplace_cb_out =
		edns_opt_copy_region(edns->opt_list_inplace_cb_out, s->s.region)))
		return 0;
	r->qid = qid;
	r->qflags = qflags;
	r->next = s->cb_list;
	s->cb_list = r;
	return 1;

}

/** Register a client reply on a mesh state; copies the comm_reply, edns
 * option lists, qname and (deep-copied) local alias data into the state's
 * region. Returns 0 on allocation failure. */
int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns,
	struct comm_reply* rep, uint16_t qid, uint16_t qflags,
	const struct query_info* qinfo)
{
	struct mesh_reply* r = regional_alloc(s->s.region,
		sizeof(struct mesh_reply));
	if(!r)
		return 0;
	r->query_reply = *rep;
	r->edns = *edns;
	if(edns->opt_list_in && !(r->edns.opt_list_in =
		edns_opt_copy_region(edns->opt_list_in, s->s.region)))
		return 0;
	if(edns->opt_list_out && !(r->edns.opt_list_out =
		edns_opt_copy_region(edns->opt_list_out, s->s.region)))
		return 0;
	if(edns->opt_list_inplace_cb_out && !(r->edns.opt_list_inplace_cb_out =
		edns_opt_copy_region(edns->opt_list_inplace_cb_out, s->s.region)))
		return 0;
	r->qid = qid;
	r->qflags = qflags;
	r->start_time = *s->s.env->now_tv;
	r->next = s->reply_list;
	r->qname = regional_alloc_init(s->s.region, qinfo->qname,
		s->s.qinfo.qname_len);
	if(!r->qname)
		return 0;
	if(rep->c->use_h2)
		r->h2_stream = rep->c->h2_stream;

	/* Data related to local alias stored in 'qinfo' (if any) is ephemeral
	 * and can be different for different original queries (even if the
	 * replaced query name is the same). So we need to make a deep copy
	 * and store the copy for each reply info. */
	if(qinfo->local_alias) {
		struct packed_rrset_data* d;
		struct packed_rrset_data* dsrc;
		r->local_alias = regional_alloc_zero(s->s.region,
			sizeof(*qinfo->local_alias));
		if(!r->local_alias)
			return 0;
		r->local_alias->rrset = regional_alloc_init(s->s.region,
			qinfo->local_alias->rrset,
			sizeof(*qinfo->local_alias->rrset));
		if(!r->local_alias->rrset)
			return 0;
		dsrc = qinfo->local_alias->rrset->entry.data;

		/* In the current implementation, a local alias must be
		 * a single CNAME RR (see worker_handle_request()). */
		log_assert(!qinfo->local_alias->next && dsrc->count == 1 &&
			qinfo->local_alias->rrset->rk.type ==
			htons(LDNS_RR_TYPE_CNAME));
		/* we should make a local copy for the owner name of
		 * the RRset */
		r->local_alias->rrset->rk.dname_len =
			qinfo->local_alias->rrset->rk.dname_len;
		r->local_alias->rrset->rk.dname = regional_alloc_init(
			s->s.region, qinfo->local_alias->rrset->rk.dname,
			qinfo->local_alias->rrset->rk.dname_len);
		if(!r->local_alias->rrset->rk.dname)
			return 0;

		/* the rrset is not packed, like in the cache, but it is
		 * individually allocated with an allocator from localzone. */
		d = regional_alloc_zero(s->s.region, sizeof(*d));
		if(!d)
			return 0;
		r->local_alias->rrset->entry.data = d;
		if(!rrset_insert_rr(s->s.region, d, dsrc->rr_data[0],
			dsrc->rr_len[0], dsrc->rr_ttl[0], "CNAME local alias"))
			return 0;
	} else
		r->local_alias = NULL;

	s->reply_list = r;
	return 1;
}

/* Extract the query info and flags from 'mstate' into '*qinfop' and '*qflags'.
 * Since this is only used for internal refetch of otherwise-expired answer,
 * we simply ignore the rare failure mode when memory allocation fails. */
static void
mesh_copy_qinfo(struct mesh_state* mstate, struct query_info** qinfop,
	uint16_t* qflags)
{
	struct regional* region = mstate->s.env->scratch;
	struct query_info* qinfo;

	qinfo = regional_alloc_init(region, &mstate->s.qinfo, sizeof(*qinfo));
	if(!qinfo)
		return;
	qinfo->qname = regional_alloc_init(region, qinfo->qname,
		qinfo->qname_len);
	if(!qinfo->qname)
		return;
	/* outputs are only set on full success; caller checks *qinfop */
	*qinfop = qinfo;
	*qflags = mstate->s.query_flags;
}

/**
 * Continue processing the mesh state at another module.
 * Handles module to modules transfer of control.
 * Handles module finished.
 * @param mesh: the mesh area.
 * @param mstate: currently active mesh state.
 *	Deleted if finished, calls _done and _supers to
 *	send replies to clients and inform other mesh states.
 *	This in turn may create additional runnable mesh states.
 * @param s: state at which the current module exited.
 * @param ev: the event sent to the module.
 *	returned is the event to send to the next module.
 * @return true if continue processing at the new module.
 *	false if not continued processing is needed.
1735 */ 1736 static int 1737 mesh_continue(struct mesh_area* mesh, struct mesh_state* mstate, 1738 enum module_ext_state s, enum module_ev* ev) 1739 { 1740 mstate->num_activated++; 1741 if(mstate->num_activated > MESH_MAX_ACTIVATION) { 1742 /* module is looping. Stop it. */ 1743 log_err("internal error: looping module (%s) stopped", 1744 mesh->mods.mod[mstate->s.curmod]->name); 1745 log_query_info(NO_VERBOSE, "pass error for qstate", 1746 &mstate->s.qinfo); 1747 s = module_error; 1748 } 1749 if(s == module_wait_module || s == module_restart_next) { 1750 /* start next module */ 1751 mstate->s.curmod++; 1752 if(mesh->mods.num == mstate->s.curmod) { 1753 log_err("Cannot pass to next module; at last module"); 1754 log_query_info(VERB_QUERY, "pass error for qstate", 1755 &mstate->s.qinfo); 1756 mstate->s.curmod--; 1757 return mesh_continue(mesh, mstate, module_error, ev); 1758 } 1759 if(s == module_restart_next) { 1760 int curmod = mstate->s.curmod; 1761 for(; mstate->s.curmod < mesh->mods.num; 1762 mstate->s.curmod++) { 1763 fptr_ok(fptr_whitelist_mod_clear( 1764 mesh->mods.mod[mstate->s.curmod]->clear)); 1765 (*mesh->mods.mod[mstate->s.curmod]->clear) 1766 (&mstate->s, mstate->s.curmod); 1767 mstate->s.minfo[mstate->s.curmod] = NULL; 1768 } 1769 mstate->s.curmod = curmod; 1770 } 1771 *ev = module_event_pass; 1772 return 1; 1773 } 1774 if(s == module_wait_subquery && mstate->sub_set.count == 0) { 1775 log_err("module cannot wait for subquery, subquery list empty"); 1776 log_query_info(VERB_QUERY, "pass error for qstate", 1777 &mstate->s.qinfo); 1778 s = module_error; 1779 } 1780 if(s == module_error && mstate->s.return_rcode == LDNS_RCODE_NOERROR) { 1781 /* error is bad, handle pass back up below */ 1782 mstate->s.return_rcode = LDNS_RCODE_SERVFAIL; 1783 } 1784 if(s == module_error) { 1785 mesh_query_done(mstate); 1786 mesh_walk_supers(mesh, mstate); 1787 mesh_state_delete(&mstate->s); 1788 return 0; 1789 } 1790 if(s == module_finished) { 1791 if(mstate->s.curmod == 0) 
{ 1792 struct query_info* qinfo = NULL; 1793 struct edns_option* opt_list = NULL; 1794 struct sockaddr_storage addr; 1795 uint16_t qflags; 1796 int rpz_p = 0; 1797 1798 #ifdef CLIENT_SUBNET 1799 struct edns_option* ecs; 1800 if(mstate->s.need_refetch && mstate->reply_list && 1801 modstack_find(&mesh->mods, "subnetcache") != -1 && 1802 mstate->s.env->unique_mesh) { 1803 addr = mstate->reply_list->query_reply.client_addr; 1804 } else 1805 #endif 1806 memset(&addr, 0, sizeof(addr)); 1807 1808 mesh_query_done(mstate); 1809 mesh_walk_supers(mesh, mstate); 1810 1811 /* If the answer to the query needs to be refetched 1812 * from an external DNS server, we'll need to schedule 1813 * a prefetch after removing the current state, so 1814 * we need to make a copy of the query info here. */ 1815 if(mstate->s.need_refetch) { 1816 mesh_copy_qinfo(mstate, &qinfo, &qflags); 1817 #ifdef CLIENT_SUBNET 1818 /* Make also a copy of the ecs option if any */ 1819 if((ecs = edns_opt_list_find( 1820 mstate->s.edns_opts_front_in, 1821 mstate->s.env->cfg->client_subnet_opcode)) != NULL) { 1822 (void)edns_opt_list_append(&opt_list, 1823 ecs->opt_code, ecs->opt_len, 1824 ecs->opt_data, 1825 mstate->s.env->scratch); 1826 } 1827 #endif 1828 rpz_p = mstate->s.rpz_passthru; 1829 } 1830 1831 if(qinfo) { 1832 mesh_state_delete(&mstate->s); 1833 mesh_new_prefetch(mesh, qinfo, qflags, 0, 1834 rpz_p, 1835 addr.ss_family!=AF_UNSPEC?&addr:NULL, 1836 opt_list); 1837 } else { 1838 mesh_state_delete(&mstate->s); 1839 } 1840 return 0; 1841 } 1842 /* pass along the locus of control */ 1843 mstate->s.curmod --; 1844 *ev = module_event_moddone; 1845 return 1; 1846 } 1847 return 0; 1848 } 1849 1850 void mesh_run(struct mesh_area* mesh, struct mesh_state* mstate, 1851 enum module_ev ev, struct outbound_entry* e) 1852 { 1853 enum module_ext_state s; 1854 verbose(VERB_ALGO, "mesh_run: start"); 1855 while(mstate) { 1856 /* run the module */ 1857 fptr_ok(fptr_whitelist_mod_operate( 1858 
mesh->mods.mod[mstate->s.curmod]->operate)); 1859 (*mesh->mods.mod[mstate->s.curmod]->operate) 1860 (&mstate->s, ev, mstate->s.curmod, e); 1861 1862 /* examine results */ 1863 mstate->s.reply = NULL; 1864 regional_free_all(mstate->s.env->scratch); 1865 s = mstate->s.ext_state[mstate->s.curmod]; 1866 verbose(VERB_ALGO, "mesh_run: %s module exit state is %s", 1867 mesh->mods.mod[mstate->s.curmod]->name, strextstate(s)); 1868 e = NULL; 1869 if(mesh_continue(mesh, mstate, s, &ev)) 1870 continue; 1871 1872 /* run more modules */ 1873 ev = module_event_pass; 1874 if(mesh->run.count > 0) { 1875 /* pop random element off the runnable tree */ 1876 mstate = (struct mesh_state*)mesh->run.root->key; 1877 (void)rbtree_delete(&mesh->run, mstate); 1878 } else mstate = NULL; 1879 } 1880 if(verbosity >= VERB_ALGO) { 1881 mesh_stats(mesh, "mesh_run: end"); 1882 mesh_log_list(mesh); 1883 } 1884 } 1885 1886 void 1887 mesh_log_list(struct mesh_area* mesh) 1888 { 1889 char buf[30]; 1890 struct mesh_state* m; 1891 int num = 0; 1892 RBTREE_FOR(m, struct mesh_state*, &mesh->all) { 1893 snprintf(buf, sizeof(buf), "%d%s%s%s%s%s%s mod%d %s%s", 1894 num++, (m->s.is_priming)?"p":"", /* prime */ 1895 (m->s.is_valrec)?"v":"", /* prime */ 1896 (m->s.query_flags&BIT_RD)?"RD":"", 1897 (m->s.query_flags&BIT_CD)?"CD":"", 1898 (m->super_set.count==0)?"d":"", /* detached */ 1899 (m->sub_set.count!=0)?"c":"", /* children */ 1900 m->s.curmod, (m->reply_list)?"rep":"", /*hasreply*/ 1901 (m->cb_list)?"cb":"" /* callbacks */ 1902 ); 1903 log_query_info(VERB_ALGO, buf, &m->s.qinfo); 1904 } 1905 } 1906 1907 void 1908 mesh_stats(struct mesh_area* mesh, const char* str) 1909 { 1910 verbose(VERB_DETAIL, "%s %u recursion states (%u with reply, " 1911 "%u detached), %u waiting replies, %u recursion replies " 1912 "sent, %d replies dropped, %d states jostled out", 1913 str, (unsigned)mesh->all.count, 1914 (unsigned)mesh->num_reply_states, 1915 (unsigned)mesh->num_detached_states, 1916 
(unsigned)mesh->num_reply_addrs, 1917 (unsigned)mesh->replies_sent, 1918 (unsigned)mesh->stats_dropped, 1919 (unsigned)mesh->stats_jostled); 1920 if(mesh->replies_sent > 0) { 1921 struct timeval avg; 1922 timeval_divide(&avg, &mesh->replies_sum_wait, 1923 mesh->replies_sent); 1924 log_info("average recursion processing time " 1925 ARG_LL "d.%6.6d sec", 1926 (long long)avg.tv_sec, (int)avg.tv_usec); 1927 log_info("histogram of recursion processing times"); 1928 timehist_log(mesh->histogram, "recursions"); 1929 } 1930 } 1931 1932 void 1933 mesh_stats_clear(struct mesh_area* mesh) 1934 { 1935 if(!mesh) 1936 return; 1937 mesh->replies_sent = 0; 1938 mesh->replies_sum_wait.tv_sec = 0; 1939 mesh->replies_sum_wait.tv_usec = 0; 1940 mesh->stats_jostled = 0; 1941 mesh->stats_dropped = 0; 1942 timehist_clear(mesh->histogram); 1943 mesh->ans_secure = 0; 1944 mesh->ans_bogus = 0; 1945 mesh->ans_expired = 0; 1946 mesh->ans_cachedb = 0; 1947 memset(&mesh->ans_rcode[0], 0, sizeof(size_t)*UB_STATS_RCODE_NUM); 1948 memset(&mesh->rpz_action[0], 0, sizeof(size_t)*UB_STATS_RPZ_ACTION_NUM); 1949 mesh->ans_nodata = 0; 1950 } 1951 1952 size_t 1953 mesh_get_mem(struct mesh_area* mesh) 1954 { 1955 struct mesh_state* m; 1956 size_t s = sizeof(*mesh) + sizeof(struct timehist) + 1957 sizeof(struct th_buck)*mesh->histogram->num + 1958 sizeof(sldns_buffer) + sldns_buffer_capacity(mesh->qbuf_bak); 1959 RBTREE_FOR(m, struct mesh_state*, &mesh->all) { 1960 /* all, including m itself allocated in qstate region */ 1961 s += regional_get_mem(m->s.region); 1962 } 1963 return s; 1964 } 1965 1966 int 1967 mesh_detect_cycle(struct module_qstate* qstate, struct query_info* qinfo, 1968 uint16_t flags, int prime, int valrec) 1969 { 1970 struct mesh_area* mesh = qstate->env->mesh; 1971 struct mesh_state* dep_m = NULL; 1972 dep_m = mesh_area_find(mesh, NULL, qinfo, flags, prime, valrec); 1973 return mesh_detect_cycle_found(qstate, dep_m); 1974 } 1975 1976 void mesh_list_insert(struct mesh_state* m, struct 
mesh_state** fp, 1977 struct mesh_state** lp) 1978 { 1979 /* insert as last element */ 1980 m->prev = *lp; 1981 m->next = NULL; 1982 if(*lp) 1983 (*lp)->next = m; 1984 else *fp = m; 1985 *lp = m; 1986 } 1987 1988 void mesh_list_remove(struct mesh_state* m, struct mesh_state** fp, 1989 struct mesh_state** lp) 1990 { 1991 if(m->next) 1992 m->next->prev = m->prev; 1993 else *lp = m->prev; 1994 if(m->prev) 1995 m->prev->next = m->next; 1996 else *fp = m->next; 1997 } 1998 1999 void mesh_state_remove_reply(struct mesh_area* mesh, struct mesh_state* m, 2000 struct comm_point* cp) 2001 { 2002 struct mesh_reply* n, *prev = NULL; 2003 n = m->reply_list; 2004 /* when in mesh_cleanup, it sets the reply_list to NULL, so that 2005 * there is no accounting twice */ 2006 if(!n) return; /* nothing to remove, also no accounting needed */ 2007 while(n) { 2008 if(n->query_reply.c == cp) { 2009 /* unlink it */ 2010 if(prev) prev->next = n->next; 2011 else m->reply_list = n->next; 2012 /* delete it, but allocated in m region */ 2013 log_assert(mesh->num_reply_addrs > 0); 2014 mesh->num_reply_addrs--; 2015 2016 /* prev = prev; */ 2017 n = n->next; 2018 continue; 2019 } 2020 prev = n; 2021 n = n->next; 2022 } 2023 /* it was not detached (because it had a reply list), could be now */ 2024 if(!m->reply_list && !m->cb_list 2025 && m->super_set.count == 0) { 2026 mesh->num_detached_states++; 2027 } 2028 /* if not replies any more in mstate, it is no longer a reply_state */ 2029 if(!m->reply_list && !m->cb_list) { 2030 log_assert(mesh->num_reply_states > 0); 2031 mesh->num_reply_states--; 2032 } 2033 } 2034 2035 2036 static int 2037 apply_respip_action(struct module_qstate* qstate, 2038 const struct query_info* qinfo, struct respip_client_info* cinfo, 2039 struct respip_action_info* actinfo, struct reply_info* rep, 2040 struct ub_packed_rrset_key** alias_rrset, 2041 struct reply_info** encode_repp, struct auth_zones* az) 2042 { 2043 if(qinfo->qtype != LDNS_RR_TYPE_A && 2044 qinfo->qtype != 
LDNS_RR_TYPE_AAAA && 2045 qinfo->qtype != LDNS_RR_TYPE_ANY) 2046 return 1; 2047 2048 if(!respip_rewrite_reply(qinfo, cinfo, rep, encode_repp, actinfo, 2049 alias_rrset, 0, qstate->region, az, NULL)) 2050 return 0; 2051 2052 /* xxx_deny actions mean dropping the reply, unless the original reply 2053 * was redirected to response-ip data. */ 2054 if((actinfo->action == respip_deny || 2055 actinfo->action == respip_inform_deny) && 2056 *encode_repp == rep) 2057 *encode_repp = NULL; 2058 2059 return 1; 2060 } 2061 2062 void 2063 mesh_serve_expired_callback(void* arg) 2064 { 2065 struct mesh_state* mstate = (struct mesh_state*) arg; 2066 struct module_qstate* qstate = &mstate->s; 2067 struct mesh_reply* r; 2068 struct mesh_area* mesh = qstate->env->mesh; 2069 struct dns_msg* msg; 2070 struct mesh_cb* c; 2071 struct mesh_reply* prev = NULL; 2072 struct sldns_buffer* prev_buffer = NULL; 2073 struct sldns_buffer* r_buffer = NULL; 2074 struct reply_info* partial_rep = NULL; 2075 struct ub_packed_rrset_key* alias_rrset = NULL; 2076 struct reply_info* encode_rep = NULL; 2077 struct respip_action_info actinfo; 2078 struct query_info* lookup_qinfo = &qstate->qinfo; 2079 struct query_info qinfo_tmp; 2080 struct timeval tv = {0, 0}; 2081 int must_validate = (!(qstate->query_flags&BIT_CD) 2082 || qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate; 2083 int i = 0; 2084 if(!qstate->serve_expired_data) return; 2085 verbose(VERB_ALGO, "Serve expired: Trying to reply with expired data"); 2086 comm_timer_delete(qstate->serve_expired_data->timer); 2087 qstate->serve_expired_data->timer = NULL; 2088 /* If is_drop or no_cache_lookup (modules that handle their own cache e.g., 2089 * subnetmod) ignore stale data from the main cache. 
*/ 2090 if(qstate->no_cache_lookup || qstate->is_drop) { 2091 verbose(VERB_ALGO, 2092 "Serve expired: Not allowed to look into cache for stale"); 2093 return; 2094 } 2095 /* The following while is used instead of the `goto lookup_cache` 2096 * like in the worker. */ 2097 while(1) { 2098 fptr_ok(fptr_whitelist_serve_expired_lookup( 2099 qstate->serve_expired_data->get_cached_answer)); 2100 msg = (*qstate->serve_expired_data->get_cached_answer)(qstate, 2101 lookup_qinfo); 2102 if(!msg) 2103 return; 2104 /* Reset these in case we pass a second time from here. */ 2105 encode_rep = msg->rep; 2106 memset(&actinfo, 0, sizeof(actinfo)); 2107 actinfo.action = respip_none; 2108 alias_rrset = NULL; 2109 if((mesh->use_response_ip || mesh->use_rpz) && 2110 !partial_rep && !apply_respip_action(qstate, &qstate->qinfo, 2111 qstate->client_info, &actinfo, msg->rep, &alias_rrset, &encode_rep, 2112 qstate->env->auth_zones)) { 2113 return; 2114 } else if(partial_rep && 2115 !respip_merge_cname(partial_rep, &qstate->qinfo, msg->rep, 2116 qstate->client_info, must_validate, &encode_rep, qstate->region, 2117 qstate->env->auth_zones)) { 2118 return; 2119 } 2120 if(!encode_rep || alias_rrset) { 2121 if(!encode_rep) { 2122 /* Needs drop */ 2123 return; 2124 } else { 2125 /* A partial CNAME chain is found. */ 2126 partial_rep = encode_rep; 2127 } 2128 } 2129 /* We've found a partial reply ending with an 2130 * alias. Replace the lookup qinfo for the 2131 * alias target and lookup the cache again to 2132 * (possibly) complete the reply. As we're 2133 * passing the "base" reply, there will be no 2134 * more alias chasing. 
*/ 2135 if(partial_rep) { 2136 memset(&qinfo_tmp, 0, sizeof(qinfo_tmp)); 2137 get_cname_target(alias_rrset, &qinfo_tmp.qname, 2138 &qinfo_tmp.qname_len); 2139 if(!qinfo_tmp.qname) { 2140 log_err("Serve expired: unexpected: invalid answer alias"); 2141 return; 2142 } 2143 qinfo_tmp.qtype = qstate->qinfo.qtype; 2144 qinfo_tmp.qclass = qstate->qinfo.qclass; 2145 lookup_qinfo = &qinfo_tmp; 2146 continue; 2147 } 2148 break; 2149 } 2150 2151 if(verbosity >= VERB_ALGO) 2152 log_dns_msg("Serve expired lookup", &qstate->qinfo, msg->rep); 2153 2154 for(r = mstate->reply_list; r; r = r->next) { 2155 i++; 2156 tv = r->start_time; 2157 2158 /* If address info is returned, it means the action should be an 2159 * 'inform' variant and the information should be logged. */ 2160 if(actinfo.addrinfo) { 2161 respip_inform_print(&actinfo, r->qname, 2162 qstate->qinfo.qtype, qstate->qinfo.qclass, 2163 r->local_alias, &r->query_reply.client_addr, 2164 r->query_reply.client_addrlen); 2165 } 2166 2167 /* Add EDE Stale Answer (RCF8914). Ignore global ede as this is 2168 * warning instead of an error */ 2169 if (r->edns.edns_present && qstate->env->cfg->ede_serve_expired && 2170 qstate->env->cfg->ede) { 2171 edns_opt_list_append_ede(&r->edns.opt_list_out, 2172 mstate->s.region, LDNS_EDE_STALE_ANSWER, NULL); 2173 } 2174 2175 r_buffer = r->query_reply.c->buffer; 2176 if(r->query_reply.c->tcp_req_info) 2177 r_buffer = r->query_reply.c->tcp_req_info->spool_buffer; 2178 mesh_send_reply(mstate, LDNS_RCODE_NOERROR, msg->rep, 2179 r, r_buffer, prev, prev_buffer); 2180 if(r->query_reply.c->tcp_req_info) 2181 tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate); 2182 prev = r; 2183 prev_buffer = r_buffer; 2184 } 2185 /* Account for each reply sent. 
*/ 2186 if(i > 0) { 2187 mesh->ans_expired += i; 2188 if(actinfo.addrinfo && qstate->env->cfg->stat_extended && 2189 actinfo.rpz_used) { 2190 if(actinfo.rpz_disabled) 2191 qstate->env->mesh->rpz_action[RPZ_DISABLED_ACTION] += i; 2192 if(actinfo.rpz_cname_override) 2193 qstate->env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION] += i; 2194 else 2195 qstate->env->mesh->rpz_action[ 2196 respip_action_to_rpz_action(actinfo.action)] += i; 2197 } 2198 } 2199 2200 /* Mesh area accounting */ 2201 if(mstate->reply_list) { 2202 mstate->reply_list = NULL; 2203 if(!mstate->reply_list && !mstate->cb_list) { 2204 log_assert(mesh->num_reply_states > 0); 2205 mesh->num_reply_states--; 2206 if(mstate->super_set.count == 0) { 2207 mesh->num_detached_states++; 2208 } 2209 } 2210 } 2211 2212 while((c = mstate->cb_list) != NULL) { 2213 /* take this cb off the list; so that the list can be 2214 * changed, eg. by adds from the callback routine */ 2215 if(!mstate->reply_list && mstate->cb_list && !c->next) { 2216 /* was a reply state, not anymore */ 2217 log_assert(qstate->env->mesh->num_reply_states > 0); 2218 qstate->env->mesh->num_reply_states--; 2219 } 2220 mstate->cb_list = c->next; 2221 if(!mstate->reply_list && !mstate->cb_list && 2222 mstate->super_set.count == 0) 2223 qstate->env->mesh->num_detached_states++; 2224 mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c, &tv); 2225 } 2226 } 2227 2228 int mesh_jostle_exceeded(struct mesh_area* mesh) 2229 { 2230 if(mesh->all.count < mesh->max_reply_states) 2231 return 0; 2232 return 1; 2233 } 2234