1 /* 2 * services/mesh.c - deal with mesh of query states and handle events for that. 3 * 4 * Copyright (c) 2007, NLnet Labs. All rights reserved. 5 * 6 * This software is open source. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * Redistributions of source code must retain the above copyright notice, 13 * this list of conditions and the following disclaimer. 14 * 15 * Redistributions in binary form must reproduce the above copyright notice, 16 * this list of conditions and the following disclaimer in the documentation 17 * and/or other materials provided with the distribution. 18 * 19 * Neither the name of the NLNET LABS nor the names of its contributors may 20 * be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 34 */ 35 36 /** 37 * \file 38 * 39 * This file contains functions to assist in dealing with a mesh of 40 * query states. This mesh is supposed to be thread-specific. 41 * It consists of query states (per qname, qtype, qclass) and connections 42 * between query states and the super and subquery states, and replies to 43 * send back to clients. 44 */ 45 #include "config.h" 46 #include "services/mesh.h" 47 #include "services/outbound_list.h" 48 #include "services/cache/dns.h" 49 #include "services/cache/rrset.h" 50 #include "util/log.h" 51 #include "util/net_help.h" 52 #include "util/module.h" 53 #include "util/regional.h" 54 #include "util/data/msgencode.h" 55 #include "util/timehist.h" 56 #include "util/fptr_wlist.h" 57 #include "util/alloc.h" 58 #include "util/config_file.h" 59 #include "util/edns.h" 60 #include "sldns/sbuffer.h" 61 #include "sldns/wire2str.h" 62 #include "services/localzone.h" 63 #include "util/data/dname.h" 64 #include "respip/respip.h" 65 #include "services/listen_dnsport.h" 66 #include "util/timeval_func.h" 67 68 #ifdef CLIENT_SUBNET 69 #include "edns-subnet/subnetmod.h" 70 #include "edns-subnet/edns-subnet.h" 71 #endif 72 73 /** 74 * Compare two response-ip client info entries for the purpose of mesh state 75 * compare. It returns 0 if ci_a and ci_b are considered equal; otherwise 76 * 1 or -1 (they mean 'ci_a is larger/smaller than ci_b', respectively, but 77 * in practice it should be only used to mean they are different). 78 * We cannot share the mesh state for two queries if different response-ip 79 * actions can apply in the end, even if those queries are otherwise identical. 80 * For this purpose we compare tag lists and tag action lists; they should be 81 * identical to share the same state. 82 * For tag data, we don't look into the data content, as it can be 83 * expensive; unless tag data are not defined for both or they point to the 84 * exact same data in memory (i.e., they come from the same ACL entry), we 85 * consider these data different. 86 * Likewise, if the client info is associated with views, we don't look into 87 * the views. They are considered different unless they are exactly the same 88 * even if the views only differ in the names. 89 */ 90 static int 91 client_info_compare(const struct respip_client_info* ci_a, 92 const struct respip_client_info* ci_b) 93 { 94 int cmp; 95 96 if(!ci_a && !ci_b) 97 return 0; 98 if(ci_a && !ci_b) 99 return -1; 100 if(!ci_a && ci_b) 101 return 1; 102 if(ci_a->taglen != ci_b->taglen) 103 return (ci_a->taglen < ci_b->taglen) ? -1 : 1; 104 if(ci_a->taglist && !ci_b->taglist) 105 return -1; 106 if(!ci_a->taglist && ci_b->taglist) 107 return 1; 108 if(ci_a->taglist && ci_b->taglist) { 109 cmp = memcmp(ci_a->taglist, ci_b->taglist, ci_a->taglen); 110 if(cmp != 0) 111 return cmp; 112 } 113 if(ci_a->tag_actions_size != ci_b->tag_actions_size) 114 return (ci_a->tag_actions_size < ci_b->tag_actions_size) ? 115 -1 : 1; 116 if(ci_a->tag_actions && !ci_b->tag_actions) 117 return -1; 118 if(!ci_a->tag_actions && ci_b->tag_actions) 119 return 1; 120 if(ci_a->tag_actions && ci_b->tag_actions) { 121 cmp = memcmp(ci_a->tag_actions, ci_b->tag_actions, 122 ci_a->tag_actions_size); 123 if(cmp != 0) 124 return cmp; 125 } 126 if(ci_a->tag_datas != ci_b->tag_datas) 127 return ci_a->tag_datas < ci_b->tag_datas ? -1 : 1; 128 if(ci_a->view != ci_b->view) 129 return ci_a->view < ci_b->view ? -1 : 1; 130 /* For the unbound daemon these should be non-NULL and identical, 131 * but we check that just in case. */ 132 if(ci_a->respip_set != ci_b->respip_set) 133 return ci_a->respip_set < ci_b->respip_set ? -1 : 1; 134 return 0; 135 } 136 137 int 138 mesh_state_compare(const void* ap, const void* bp) 139 { 140 struct mesh_state* a = (struct mesh_state*)ap; 141 struct mesh_state* b = (struct mesh_state*)bp; 142 int cmp; 143 144 if(a->unique < b->unique) 145 return -1; 146 if(a->unique > b->unique) 147 return 1; 148 149 if(a->s.is_priming && !b->s.is_priming) 150 return -1; 151 if(!a->s.is_priming && b->s.is_priming) 152 return 1; 153 154 if(a->s.is_valrec && !b->s.is_valrec) 155 return -1; 156 if(!a->s.is_valrec && b->s.is_valrec) 157 return 1; 158 159 if((a->s.query_flags&BIT_RD) && !(b->s.query_flags&BIT_RD)) 160 return -1; 161 if(!(a->s.query_flags&BIT_RD) && (b->s.query_flags&BIT_RD)) 162 return 1; 163 164 if((a->s.query_flags&BIT_CD) && !(b->s.query_flags&BIT_CD)) 165 return -1; 166 if(!(a->s.query_flags&BIT_CD) && (b->s.query_flags&BIT_CD)) 167 return 1; 168 169 cmp = query_info_compare(&a->s.qinfo, &b->s.qinfo); 170 if(cmp != 0) 171 return cmp; 172 return client_info_compare(a->s.client_info, b->s.client_info); 173 } 174 175 int 176 mesh_state_ref_compare(const void* ap, const void* bp) 177 { 178 struct mesh_state_ref* a = (struct mesh_state_ref*)ap; 179 struct mesh_state_ref* b = (struct mesh_state_ref*)bp; 180 return mesh_state_compare(a->s, b->s); 181 } 182 183 struct mesh_area* 184 mesh_create(struct module_stack* stack, struct module_env* env) 185 { 186 struct mesh_area* mesh = calloc(1, sizeof(struct mesh_area)); 187 if(!mesh) { 188 log_err("mesh area alloc: out of memory"); 189 return NULL; 190 } 191 mesh->histogram = timehist_setup(); 192 mesh->qbuf_bak = sldns_buffer_new(env->cfg->msg_buffer_size); 193 if(!mesh->histogram || !mesh->qbuf_bak) { 194 free(mesh); 195 log_err("mesh area alloc: out of memory"); 196 return NULL; 197 } 198 mesh->mods = *stack; 199 mesh->env = env; 200 rbtree_init(&mesh->run, &mesh_state_compare); 201 rbtree_init(&mesh->all, &mesh_state_compare); 202 mesh->num_reply_addrs = 0; 203 mesh->num_reply_states = 0; 204 mesh->num_detached_states = 0; 205 mesh->num_forever_states = 0; 206 mesh->stats_jostled = 0; 207 mesh->stats_dropped = 0; 208 mesh->ans_expired = 0; 209 mesh->ans_cachedb = 0; 210 mesh->max_reply_states = env->cfg->num_queries_per_thread; 211 mesh->max_forever_states = (mesh->max_reply_states+1)/2; 212 #ifndef S_SPLINT_S 213 mesh->jostle_max.tv_sec = (time_t)(env->cfg->jostle_time / 1000); 214 mesh->jostle_max.tv_usec = (time_t)((env->cfg->jostle_time % 1000) 215 *1000); 216 #endif 217 return mesh; 218 } 219 220 /** help mesh delete delete mesh states */ 221 static void 222 mesh_delete_helper(rbnode_type* n) 223 { 224 struct mesh_state* mstate = (struct mesh_state*)n->key; 225 /* perform a full delete, not only 'cleanup' routine, 226 * because other callbacks expect a clean state in the mesh. 227 * For 're-entrant' calls */ 228 mesh_state_delete(&mstate->s); 229 /* but because these delete the items from the tree, postorder 230 * traversal and rbtree rebalancing do not work together */ 231 } 232 233 void 234 mesh_delete(struct mesh_area* mesh) 235 { 236 if(!mesh) 237 return; 238 /* free all query states */ 239 while(mesh->all.count) 240 mesh_delete_helper(mesh->all.root); 241 timehist_delete(mesh->histogram); 242 sldns_buffer_free(mesh->qbuf_bak); 243 free(mesh); 244 } 245 246 void 247 mesh_delete_all(struct mesh_area* mesh) 248 { 249 /* free all query states */ 250 while(mesh->all.count) 251 mesh_delete_helper(mesh->all.root); 252 mesh->stats_dropped += mesh->num_reply_addrs; 253 /* clear mesh area references */ 254 rbtree_init(&mesh->run, &mesh_state_compare); 255 rbtree_init(&mesh->all, &mesh_state_compare); 256 mesh->num_reply_addrs = 0; 257 mesh->num_reply_states = 0; 258 mesh->num_detached_states = 0; 259 mesh->num_forever_states = 0; 260 mesh->forever_first = NULL; 261 mesh->forever_last = NULL; 262 mesh->jostle_first = NULL; 263 mesh->jostle_last = NULL; 264 } 265 266 int mesh_make_new_space(struct mesh_area* mesh, sldns_buffer* qbuf) 267 { 268 struct mesh_state* m = mesh->jostle_first; 269 /* free space is available */ 270 if(mesh->num_reply_states < mesh->max_reply_states) 271 return 1; 272 /* try to kick out a jostle-list item */ 273 if(m && m->reply_list && m->list_select == mesh_jostle_list) { 274 /* how old is it? */ 275 struct timeval age; 276 timeval_subtract(&age, mesh->env->now_tv, 277 &m->reply_list->start_time); 278 if(timeval_smaller(&mesh->jostle_max, &age)) { 279 /* its a goner */ 280 log_nametypeclass(VERB_ALGO, "query jostled out to " 281 "make space for a new one", 282 m->s.qinfo.qname, m->s.qinfo.qtype, 283 m->s.qinfo.qclass); 284 /* backup the query */ 285 if(qbuf) sldns_buffer_copy(mesh->qbuf_bak, qbuf); 286 /* notify supers */ 287 if(m->super_set.count > 0) { 288 verbose(VERB_ALGO, "notify supers of failure"); 289 m->s.return_msg = NULL; 290 m->s.return_rcode = LDNS_RCODE_SERVFAIL; 291 mesh_walk_supers(mesh, m); 292 } 293 mesh->stats_jostled ++; 294 mesh_state_delete(&m->s); 295 /* restore the query - note that the qinfo ptr to 296 * the querybuffer is then correct again. */ 297 if(qbuf) sldns_buffer_copy(qbuf, mesh->qbuf_bak); 298 return 1; 299 } 300 } 301 /* no space for new item */ 302 return 0; 303 } 304 305 struct dns_msg* 306 mesh_serve_expired_lookup(struct module_qstate* qstate, 307 struct query_info* lookup_qinfo) 308 { 309 hashvalue_type h; 310 struct lruhash_entry* e; 311 struct dns_msg* msg; 312 struct reply_info* data; 313 struct msgreply_entry* key; 314 time_t timenow = *qstate->env->now; 315 int must_validate = (!(qstate->query_flags&BIT_CD) 316 || qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate; 317 /* Lookup cache */ 318 h = query_info_hash(lookup_qinfo, qstate->query_flags); 319 e = slabhash_lookup(qstate->env->msg_cache, h, lookup_qinfo, 0); 320 if(!e) return NULL; 321 322 key = (struct msgreply_entry*)e->key; 323 data = (struct reply_info*)e->data; 324 msg = tomsg(qstate->env, &key->key, data, qstate->region, timenow, 325 qstate->env->cfg->serve_expired, qstate->env->scratch); 326 if(!msg) 327 goto bail_out; 328 329 /* Check CNAME chain (if any) 330 * This is part of tomsg above; no need to check now. */ 331 332 /* Check security status of the cached answer. 333 * tomsg above has a subset of these checks, so we are leaving 334 * these as is. 335 * In case of bogus or revalidation we don't care to reply here. */ 336 if(must_validate && (msg->rep->security == sec_status_bogus || 337 msg->rep->security == sec_status_secure_sentinel_fail)) { 338 verbose(VERB_ALGO, "Serve expired: bogus answer found in cache"); 339 goto bail_out; 340 } else if(msg->rep->security == sec_status_unchecked && must_validate) { 341 verbose(VERB_ALGO, "Serve expired: unchecked entry needs " 342 "validation"); 343 goto bail_out; /* need to validate cache entry first */ 344 } else if(msg->rep->security == sec_status_secure && 345 !reply_all_rrsets_secure(msg->rep) && must_validate) { 346 verbose(VERB_ALGO, "Serve expired: secure entry" 347 " changed status"); 348 goto bail_out; /* rrset changed, re-verify */ 349 } 350 351 lock_rw_unlock(&e->lock); 352 return msg; 353 354 bail_out: 355 lock_rw_unlock(&e->lock); 356 return NULL; 357 } 358 359 360 /** Init the serve expired data structure */ 361 static int 362 mesh_serve_expired_init(struct mesh_state* mstate, int timeout) 363 { 364 struct timeval t; 365 366 /* Create serve_expired_data if not there yet */ 367 if(!mstate->s.serve_expired_data) { 368 mstate->s.serve_expired_data = (struct serve_expired_data*) 369 regional_alloc_zero( 370 mstate->s.region, sizeof(struct serve_expired_data)); 371 if(!mstate->s.serve_expired_data) 372 return 0; 373 } 374 375 /* Don't overwrite the function if already set */ 376 mstate->s.serve_expired_data->get_cached_answer = 377 mstate->s.serve_expired_data->get_cached_answer? 378 mstate->s.serve_expired_data->get_cached_answer: 379 &mesh_serve_expired_lookup; 380 381 /* In case this timer already popped, start it again */ 382 if(!mstate->s.serve_expired_data->timer) { 383 mstate->s.serve_expired_data->timer = comm_timer_create( 384 mstate->s.env->worker_base, mesh_serve_expired_callback, mstate); 385 if(!mstate->s.serve_expired_data->timer) 386 return 0; 387 #ifndef S_SPLINT_S 388 t.tv_sec = timeout/1000; 389 t.tv_usec = (timeout%1000)*1000; 390 #endif 391 comm_timer_set(mstate->s.serve_expired_data->timer, &t); 392 } 393 return 1; 394 } 395 396 void mesh_new_client(struct mesh_area* mesh, struct query_info* qinfo, 397 struct respip_client_info* cinfo, uint16_t qflags, 398 struct edns_data* edns, struct comm_reply* rep, uint16_t qid, 399 int rpz_passthru) 400 { 401 struct mesh_state* s = NULL; 402 int unique = unique_mesh_state(edns->opt_list_in, mesh->env); 403 int was_detached = 0; 404 int was_noreply = 0; 405 int added = 0; 406 int timeout = mesh->env->cfg->serve_expired? 407 mesh->env->cfg->serve_expired_client_timeout:0; 408 struct sldns_buffer* r_buffer = rep->c->buffer; 409 if(rep->c->tcp_req_info) { 410 r_buffer = rep->c->tcp_req_info->spool_buffer; 411 } 412 if(!unique) 413 s = mesh_area_find(mesh, cinfo, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0); 414 /* does this create a new reply state? */ 415 if(!s || s->list_select == mesh_no_list) { 416 if(!mesh_make_new_space(mesh, rep->c->buffer)) { 417 verbose(VERB_ALGO, "Too many queries. dropping " 418 "incoming query."); 419 comm_point_drop_reply(rep); 420 mesh->stats_dropped++; 421 return; 422 } 423 /* for this new reply state, the reply address is free, 424 * so the limit of reply addresses does not stop reply states*/ 425 } else { 426 /* protect our memory usage from storing reply addresses */ 427 if(mesh->num_reply_addrs > mesh->max_reply_states*16) { 428 verbose(VERB_ALGO, "Too many requests queued. " 429 "dropping incoming query."); 430 comm_point_drop_reply(rep); 431 mesh->stats_dropped++; 432 return; 433 } 434 } 435 /* see if it already exists, if not, create one */ 436 if(!s) { 437 #ifdef UNBOUND_DEBUG 438 struct rbnode_type* n; 439 #endif 440 s = mesh_state_create(mesh->env, qinfo, cinfo, 441 qflags&(BIT_RD|BIT_CD), 0, 0); 442 if(!s) { 443 log_err("mesh_state_create: out of memory; SERVFAIL"); 444 if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL, NULL, 445 LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv)) 446 edns->opt_list_inplace_cb_out = NULL; 447 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 448 qinfo, qid, qflags, edns); 449 comm_point_send_reply(rep); 450 return; 451 } 452 /* set detached (it is now) */ 453 mesh->num_detached_states++; 454 if(unique) 455 mesh_state_make_unique(s); 456 s->s.rpz_passthru = rpz_passthru; 457 /* copy the edns options we got from the front */ 458 if(edns->opt_list_in) { 459 s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list_in, 460 s->s.region); 461 if(!s->s.edns_opts_front_in) { 462 log_err("edns_opt_copy_region: out of memory; SERVFAIL"); 463 if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, NULL, 464 NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv)) 465 edns->opt_list_inplace_cb_out = NULL; 466 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 467 qinfo, qid, qflags, edns); 468 comm_point_send_reply(rep); 469 mesh_state_delete(&s->s); 470 return; 471 } 472 } 473 474 #ifdef UNBOUND_DEBUG 475 n = 476 #else 477 (void) 478 #endif 479 rbtree_insert(&mesh->all, &s->node); 480 log_assert(n != NULL); 481 added = 1; 482 } 483 if(!s->reply_list && !s->cb_list) { 484 was_noreply = 1; 485 if(s->super_set.count == 0) { 486 was_detached = 1; 487 } 488 } 489 /* add reply to s */ 490 if(!mesh_state_add_reply(s, edns, rep, qid, qflags, qinfo)) { 491 log_err("mesh_new_client: out of memory; SERVFAIL"); 492 goto servfail_mem; 493 } 494 if(rep->c->tcp_req_info) { 495 if(!tcp_req_info_add_meshstate(rep->c->tcp_req_info, mesh, s)) { 496 log_err("mesh_new_client: out of memory add tcpreqinfo"); 497 goto servfail_mem; 498 } 499 } 500 if(rep->c->use_h2) { 501 http2_stream_add_meshstate(rep->c->h2_stream, mesh, s); 502 } 503 /* add serve expired timer if required and not already there */ 504 if(timeout && !mesh_serve_expired_init(s, timeout)) { 505 log_err("mesh_new_client: out of memory initializing serve expired"); 506 goto servfail_mem; 507 } 508 /* update statistics */ 509 if(was_detached) { 510 log_assert(mesh->num_detached_states > 0); 511 mesh->num_detached_states--; 512 } 513 if(was_noreply) { 514 mesh->num_reply_states ++; 515 } 516 mesh->num_reply_addrs++; 517 if(s->list_select == mesh_no_list) { 518 /* move to either the forever or the jostle_list */ 519 if(mesh->num_forever_states < mesh->max_forever_states) { 520 mesh->num_forever_states ++; 521 mesh_list_insert(s, &mesh->forever_first, 522 &mesh->forever_last); 523 s->list_select = mesh_forever_list; 524 } else { 525 mesh_list_insert(s, &mesh->jostle_first, 526 &mesh->jostle_last); 527 s->list_select = mesh_jostle_list; 528 } 529 } 530 if(added) 531 mesh_run(mesh, s, module_event_new, NULL); 532 return; 533 534 servfail_mem: 535 if(!inplace_cb_reply_servfail_call(mesh->env, qinfo, &s->s, 536 NULL, LDNS_RCODE_SERVFAIL, edns, rep, mesh->env->scratch, mesh->env->now_tv)) 537 edns->opt_list_inplace_cb_out = NULL; 538 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 539 qinfo, qid, qflags, edns); 540 comm_point_send_reply(rep); 541 if(added) 542 mesh_state_delete(&s->s); 543 return; 544 } 545 546 int 547 mesh_new_callback(struct mesh_area* mesh, struct query_info* qinfo, 548 uint16_t qflags, struct edns_data* edns, sldns_buffer* buf, 549 uint16_t qid, mesh_cb_func_type cb, void* cb_arg, int rpz_passthru) 550 { 551 struct mesh_state* s = NULL; 552 int unique = unique_mesh_state(edns->opt_list_in, mesh->env); 553 int timeout = mesh->env->cfg->serve_expired? 554 mesh->env->cfg->serve_expired_client_timeout:0; 555 int was_detached = 0; 556 int was_noreply = 0; 557 int added = 0; 558 if(!unique) 559 s = mesh_area_find(mesh, NULL, qinfo, qflags&(BIT_RD|BIT_CD), 0, 0); 560 561 /* there are no limits on the number of callbacks */ 562 563 /* see if it already exists, if not, create one */ 564 if(!s) { 565 #ifdef UNBOUND_DEBUG 566 struct rbnode_type* n; 567 #endif 568 s = mesh_state_create(mesh->env, qinfo, NULL, 569 qflags&(BIT_RD|BIT_CD), 0, 0); 570 if(!s) { 571 return 0; 572 } 573 /* set detached (it is now) */ 574 mesh->num_detached_states++; 575 if(unique) 576 mesh_state_make_unique(s); 577 s->s.rpz_passthru = rpz_passthru; 578 if(edns->opt_list_in) { 579 s->s.edns_opts_front_in = edns_opt_copy_region(edns->opt_list_in, 580 s->s.region); 581 if(!s->s.edns_opts_front_in) { 582 mesh_state_delete(&s->s); 583 return 0; 584 } 585 } 586 #ifdef UNBOUND_DEBUG 587 n = 588 #else 589 (void) 590 #endif 591 rbtree_insert(&mesh->all, &s->node); 592 log_assert(n != NULL); 593 added = 1; 594 } 595 if(!s->reply_list && !s->cb_list) { 596 was_noreply = 1; 597 if(s->super_set.count == 0) { 598 was_detached = 1; 599 } 600 } 601 /* add reply to s */ 602 if(!mesh_state_add_cb(s, edns, buf, cb, cb_arg, qid, qflags)) { 603 if(added) 604 mesh_state_delete(&s->s); 605 return 0; 606 } 607 /* add serve expired timer if not already there */ 608 if(timeout && !mesh_serve_expired_init(s, timeout)) { 609 if(added) 610 mesh_state_delete(&s->s); 611 return 0; 612 } 613 /* update statistics */ 614 if(was_detached) { 615 log_assert(mesh->num_detached_states > 0); 616 mesh->num_detached_states--; 617 } 618 if(was_noreply) { 619 mesh->num_reply_states ++; 620 } 621 mesh->num_reply_addrs++; 622 if(added) 623 mesh_run(mesh, s, module_event_new, NULL); 624 return 1; 625 } 626 627 /* Internal backend routine of mesh_new_prefetch(). It takes one additional 628 * parameter, 'run', which controls whether to run the prefetch state 629 * immediately. When this function is called internally 'run' could be 630 * 0 (false), in which case the new state is only made runnable so it 631 * will not be run recursively on top of the current state. */ 632 static void mesh_schedule_prefetch(struct mesh_area* mesh, 633 struct query_info* qinfo, uint16_t qflags, time_t leeway, int run, 634 int rpz_passthru) 635 { 636 struct mesh_state* s = mesh_area_find(mesh, NULL, qinfo, 637 qflags&(BIT_RD|BIT_CD), 0, 0); 638 #ifdef UNBOUND_DEBUG 639 struct rbnode_type* n; 640 #endif 641 /* already exists, and for a different purpose perhaps. 642 * if mesh_no_list, keep it that way. */ 643 if(s) { 644 /* make it ignore the cache from now on */ 645 if(!s->s.blacklist) 646 sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region); 647 if(s->s.prefetch_leeway < leeway) 648 s->s.prefetch_leeway = leeway; 649 return; 650 } 651 if(!mesh_make_new_space(mesh, NULL)) { 652 verbose(VERB_ALGO, "Too many queries. dropped prefetch."); 653 mesh->stats_dropped ++; 654 return; 655 } 656 657 s = mesh_state_create(mesh->env, qinfo, NULL, 658 qflags&(BIT_RD|BIT_CD), 0, 0); 659 if(!s) { 660 log_err("prefetch mesh_state_create: out of memory"); 661 return; 662 } 663 #ifdef UNBOUND_DEBUG 664 n = 665 #else 666 (void) 667 #endif 668 rbtree_insert(&mesh->all, &s->node); 669 log_assert(n != NULL); 670 /* set detached (it is now) */ 671 mesh->num_detached_states++; 672 /* make it ignore the cache */ 673 sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region); 674 s->s.prefetch_leeway = leeway; 675 676 if(s->list_select == mesh_no_list) { 677 /* move to either the forever or the jostle_list */ 678 if(mesh->num_forever_states < mesh->max_forever_states) { 679 mesh->num_forever_states ++; 680 mesh_list_insert(s, &mesh->forever_first, 681 &mesh->forever_last); 682 s->list_select = mesh_forever_list; 683 } else { 684 mesh_list_insert(s, &mesh->jostle_first, 685 &mesh->jostle_last); 686 s->list_select = mesh_jostle_list; 687 } 688 } 689 s->s.rpz_passthru = rpz_passthru; 690 691 if(!run) { 692 #ifdef UNBOUND_DEBUG 693 n = 694 #else 695 (void) 696 #endif 697 rbtree_insert(&mesh->run, &s->run_node); 698 log_assert(n != NULL); 699 return; 700 } 701 702 mesh_run(mesh, s, module_event_new, NULL); 703 } 704 705 #ifdef CLIENT_SUBNET 706 /* Same logic as mesh_schedule_prefetch but tailored to the subnet module logic 707 * like passing along the comm_reply info. This will be faked into an EDNS 708 * option for processing by the subnet module if the client has not already 709 * attached its own ECS data. */ 710 static void mesh_schedule_prefetch_subnet(struct mesh_area* mesh, 711 struct query_info* qinfo, uint16_t qflags, time_t leeway, int run, 712 int rpz_passthru, struct sockaddr_storage* addr, struct edns_option* edns_list) 713 { 714 struct mesh_state* s = NULL; 715 struct edns_option* opt = NULL; 716 #ifdef UNBOUND_DEBUG 717 struct rbnode_type* n; 718 #endif 719 if(!mesh_make_new_space(mesh, NULL)) { 720 verbose(VERB_ALGO, "Too many queries. dropped prefetch."); 721 mesh->stats_dropped ++; 722 return; 723 } 724 725 s = mesh_state_create(mesh->env, qinfo, NULL, 726 qflags&(BIT_RD|BIT_CD), 0, 0); 727 if(!s) { 728 log_err("prefetch_subnet mesh_state_create: out of memory"); 729 return; 730 } 731 mesh_state_make_unique(s); 732 733 opt = edns_opt_list_find(edns_list, mesh->env->cfg->client_subnet_opcode); 734 if(opt) { 735 /* Use the client's ECS data */ 736 if(!edns_opt_list_append(&s->s.edns_opts_front_in, opt->opt_code, 737 opt->opt_len, opt->opt_data, s->s.region)) { 738 log_err("prefetch_subnet edns_opt_list_append: out of memory"); 739 return; 740 } 741 } else { 742 /* Store the client's address. Later in the subnet module, 743 * it is decided whether to include an ECS option or not. 744 */ 745 s->s.client_addr = *addr; 746 } 747 #ifdef UNBOUND_DEBUG 748 n = 749 #else 750 (void) 751 #endif 752 rbtree_insert(&mesh->all, &s->node); 753 log_assert(n != NULL); 754 /* set detached (it is now) */ 755 mesh->num_detached_states++; 756 /* make it ignore the cache */ 757 sock_list_insert(&s->s.blacklist, NULL, 0, s->s.region); 758 s->s.prefetch_leeway = leeway; 759 760 if(s->list_select == mesh_no_list) { 761 /* move to either the forever or the jostle_list */ 762 if(mesh->num_forever_states < mesh->max_forever_states) { 763 mesh->num_forever_states ++; 764 mesh_list_insert(s, &mesh->forever_first, 765 &mesh->forever_last); 766 s->list_select = mesh_forever_list; 767 } else { 768 mesh_list_insert(s, &mesh->jostle_first, 769 &mesh->jostle_last); 770 s->list_select = mesh_jostle_list; 771 } 772 } 773 s->s.rpz_passthru = rpz_passthru; 774 775 if(!run) { 776 #ifdef UNBOUND_DEBUG 777 n = 778 #else 779 (void) 780 #endif 781 rbtree_insert(&mesh->run, &s->run_node); 782 log_assert(n != NULL); 783 return; 784 } 785 786 mesh_run(mesh, s, module_event_new, NULL); 787 } 788 #endif /* CLIENT_SUBNET */ 789 790 void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo, 791 uint16_t qflags, time_t leeway, int rpz_passthru, 792 struct sockaddr_storage* addr, struct edns_option* opt_list) 793 { 794 (void)addr; 795 (void)opt_list; 796 #ifdef CLIENT_SUBNET 797 if(addr) 798 mesh_schedule_prefetch_subnet(mesh, qinfo, qflags, leeway, 1, 799 rpz_passthru, addr, opt_list); 800 else 801 #endif 802 mesh_schedule_prefetch(mesh, qinfo, qflags, leeway, 1, 803 rpz_passthru); 804 } 805 806 void mesh_report_reply(struct mesh_area* mesh, struct outbound_entry* e, 807 struct comm_reply* reply, int what) 808 { 809 enum module_ev event = module_event_reply; 810 e->qstate->reply = reply; 811 if(what != NETEVENT_NOERROR) { 812 event = module_event_noreply; 813 if(what == NETEVENT_CAPSFAIL) 814 event = module_event_capsfail; 815 } 816 mesh_run(mesh, e->qstate->mesh_info, event, e); 817 } 818 819 struct mesh_state* 820 mesh_state_create(struct module_env* env, struct query_info* qinfo, 821 struct respip_client_info* cinfo, uint16_t qflags, int prime, 822 int valrec) 823 { 824 struct regional* region = alloc_reg_obtain(env->alloc); 825 struct mesh_state* mstate; 826 int i; 827 if(!region) 828 return NULL; 829 mstate = (struct mesh_state*)regional_alloc(region, 830 sizeof(struct mesh_state)); 831 if(!mstate) { 832 alloc_reg_release(env->alloc, region); 833 return NULL; 834 } 835 memset(mstate, 0, sizeof(*mstate)); 836 mstate->node = *RBTREE_NULL; 837 mstate->run_node = *RBTREE_NULL; 838 mstate->node.key = mstate; 839 mstate->run_node.key = mstate; 840 mstate->reply_list = NULL; 841 mstate->list_select = mesh_no_list; 842 mstate->replies_sent = 0; 843 rbtree_init(&mstate->super_set, &mesh_state_ref_compare); 844 rbtree_init(&mstate->sub_set, &mesh_state_ref_compare); 845 mstate->num_activated = 0; 846 mstate->unique = NULL; 847 /* init module qstate */ 848 mstate->s.qinfo.qtype = qinfo->qtype; 849 mstate->s.qinfo.qclass = qinfo->qclass; 850 mstate->s.qinfo.local_alias = NULL; 851 mstate->s.qinfo.qname_len = qinfo->qname_len; 852 mstate->s.qinfo.qname = regional_alloc_init(region, qinfo->qname, 853 qinfo->qname_len); 854 if(!mstate->s.qinfo.qname) { 855 alloc_reg_release(env->alloc, region); 856 return NULL; 857 } 858 if(cinfo) { 859 mstate->s.client_info = regional_alloc_init(region, cinfo, 860 sizeof(*cinfo)); 861 if(!mstate->s.client_info) { 862 alloc_reg_release(env->alloc, region); 863 return NULL; 864 } 865 } 866 /* remove all weird bits from qflags */ 867 mstate->s.query_flags = (qflags & (BIT_RD|BIT_CD)); 868 mstate->s.is_priming = prime; 869 mstate->s.is_valrec = valrec; 870 mstate->s.reply = NULL; 871 mstate->s.region = region; 872 mstate->s.curmod = 0; 873 mstate->s.return_msg = 0; 874 mstate->s.return_rcode = LDNS_RCODE_NOERROR; 875 mstate->s.env = env; 876 mstate->s.mesh_info = mstate; 877 mstate->s.prefetch_leeway = 0; 878 mstate->s.serve_expired_data = NULL; 879 mstate->s.no_cache_lookup = 0; 880 mstate->s.no_cache_store = 0; 881 mstate->s.need_refetch = 0; 882 mstate->s.was_ratelimited = 0; 883 mstate->s.qstarttime = *env->now; 884 885 /* init modules */ 886 for(i=0; i<env->mesh->mods.num; i++) { 887 mstate->s.minfo[i] = NULL; 888 mstate->s.ext_state[i] = module_state_initial; 889 } 890 /* init edns option lists */ 891 mstate->s.edns_opts_front_in = NULL; 892 mstate->s.edns_opts_back_out = NULL; 893 mstate->s.edns_opts_back_in = NULL; 894 mstate->s.edns_opts_front_out = NULL; 895 896 return mstate; 897 } 898 899 void 900 mesh_state_make_unique(struct mesh_state* mstate) 901 { 902 mstate->unique = mstate; 903 } 904 905 void 906 mesh_state_cleanup(struct mesh_state* mstate) 907 { 908 struct mesh_area* mesh; 909 int i; 910 if(!mstate) 911 return; 912 mesh = mstate->s.env->mesh; 913 /* Stop and delete the serve expired timer */ 914 if(mstate->s.serve_expired_data && mstate->s.serve_expired_data->timer) { 915 comm_timer_delete(mstate->s.serve_expired_data->timer); 916 mstate->s.serve_expired_data->timer = NULL; 917 } 918 /* drop unsent replies */ 919 if(!mstate->replies_sent) { 920 struct mesh_reply* rep = mstate->reply_list; 921 struct mesh_cb* cb; 922 /* in tcp_req_info, the mstates linked are removed, but 923 * the reply_list is now NULL, so the remove-from-empty-list 924 * takes no time and also it does not do the mesh accounting */ 925 mstate->reply_list = NULL; 926 for(; rep; rep=rep->next) { 927 comm_point_drop_reply(&rep->query_reply); 928 log_assert(mesh->num_reply_addrs > 0); 929 mesh->num_reply_addrs--; 930 } 931 while((cb = mstate->cb_list)!=NULL) { 932 mstate->cb_list = cb->next; 933 fptr_ok(fptr_whitelist_mesh_cb(cb->cb)); 934 (*cb->cb)(cb->cb_arg, LDNS_RCODE_SERVFAIL, NULL, 935 sec_status_unchecked, NULL, 0); 936 log_assert(mesh->num_reply_addrs > 0); 937 mesh->num_reply_addrs--; 938 } 939 } 940 941 /* de-init modules */ 942 for(i=0; i<mesh->mods.num; i++) { 943 fptr_ok(fptr_whitelist_mod_clear(mesh->mods.mod[i]->clear)); 944 (*mesh->mods.mod[i]->clear)(&mstate->s, i); 945 mstate->s.minfo[i] = NULL; 946 mstate->s.ext_state[i] = module_finished; 947 } 948 alloc_reg_release(mstate->s.env->alloc, mstate->s.region); 949 } 950 951 void 952 mesh_state_delete(struct module_qstate* qstate) 953 { 954 struct mesh_area* mesh; 955 struct mesh_state_ref* super, ref; 956 struct mesh_state* mstate; 957 if(!qstate) 958 return; 959 mstate = qstate->mesh_info; 960 mesh = mstate->s.env->mesh; 961 mesh_detach_subs(&mstate->s); 962 if(mstate->list_select == mesh_forever_list) { 963 mesh->num_forever_states --; 964 mesh_list_remove(mstate, &mesh->forever_first, 965 &mesh->forever_last); 966 } else if(mstate->list_select == mesh_jostle_list) { 967 mesh_list_remove(mstate, &mesh->jostle_first, 968 &mesh->jostle_last); 969 } 970 if(!mstate->reply_list && !mstate->cb_list 971 && mstate->super_set.count == 0) { 972 log_assert(mesh->num_detached_states > 0); 973 mesh->num_detached_states--; 974 } 975 if(mstate->reply_list || mstate->cb_list) { 976 log_assert(mesh->num_reply_states > 0); 977 mesh->num_reply_states--; 978 } 979 ref.node.key = &ref; 980 ref.s = mstate; 981 RBTREE_FOR(super, struct mesh_state_ref*, &mstate->super_set) { 982 (void)rbtree_delete(&super->s->sub_set, &ref); 983 } 984 (void)rbtree_delete(&mesh->run, mstate); 985 (void)rbtree_delete(&mesh->all, mstate); 986 mesh_state_cleanup(mstate); 987 } 988 989 /** helper recursive rbtree find routine */ 990 static int 991 find_in_subsub(struct mesh_state* m, struct mesh_state* tofind, size_t *c) 992 { 993 struct mesh_state_ref* r; 994 if((*c)++ > MESH_MAX_SUBSUB) 995 return 1; 996 RBTREE_FOR(r, struct mesh_state_ref*, &m->sub_set) { 997 if(r->s == tofind || find_in_subsub(r->s, tofind, c)) 998 return 1; 999 } 1000 return 0; 1001 } 1002 1003 /** find cycle for already looked up mesh_state */ 1004 static int 1005 mesh_detect_cycle_found(struct module_qstate* qstate, struct mesh_state* dep_m) 1006 { 1007 struct mesh_state* cyc_m = qstate->mesh_info; 1008 size_t counter = 0; 1009 if(!dep_m) 1010 return 0; 1011 if(dep_m == cyc_m || find_in_subsub(dep_m, cyc_m, &counter)) { 1012 if(counter > MESH_MAX_SUBSUB) 1013 return 2; 1014 return 1; 1015 } 1016 return 0; 1017 } 1018 1019 void mesh_detach_subs(struct module_qstate* qstate) 1020 { 1021 struct mesh_area* mesh = qstate->env->mesh; 1022 struct mesh_state_ref* ref, lookup; 1023 #ifdef UNBOUND_DEBUG 1024 struct rbnode_type* n; 1025 #endif 1026 lookup.node.key = &lookup; 1027 lookup.s = qstate->mesh_info; 1028 RBTREE_FOR(ref, struct mesh_state_ref*, &qstate->mesh_info->sub_set) { 1029 #ifdef UNBOUND_DEBUG 1030 n = 1031 #else 1032 (void) 1033 #endif 1034 rbtree_delete(&ref->s->super_set, &lookup); 1035 log_assert(n != NULL); /* must have been present */ 1036 if(!ref->s->reply_list && !ref->s->cb_list 1037 && ref->s->super_set.count == 0) { 1038 mesh->num_detached_states++; 1039 log_assert(mesh->num_detached_states + 1040 mesh->num_reply_states <= mesh->all.count); 1041 } 1042 } 1043 rbtree_init(&qstate->mesh_info->sub_set, &mesh_state_ref_compare); 1044 } 1045 1046 int mesh_add_sub(struct module_qstate* qstate, struct query_info* qinfo, 1047 uint16_t qflags, int prime, int valrec, struct module_qstate** newq, 1048 struct mesh_state** sub) 1049 { 1050 /* find it, if not, create it */ 1051 struct mesh_area* mesh = qstate->env->mesh; 1052 *sub = mesh_area_find(mesh, NULL, qinfo, qflags, 1053 prime, valrec); 1054 if(mesh_detect_cycle_found(qstate, *sub)) { 1055 verbose(VERB_ALGO, "attach failed, cycle detected"); 1056 return 0; 1057 } 1058 if(!*sub) { 1059 #ifdef UNBOUND_DEBUG 1060 struct rbnode_type* n; 1061 #endif 1062 /* create a new one */ 1063 *sub = mesh_state_create(qstate->env, qinfo, NULL, qflags, prime, 1064 valrec); 1065 if(!*sub) { 1066 log_err("mesh_attach_sub: out of memory"); 1067 return 0; 1068 } 1069 #ifdef UNBOUND_DEBUG 1070 n = 1071 #else 1072 (void) 1073 #endif 1074 rbtree_insert(&mesh->all, &(*sub)->node); 1075 log_assert(n != NULL); 1076 /* set detached (it is now) */ 1077 mesh->num_detached_states++; 1078 /* set new query state to run */ 1079 #ifdef UNBOUND_DEBUG 1080 n = 1081 #else 1082 (void) 1083 #endif 1084 rbtree_insert(&mesh->run, &(*sub)->run_node); 1085 log_assert(n != NULL); 1086 *newq = &(*sub)->s; 1087 } else 1088 *newq = NULL; 1089 return 1; 1090 } 1091 1092 int mesh_attach_sub(struct module_qstate* qstate, struct query_info* qinfo, 1093 uint16_t qflags, int prime, int valrec, struct module_qstate** newq) 1094 { 1095 struct mesh_area* mesh = qstate->env->mesh; 1096 struct mesh_state* sub = NULL; 1097 int was_detached; 1098 if(!mesh_add_sub(qstate, qinfo, qflags, prime, valrec, newq, &sub)) 1099 return 0; 1100 was_detached = (sub->super_set.count == 0); 1101 if(!mesh_state_attachment(qstate->mesh_info, sub)) 1102 return 0; 1103 /* if it was a duplicate attachment, the count was not zero before */ 1104 if(!sub->reply_list && !sub->cb_list && was_detached && 1105 sub->super_set.count == 1) { 1106 /* it used to be detached, before this one got added */ 1107 log_assert(mesh->num_detached_states > 0); 1108 mesh->num_detached_states--; 1109 } 1110 /* *newq will be run when inited after the current module stops */ 1111 return 1; 1112 } 1113 1114 int mesh_state_attachment(struct mesh_state* super, struct mesh_state* sub) 1115 { 1116 #ifdef UNBOUND_DEBUG 1117 struct rbnode_type* n; 1118 #endif 1119 struct mesh_state_ref* subref; /* points to sub, inserted in super */ 1120 struct mesh_state_ref* superref; /* points to super, inserted in sub */ 1121 if( !(subref = regional_alloc(super->s.region, 1122 sizeof(struct mesh_state_ref))) || 1123 !(superref = regional_alloc(sub->s.region, 1124 sizeof(struct mesh_state_ref))) ) { 1125 log_err("mesh_state_attachment: out of memory"); 1126 return 0; 1127 } 1128 superref->node.key = superref; 1129 superref->s = super; 1130 subref->node.key = subref; 1131 subref->s = sub; 1132 if(!rbtree_insert(&sub->super_set, &superref->node)) { 1133 /* this should not happen, iterator and validator do not 1134 * attach subqueries that are identical. */ 1135 /* already attached, we are done, nothing todo. 1136 * since superref and subref already allocated in region, 1137 * we cannot free them */ 1138 return 1; 1139 } 1140 #ifdef UNBOUND_DEBUG 1141 n = 1142 #else 1143 (void) 1144 #endif 1145 rbtree_insert(&super->sub_set, &subref->node); 1146 log_assert(n != NULL); /* we checked above if statement, the reverse 1147 administration should not fail now, unless they are out of sync */ 1148 return 1; 1149 } 1150 1151 /** 1152 * callback results to mesh cb entry 1153 * @param m: mesh state to send it for. 1154 * @param rcode: if not 0, error code. 1155 * @param rep: reply to send (or NULL if rcode is set). 1156 * @param r: callback entry 1157 * @param start_time: the time to pass to callback functions, it is 0 or 1158 * a value from one of the packets if the mesh state had packets. 1159 */ 1160 static void 1161 mesh_do_callback(struct mesh_state* m, int rcode, struct reply_info* rep, 1162 struct mesh_cb* r, struct timeval* start_time) 1163 { 1164 int secure; 1165 char* reason = NULL; 1166 int was_ratelimited = m->s.was_ratelimited; 1167 /* bogus messages are not made into servfail, sec_status passed 1168 * to the callback function */ 1169 if(rep && rep->security == sec_status_secure) 1170 secure = 1; 1171 else secure = 0; 1172 if(!rep && rcode == LDNS_RCODE_NOERROR) 1173 rcode = LDNS_RCODE_SERVFAIL; 1174 if(!rcode && rep && (rep->security == sec_status_bogus || 1175 rep->security == sec_status_secure_sentinel_fail)) { 1176 if(!(reason = errinf_to_str_bogus(&m->s))) 1177 rcode = LDNS_RCODE_SERVFAIL; 1178 } 1179 /* send the reply */ 1180 if(rcode) { 1181 if(rcode == LDNS_RCODE_SERVFAIL) { 1182 if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s, 1183 rep, rcode, &r->edns, NULL, m->s.region, start_time)) 1184 r->edns.opt_list_inplace_cb_out = NULL; 1185 } else { 1186 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode, 1187 &r->edns, NULL, m->s.region, start_time)) 1188 r->edns.opt_list_inplace_cb_out = NULL; 1189 } 1190 fptr_ok(fptr_whitelist_mesh_cb(r->cb)); 1191 (*r->cb)(r->cb_arg, rcode, r->buf, sec_status_unchecked, NULL, 1192 was_ratelimited); 1193 } else { 1194 size_t udp_size = r->edns.udp_size; 1195 sldns_buffer_clear(r->buf); 1196 r->edns.edns_version = EDNS_ADVERTISED_VERSION; 1197 r->edns.udp_size = EDNS_ADVERTISED_SIZE; 1198 r->edns.ext_rcode = 0; 1199 r->edns.bits &= EDNS_DO; 1200 if(m->s.env->cfg->disable_edns_do && (r->edns.bits&EDNS_DO)) 1201 r->edns.edns_present = 0; 1202 1203 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, 1204 LDNS_RCODE_NOERROR, &r->edns, NULL, m->s.region, start_time) || 1205 !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 1206 r->qflags, r->buf, 0, 1, 1207 m->s.env->scratch, udp_size, &r->edns, 1208 (int)(r->edns.bits & EDNS_DO), secure)) 1209 { 1210 fptr_ok(fptr_whitelist_mesh_cb(r->cb)); 1211 (*r->cb)(r->cb_arg, LDNS_RCODE_SERVFAIL, r->buf, 1212 sec_status_unchecked, NULL, 0); 1213 } else { 1214 fptr_ok(fptr_whitelist_mesh_cb(r->cb)); 1215 (*r->cb)(r->cb_arg, LDNS_RCODE_NOERROR, r->buf, 1216 (rep?rep->security:sec_status_unchecked), 1217 reason, was_ratelimited); 1218 } 1219 } 1220 free(reason); 1221 log_assert(m->s.env->mesh->num_reply_addrs > 0); 1222 m->s.env->mesh->num_reply_addrs--; 1223 } 1224 1225 static inline int 1226 mesh_is_rpz_respip_tcponly_action(struct mesh_state const* m) 1227 { 1228 struct respip_action_info const* respip_info = m->s.respip_action_info; 1229 return (respip_info == NULL 1230 ? 0 1231 : (respip_info->rpz_used 1232 && !respip_info->rpz_disabled 1233 && respip_info->action == respip_truncate)) 1234 || m->s.tcp_required; 1235 } 1236 1237 static inline int 1238 mesh_is_udp(struct mesh_reply const* r) 1239 { 1240 return r->query_reply.c->type == comm_udp; 1241 } 1242 1243 static inline void 1244 mesh_find_and_attach_ede_and_reason(struct mesh_state* m, 1245 struct reply_info* rep, struct mesh_reply* r) 1246 { 1247 /* OLD note: 1248 * During validation the EDE code can be received via two 1249 * code paths. One code path fills the reply_info EDE, and 1250 * the other fills it in the errinf_strlist. These paths 1251 * intersect at some points, but where is opaque due to 1252 * the complexity of the validator. At the time of writing 1253 * we make the choice to prefer the EDE from errinf_strlist 1254 * but a compelling reason to do otherwise is just as valid 1255 * NEW note: 1256 * The compelling reason is that with caching support, the value 1257 * in the reply_info is cached. 1258 * The reason members of the reply_info struct should be 1259 * updated as they are already cached. No reason to 1260 * try and find the EDE information in errinf anymore. 1261 */ 1262 if(rep->reason_bogus != LDNS_EDE_NONE) { 1263 edns_opt_list_append_ede(&r->edns.opt_list_out, 1264 m->s.region, rep->reason_bogus, rep->reason_bogus_str); 1265 } 1266 } 1267 1268 /** 1269 * Send reply to mesh reply entry 1270 * @param m: mesh state to send it for. 1271 * @param rcode: if not 0, error code. 1272 * @param rep: reply to send (or NULL if rcode is set). 1273 * @param r: reply entry 1274 * @param r_buffer: buffer to use for reply entry. 1275 * @param prev: previous reply, already has its answer encoded in buffer. 1276 * @param prev_buffer: buffer for previous reply. 1277 */ 1278 static void 1279 mesh_send_reply(struct mesh_state* m, int rcode, struct reply_info* rep, 1280 struct mesh_reply* r, struct sldns_buffer* r_buffer, 1281 struct mesh_reply* prev, struct sldns_buffer* prev_buffer) 1282 { 1283 struct timeval end_time; 1284 struct timeval duration; 1285 int secure; 1286 /* briefly set the replylist to null in case the 1287 * meshsendreply calls tcpreqinfo sendreply that 1288 * comm_point_drops because of size, and then the 1289 * null stops the mesh state remove and thus 1290 * reply_list modification and accounting */ 1291 struct mesh_reply* rlist = m->reply_list; 1292 1293 /* rpz: apply actions */ 1294 rcode = mesh_is_udp(r) && mesh_is_rpz_respip_tcponly_action(m) 1295 ? (rcode|BIT_TC) : rcode; 1296 1297 /* examine security status */ 1298 if(m->s.env->need_to_validate && (!(r->qflags&BIT_CD) || 1299 m->s.env->cfg->ignore_cd) && rep && 1300 (rep->security <= sec_status_bogus || 1301 rep->security == sec_status_secure_sentinel_fail)) { 1302 rcode = LDNS_RCODE_SERVFAIL; 1303 if(m->s.env->cfg->stat_extended) 1304 m->s.env->mesh->ans_bogus++; 1305 } 1306 if(rep && rep->security == sec_status_secure) 1307 secure = 1; 1308 else secure = 0; 1309 if(!rep && rcode == LDNS_RCODE_NOERROR) 1310 rcode = LDNS_RCODE_SERVFAIL; 1311 if(r->query_reply.c->use_h2) { 1312 r->query_reply.c->h2_stream = r->h2_stream; 1313 /* Mesh reply won't exist for long anymore. Make it impossible 1314 * for HTTP/2 stream to refer to mesh state, in case 1315 * connection gets cleanup before HTTP/2 stream close. */ 1316 r->h2_stream->mesh_state = NULL; 1317 } 1318 /* send the reply */ 1319 /* We don't reuse the encoded answer if: 1320 * - either the previous or current response has a local alias. We could 1321 * compare the alias records and still reuse the previous answer if they 1322 * are the same, but that would be complicated and error prone for the 1323 * relatively minor case. So we err on the side of safety. 1324 * - there are registered callback functions for the given rcode, as these 1325 * need to be called for each reply. */ 1326 if(((rcode != LDNS_RCODE_SERVFAIL && 1327 !m->s.env->inplace_cb_lists[inplace_cb_reply]) || 1328 (rcode == LDNS_RCODE_SERVFAIL && 1329 !m->s.env->inplace_cb_lists[inplace_cb_reply_servfail])) && 1330 prev && prev_buffer && prev->qflags == r->qflags && 1331 !prev->local_alias && !r->local_alias && 1332 prev->edns.edns_present == r->edns.edns_present && 1333 prev->edns.bits == r->edns.bits && 1334 prev->edns.udp_size == r->edns.udp_size && 1335 edns_opt_list_compare(prev->edns.opt_list_out, r->edns.opt_list_out) == 0 && 1336 edns_opt_list_compare(prev->edns.opt_list_inplace_cb_out, r->edns.opt_list_inplace_cb_out) == 0 1337 ) { 1338 /* if the previous reply is identical to this one, fix ID */ 1339 if(prev_buffer != r_buffer) 1340 sldns_buffer_copy(r_buffer, prev_buffer); 1341 sldns_buffer_write_at(r_buffer, 0, &r->qid, sizeof(uint16_t)); 1342 sldns_buffer_write_at(r_buffer, 12, r->qname, 1343 m->s.qinfo.qname_len); 1344 m->reply_list = NULL; 1345 comm_point_send_reply(&r->query_reply); 1346 m->reply_list = rlist; 1347 } else if(rcode) { 1348 m->s.qinfo.qname = r->qname; 1349 m->s.qinfo.local_alias = r->local_alias; 1350 if(rcode == LDNS_RCODE_SERVFAIL) { 1351 if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s, 1352 rep, rcode, &r->edns, &r->query_reply, m->s.region, &r->start_time)) 1353 r->edns.opt_list_inplace_cb_out = NULL; 1354 } else { 1355 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, rcode, 1356 &r->edns, &r->query_reply, m->s.region, &r->start_time)) 1357 r->edns.opt_list_inplace_cb_out = NULL; 1358 } 1359 /* Send along EDE EDNS0 option when SERVFAILing; usually 1360 * DNSSEC validation failures */ 1361 /* Since we are SERVFAILing here, CD bit and rep->security 1362 * is already handled. */ 1363 if(m->s.env->cfg->ede && rep) { 1364 mesh_find_and_attach_ede_and_reason(m, rep, r); 1365 } 1366 error_encode(r_buffer, rcode, &m->s.qinfo, r->qid, 1367 r->qflags, &r->edns); 1368 m->reply_list = NULL; 1369 comm_point_send_reply(&r->query_reply); 1370 m->reply_list = rlist; 1371 } else { 1372 size_t udp_size = r->edns.udp_size; 1373 r->edns.edns_version = EDNS_ADVERTISED_VERSION; 1374 r->edns.udp_size = EDNS_ADVERTISED_SIZE; 1375 r->edns.ext_rcode = 0; 1376 r->edns.bits &= EDNS_DO; 1377 if(m->s.env->cfg->disable_edns_do && (r->edns.bits&EDNS_DO)) 1378 r->edns.edns_present = 0; 1379 m->s.qinfo.qname = r->qname; 1380 m->s.qinfo.local_alias = r->local_alias; 1381 1382 /* Attach EDE without SERVFAIL if the validation failed. 1383 * Need to explicitly check for rep->security otherwise failed 1384 * validation paths may attach to a secure answer. */ 1385 if(m->s.env->cfg->ede && rep && 1386 (rep->security <= sec_status_bogus || 1387 rep->security == sec_status_secure_sentinel_fail)) { 1388 mesh_find_and_attach_ede_and_reason(m, rep, r); 1389 } 1390 1391 if(!inplace_cb_reply_call(m->s.env, &m->s.qinfo, &m->s, rep, 1392 LDNS_RCODE_NOERROR, &r->edns, &r->query_reply, m->s.region, &r->start_time) || 1393 !reply_info_answer_encode(&m->s.qinfo, rep, r->qid, 1394 r->qflags, r_buffer, 0, 1, m->s.env->scratch, 1395 udp_size, &r->edns, (int)(r->edns.bits & EDNS_DO), 1396 secure)) 1397 { 1398 if(!inplace_cb_reply_servfail_call(m->s.env, &m->s.qinfo, &m->s, 1399 rep, LDNS_RCODE_SERVFAIL, &r->edns, &r->query_reply, m->s.region, &r->start_time)) 1400 r->edns.opt_list_inplace_cb_out = NULL; 1401 /* internal server error (probably malloc failure) so no 1402 * EDE (RFC8914) needed */ 1403 error_encode(r_buffer, LDNS_RCODE_SERVFAIL, 1404 &m->s.qinfo, r->qid, r->qflags, &r->edns); 1405 } 1406 m->reply_list = NULL; 1407 comm_point_send_reply(&r->query_reply); 1408 m->reply_list = rlist; 1409 } 1410 /* account */ 1411 log_assert(m->s.env->mesh->num_reply_addrs > 0); 1412 m->s.env->mesh->num_reply_addrs--; 1413 end_time = *m->s.env->now_tv; 1414 timeval_subtract(&duration, &end_time, &r->start_time); 1415 verbose(VERB_ALGO, "query took " ARG_LL "d.%6.6d sec", 1416 (long long)duration.tv_sec, (int)duration.tv_usec); 1417 m->s.env->mesh->replies_sent++; 1418 timeval_add(&m->s.env->mesh->replies_sum_wait, &duration); 1419 timehist_insert(m->s.env->mesh->histogram, &duration); 1420 if(m->s.env->cfg->stat_extended) { 1421 uint16_t rc = FLAGS_GET_RCODE(sldns_buffer_read_u16_at( 1422 r_buffer, 2)); 1423 if(secure) m->s.env->mesh->ans_secure++; 1424 m->s.env->mesh->ans_rcode[ rc ] ++; 1425 if(rc == 0 && LDNS_ANCOUNT(sldns_buffer_begin(r_buffer)) == 0) 1426 m->s.env->mesh->ans_nodata++; 1427 } 1428 /* Log reply sent */ 1429 if(m->s.env->cfg->log_replies) { 1430 log_reply_info(NO_VERBOSE, &m->s.qinfo, 1431 &r->query_reply.client_addr, 1432 r->query_reply.client_addrlen, duration, 0, r_buffer); 1433 } 1434 } 1435 1436 void mesh_query_done(struct mesh_state* mstate) 1437 { 1438 struct mesh_reply* r; 1439 struct mesh_reply* prev = NULL; 1440 struct sldns_buffer* prev_buffer = NULL; 1441 struct mesh_cb* c; 1442 struct reply_info* rep = (mstate->s.return_msg? 1443 mstate->s.return_msg->rep:NULL); 1444 struct timeval tv = {0, 0}; 1445 int i = 0; 1446 /* No need for the serve expired timer anymore; we are going to reply. */ 1447 if(mstate->s.serve_expired_data) { 1448 comm_timer_delete(mstate->s.serve_expired_data->timer); 1449 mstate->s.serve_expired_data->timer = NULL; 1450 } 1451 if(mstate->s.return_rcode == LDNS_RCODE_SERVFAIL || 1452 (rep && FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_SERVFAIL)) { 1453 /* we are SERVFAILing; check for expired answer here */ 1454 mesh_serve_expired_callback(mstate); 1455 if((mstate->reply_list || mstate->cb_list) 1456 && mstate->s.env->cfg->log_servfail 1457 && !mstate->s.env->cfg->val_log_squelch) { 1458 char* err = errinf_to_str_servfail(&mstate->s); 1459 if(err) 1460 log_err("%s", err); 1461 free(err); 1462 } 1463 } 1464 for(r = mstate->reply_list; r; r = r->next) { 1465 i++; 1466 tv = r->start_time; 1467 1468 /* if a response-ip address block has been stored the 1469 * information should be logged for each client. */ 1470 if(mstate->s.respip_action_info && 1471 mstate->s.respip_action_info->addrinfo) { 1472 respip_inform_print(mstate->s.respip_action_info, 1473 r->qname, mstate->s.qinfo.qtype, 1474 mstate->s.qinfo.qclass, r->local_alias, 1475 &r->query_reply.client_addr, 1476 r->query_reply.client_addrlen); 1477 } 1478 1479 /* if this query is determined to be dropped during the 1480 * mesh processing, this is the point to take that action. */ 1481 if(mstate->s.is_drop) { 1482 /* briefly set the reply_list to NULL, so that the 1483 * tcp req info cleanup routine that calls the mesh 1484 * to deregister the meshstate for it is not done 1485 * because the list is NULL and also accounting is not 1486 * done there, but instead we do that here. */ 1487 struct mesh_reply* reply_list = mstate->reply_list; 1488 mstate->reply_list = NULL; 1489 comm_point_drop_reply(&r->query_reply); 1490 mstate->reply_list = reply_list; 1491 } else { 1492 struct sldns_buffer* r_buffer = r->query_reply.c->buffer; 1493 if(r->query_reply.c->tcp_req_info) { 1494 r_buffer = r->query_reply.c->tcp_req_info->spool_buffer; 1495 prev_buffer = NULL; 1496 } 1497 mesh_send_reply(mstate, mstate->s.return_rcode, rep, 1498 r, r_buffer, prev, prev_buffer); 1499 if(r->query_reply.c->tcp_req_info) { 1500 tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate); 1501 r_buffer = NULL; 1502 } 1503 prev = r; 1504 prev_buffer = r_buffer; 1505 } 1506 } 1507 /* Account for each reply sent. */ 1508 if(i > 0 && mstate->s.respip_action_info && 1509 mstate->s.respip_action_info->addrinfo && 1510 mstate->s.env->cfg->stat_extended && 1511 mstate->s.respip_action_info->rpz_used) { 1512 if(mstate->s.respip_action_info->rpz_disabled) 1513 mstate->s.env->mesh->rpz_action[RPZ_DISABLED_ACTION] += i; 1514 if(mstate->s.respip_action_info->rpz_cname_override) 1515 mstate->s.env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION] += i; 1516 else 1517 mstate->s.env->mesh->rpz_action[respip_action_to_rpz_action( 1518 mstate->s.respip_action_info->action)] += i; 1519 } 1520 if(!mstate->s.is_drop && i > 0) { 1521 if(mstate->s.env->cfg->stat_extended 1522 && mstate->s.is_cachedb_answer) { 1523 mstate->s.env->mesh->ans_cachedb += i; 1524 } 1525 } 1526 1527 /* Mesh area accounting */ 1528 if(mstate->reply_list) { 1529 mstate->reply_list = NULL; 1530 if(!mstate->reply_list && !mstate->cb_list) { 1531 /* was a reply state, not anymore */ 1532 log_assert(mstate->s.env->mesh->num_reply_states > 0); 1533 mstate->s.env->mesh->num_reply_states--; 1534 } 1535 if(!mstate->reply_list && !mstate->cb_list && 1536 mstate->super_set.count == 0) 1537 mstate->s.env->mesh->num_detached_states++; 1538 } 1539 mstate->replies_sent = 1; 1540 1541 while((c = mstate->cb_list) != NULL) { 1542 /* take this cb off the list; so that the list can be 1543 * changed, eg. by adds from the callback routine */ 1544 if(!mstate->reply_list && mstate->cb_list && !c->next) { 1545 /* was a reply state, not anymore */ 1546 log_assert(mstate->s.env->mesh->num_reply_states > 0); 1547 mstate->s.env->mesh->num_reply_states--; 1548 } 1549 mstate->cb_list = c->next; 1550 if(!mstate->reply_list && !mstate->cb_list && 1551 mstate->super_set.count == 0) 1552 mstate->s.env->mesh->num_detached_states++; 1553 mesh_do_callback(mstate, mstate->s.return_rcode, rep, c, &tv); 1554 } 1555 } 1556 1557 void mesh_walk_supers(struct mesh_area* mesh, struct mesh_state* mstate) 1558 { 1559 struct mesh_state_ref* ref; 1560 RBTREE_FOR(ref, struct mesh_state_ref*, &mstate->super_set) 1561 { 1562 /* make super runnable */ 1563 (void)rbtree_insert(&mesh->run, &ref->s->run_node); 1564 /* callback the function to inform super of result */ 1565 fptr_ok(fptr_whitelist_mod_inform_super( 1566 mesh->mods.mod[ref->s->s.curmod]->inform_super)); 1567 (*mesh->mods.mod[ref->s->s.curmod]->inform_super)(&mstate->s, 1568 ref->s->s.curmod, &ref->s->s); 1569 /* copy state that is always relevant to super */ 1570 copy_state_to_super(&mstate->s, ref->s->s.curmod, &ref->s->s); 1571 } 1572 } 1573 1574 struct mesh_state* mesh_area_find(struct mesh_area* mesh, 1575 struct respip_client_info* cinfo, struct query_info* qinfo, 1576 uint16_t qflags, int prime, int valrec) 1577 { 1578 struct mesh_state key; 1579 struct mesh_state* result; 1580 1581 key.node.key = &key; 1582 key.s.is_priming = prime; 1583 key.s.is_valrec = valrec; 1584 key.s.qinfo = *qinfo; 1585 key.s.query_flags = qflags; 1586 /* We are searching for a similar mesh state when we DO want to 1587 * aggregate the state. Thus unique is set to NULL. (default when we 1588 * desire aggregation).*/ 1589 key.unique = NULL; 1590 key.s.client_info = cinfo; 1591 1592 result = (struct mesh_state*)rbtree_search(&mesh->all, &key); 1593 return result; 1594 } 1595 1596 int mesh_state_add_cb(struct mesh_state* s, struct edns_data* edns, 1597 sldns_buffer* buf, mesh_cb_func_type cb, void* cb_arg, 1598 uint16_t qid, uint16_t qflags) 1599 { 1600 struct mesh_cb* r = regional_alloc(s->s.region, 1601 sizeof(struct mesh_cb)); 1602 if(!r) 1603 return 0; 1604 r->buf = buf; 1605 log_assert(fptr_whitelist_mesh_cb(cb)); /* early failure ifmissing*/ 1606 r->cb = cb; 1607 r->cb_arg = cb_arg; 1608 r->edns = *edns; 1609 if(edns->opt_list_in && !(r->edns.opt_list_in = 1610 edns_opt_copy_region(edns->opt_list_in, s->s.region))) 1611 return 0; 1612 if(edns->opt_list_out && !(r->edns.opt_list_out = 1613 edns_opt_copy_region(edns->opt_list_out, s->s.region))) 1614 return 0; 1615 if(edns->opt_list_inplace_cb_out && !(r->edns.opt_list_inplace_cb_out = 1616 edns_opt_copy_region(edns->opt_list_inplace_cb_out, s->s.region))) 1617 return 0; 1618 r->qid = qid; 1619 r->qflags = qflags; 1620 r->next = s->cb_list; 1621 s->cb_list = r; 1622 return 1; 1623 1624 } 1625 1626 int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns, 1627 struct comm_reply* rep, uint16_t qid, uint16_t qflags, 1628 const struct query_info* qinfo) 1629 { 1630 struct mesh_reply* r = regional_alloc(s->s.region, 1631 sizeof(struct mesh_reply)); 1632 if(!r) 1633 return 0; 1634 r->query_reply = *rep; 1635 r->edns = *edns; 1636 if(edns->opt_list_in && !(r->edns.opt_list_in = 1637 edns_opt_copy_region(edns->opt_list_in, s->s.region))) 1638 return 0; 1639 if(edns->opt_list_out && !(r->edns.opt_list_out = 1640 edns_opt_copy_region(edns->opt_list_out, s->s.region))) 1641 return 0; 1642 if(edns->opt_list_inplace_cb_out && !(r->edns.opt_list_inplace_cb_out = 1643 edns_opt_copy_region(edns->opt_list_inplace_cb_out, s->s.region))) 1644 return 0; 1645 r->qid = qid; 1646 r->qflags = qflags; 1647 r->start_time = *s->s.env->now_tv; 1648 r->next = s->reply_list; 1649 r->qname = regional_alloc_init(s->s.region, qinfo->qname, 1650 s->s.qinfo.qname_len); 1651 if(!r->qname) 1652 return 0; 1653 if(rep->c->use_h2) 1654 r->h2_stream = rep->c->h2_stream; 1655 1656 /* Data related to local alias stored in 'qinfo' (if any) is ephemeral 1657 * and can be different for different original queries (even if the 1658 * replaced query name is the same). So we need to make a deep copy 1659 * and store the copy for each reply info. */ 1660 if(qinfo->local_alias) { 1661 struct packed_rrset_data* d; 1662 struct packed_rrset_data* dsrc; 1663 r->local_alias = regional_alloc_zero(s->s.region, 1664 sizeof(*qinfo->local_alias)); 1665 if(!r->local_alias) 1666 return 0; 1667 r->local_alias->rrset = regional_alloc_init(s->s.region, 1668 qinfo->local_alias->rrset, 1669 sizeof(*qinfo->local_alias->rrset)); 1670 if(!r->local_alias->rrset) 1671 return 0; 1672 dsrc = qinfo->local_alias->rrset->entry.data; 1673 1674 /* In the current implementation, a local alias must be 1675 * a single CNAME RR (see worker_handle_request()). */ 1676 log_assert(!qinfo->local_alias->next && dsrc->count == 1 && 1677 qinfo->local_alias->rrset->rk.type == 1678 htons(LDNS_RR_TYPE_CNAME)); 1679 /* we should make a local copy for the owner name of 1680 * the RRset */ 1681 r->local_alias->rrset->rk.dname_len = 1682 qinfo->local_alias->rrset->rk.dname_len; 1683 r->local_alias->rrset->rk.dname = regional_alloc_init( 1684 s->s.region, qinfo->local_alias->rrset->rk.dname, 1685 qinfo->local_alias->rrset->rk.dname_len); 1686 if(!r->local_alias->rrset->rk.dname) 1687 return 0; 1688 1689 /* the rrset is not packed, like in the cache, but it is 1690 * individually allocated with an allocator from localzone. */ 1691 d = regional_alloc_zero(s->s.region, sizeof(*d)); 1692 if(!d) 1693 return 0; 1694 r->local_alias->rrset->entry.data = d; 1695 if(!rrset_insert_rr(s->s.region, d, dsrc->rr_data[0], 1696 dsrc->rr_len[0], dsrc->rr_ttl[0], "CNAME local alias")) 1697 return 0; 1698 } else 1699 r->local_alias = NULL; 1700 1701 s->reply_list = r; 1702 return 1; 1703 } 1704 1705 /* Extract the query info and flags from 'mstate' into '*qinfop' and '*qflags'. 1706 * Since this is only used for internal refetch of otherwise-expired answer, 1707 * we simply ignore the rare failure mode when memory allocation fails. */ 1708 static void 1709 mesh_copy_qinfo(struct mesh_state* mstate, struct query_info** qinfop, 1710 uint16_t* qflags) 1711 { 1712 struct regional* region = mstate->s.env->scratch; 1713 struct query_info* qinfo; 1714 1715 qinfo = regional_alloc_init(region, &mstate->s.qinfo, sizeof(*qinfo)); 1716 if(!qinfo) 1717 return; 1718 qinfo->qname = regional_alloc_init(region, qinfo->qname, 1719 qinfo->qname_len); 1720 if(!qinfo->qname) 1721 return; 1722 *qinfop = qinfo; 1723 *qflags = mstate->s.query_flags; 1724 } 1725 1726 /** 1727 * Continue processing the mesh state at another module. 1728 * Handles module to modules transfer of control. 1729 * Handles module finished. 1730 * @param mesh: the mesh area. 1731 * @param mstate: currently active mesh state. 1732 * Deleted if finished, calls _done and _supers to 1733 * send replies to clients and inform other mesh states. 1734 * This in turn may create additional runnable mesh states. 1735 * @param s: state at which the current module exited. 1736 * @param ev: the event sent to the module. 1737 * returned is the event to send to the next module. 1738 * @return true if continue processing at the new module. 1739 * false if not continued processing is needed. 1740 */ 1741 static int 1742 mesh_continue(struct mesh_area* mesh, struct mesh_state* mstate, 1743 enum module_ext_state s, enum module_ev* ev) 1744 { 1745 mstate->num_activated++; 1746 if(mstate->num_activated > MESH_MAX_ACTIVATION) { 1747 /* module is looping. Stop it. */ 1748 log_err("internal error: looping module (%s) stopped", 1749 mesh->mods.mod[mstate->s.curmod]->name); 1750 log_query_info(NO_VERBOSE, "pass error for qstate", 1751 &mstate->s.qinfo); 1752 s = module_error; 1753 } 1754 if(s == module_wait_module || s == module_restart_next) { 1755 /* start next module */ 1756 mstate->s.curmod++; 1757 if(mesh->mods.num == mstate->s.curmod) { 1758 log_err("Cannot pass to next module; at last module"); 1759 log_query_info(VERB_QUERY, "pass error for qstate", 1760 &mstate->s.qinfo); 1761 mstate->s.curmod--; 1762 return mesh_continue(mesh, mstate, module_error, ev); 1763 } 1764 if(s == module_restart_next) { 1765 int curmod = mstate->s.curmod; 1766 for(; mstate->s.curmod < mesh->mods.num; 1767 mstate->s.curmod++) { 1768 fptr_ok(fptr_whitelist_mod_clear( 1769 mesh->mods.mod[mstate->s.curmod]->clear)); 1770 (*mesh->mods.mod[mstate->s.curmod]->clear) 1771 (&mstate->s, mstate->s.curmod); 1772 mstate->s.minfo[mstate->s.curmod] = NULL; 1773 } 1774 mstate->s.curmod = curmod; 1775 } 1776 *ev = module_event_pass; 1777 return 1; 1778 } 1779 if(s == module_wait_subquery && mstate->sub_set.count == 0) { 1780 log_err("module cannot wait for subquery, subquery list empty"); 1781 log_query_info(VERB_QUERY, "pass error for qstate", 1782 &mstate->s.qinfo); 1783 s = module_error; 1784 } 1785 if(s == module_error && mstate->s.return_rcode == LDNS_RCODE_NOERROR) { 1786 /* error is bad, handle pass back up below */ 1787 mstate->s.return_rcode = LDNS_RCODE_SERVFAIL; 1788 } 1789 if(s == module_error) { 1790 mesh_query_done(mstate); 1791 mesh_walk_supers(mesh, mstate); 1792 mesh_state_delete(&mstate->s); 1793 return 0; 1794 } 1795 if(s == module_finished) { 1796 if(mstate->s.curmod == 0) { 1797 struct query_info* qinfo = NULL; 1798 struct edns_option* opt_list = NULL; 1799 struct sockaddr_storage addr; 1800 uint16_t qflags; 1801 int rpz_p = 0; 1802 1803 #ifdef CLIENT_SUBNET 1804 struct edns_option* ecs; 1805 if(mstate->s.need_refetch && mstate->reply_list && 1806 modstack_find(&mesh->mods, "subnetcache") != -1 && 1807 mstate->s.env->unique_mesh) { 1808 addr = mstate->reply_list->query_reply.client_addr; 1809 } else 1810 #endif 1811 memset(&addr, 0, sizeof(addr)); 1812 1813 mesh_query_done(mstate); 1814 mesh_walk_supers(mesh, mstate); 1815 1816 /* If the answer to the query needs to be refetched 1817 * from an external DNS server, we'll need to schedule 1818 * a prefetch after removing the current state, so 1819 * we need to make a copy of the query info here. */ 1820 if(mstate->s.need_refetch) { 1821 mesh_copy_qinfo(mstate, &qinfo, &qflags); 1822 #ifdef CLIENT_SUBNET 1823 /* Make also a copy of the ecs option if any */ 1824 if((ecs = edns_opt_list_find( 1825 mstate->s.edns_opts_front_in, 1826 mstate->s.env->cfg->client_subnet_opcode)) != NULL) { 1827 (void)edns_opt_list_append(&opt_list, 1828 ecs->opt_code, ecs->opt_len, 1829 ecs->opt_data, 1830 mstate->s.env->scratch); 1831 } 1832 #endif 1833 rpz_p = mstate->s.rpz_passthru; 1834 } 1835 1836 if(qinfo) { 1837 mesh_state_delete(&mstate->s); 1838 mesh_new_prefetch(mesh, qinfo, qflags, 0, 1839 rpz_p, 1840 addr.ss_family!=AF_UNSPEC?&addr:NULL, 1841 opt_list); 1842 } else { 1843 mesh_state_delete(&mstate->s); 1844 } 1845 return 0; 1846 } 1847 /* pass along the locus of control */ 1848 mstate->s.curmod --; 1849 *ev = module_event_moddone; 1850 return 1; 1851 } 1852 return 0; 1853 } 1854 1855 void mesh_run(struct mesh_area* mesh, struct mesh_state* mstate, 1856 enum module_ev ev, struct outbound_entry* e) 1857 { 1858 enum module_ext_state s; 1859 verbose(VERB_ALGO, "mesh_run: start"); 1860 while(mstate) { 1861 /* run the module */ 1862 fptr_ok(fptr_whitelist_mod_operate( 1863 mesh->mods.mod[mstate->s.curmod]->operate)); 1864 (*mesh->mods.mod[mstate->s.curmod]->operate) 1865 (&mstate->s, ev, mstate->s.curmod, e); 1866 1867 /* examine results */ 1868 mstate->s.reply = NULL; 1869 regional_free_all(mstate->s.env->scratch); 1870 s = mstate->s.ext_state[mstate->s.curmod]; 1871 verbose(VERB_ALGO, "mesh_run: %s module exit state is %s", 1872 mesh->mods.mod[mstate->s.curmod]->name, strextstate(s)); 1873 e = NULL; 1874 if(mesh_continue(mesh, mstate, s, &ev)) 1875 continue; 1876 1877 /* run more modules */ 1878 ev = module_event_pass; 1879 if(mesh->run.count > 0) { 1880 /* pop random element off the runnable tree */ 1881 mstate = (struct mesh_state*)mesh->run.root->key; 1882 (void)rbtree_delete(&mesh->run, mstate); 1883 } else mstate = NULL; 1884 } 1885 if(verbosity >= VERB_ALGO) { 1886 mesh_stats(mesh, "mesh_run: end"); 1887 mesh_log_list(mesh); 1888 } 1889 } 1890 1891 void 1892 mesh_log_list(struct mesh_area* mesh) 1893 { 1894 char buf[30]; 1895 struct mesh_state* m; 1896 int num = 0; 1897 RBTREE_FOR(m, struct mesh_state*, &mesh->all) { 1898 snprintf(buf, sizeof(buf), "%d%s%s%s%s%s%s mod%d %s%s", 1899 num++, (m->s.is_priming)?"p":"", /* prime */ 1900 (m->s.is_valrec)?"v":"", /* prime */ 1901 (m->s.query_flags&BIT_RD)?"RD":"", 1902 (m->s.query_flags&BIT_CD)?"CD":"", 1903 (m->super_set.count==0)?"d":"", /* detached */ 1904 (m->sub_set.count!=0)?"c":"", /* children */ 1905 m->s.curmod, (m->reply_list)?"rep":"", /*hasreply*/ 1906 (m->cb_list)?"cb":"" /* callbacks */ 1907 ); 1908 log_query_info(VERB_ALGO, buf, &m->s.qinfo); 1909 } 1910 } 1911 1912 void 1913 mesh_stats(struct mesh_area* mesh, const char* str) 1914 { 1915 verbose(VERB_DETAIL, "%s %u recursion states (%u with reply, " 1916 "%u detached), %u waiting replies, %u recursion replies " 1917 "sent, %d replies dropped, %d states jostled out", 1918 str, (unsigned)mesh->all.count, 1919 (unsigned)mesh->num_reply_states, 1920 (unsigned)mesh->num_detached_states, 1921 (unsigned)mesh->num_reply_addrs, 1922 (unsigned)mesh->replies_sent, 1923 (unsigned)mesh->stats_dropped, 1924 (unsigned)mesh->stats_jostled); 1925 if(mesh->replies_sent > 0) { 1926 struct timeval avg; 1927 timeval_divide(&avg, &mesh->replies_sum_wait, 1928 mesh->replies_sent); 1929 log_info("average recursion processing time " 1930 ARG_LL "d.%6.6d sec", 1931 (long long)avg.tv_sec, (int)avg.tv_usec); 1932 log_info("histogram of recursion processing times"); 1933 timehist_log(mesh->histogram, "recursions"); 1934 } 1935 } 1936 1937 void 1938 mesh_stats_clear(struct mesh_area* mesh) 1939 { 1940 if(!mesh) 1941 return; 1942 mesh->replies_sent = 0; 1943 mesh->replies_sum_wait.tv_sec = 0; 1944 mesh->replies_sum_wait.tv_usec = 0; 1945 mesh->stats_jostled = 0; 1946 mesh->stats_dropped = 0; 1947 timehist_clear(mesh->histogram); 1948 mesh->ans_secure = 0; 1949 mesh->ans_bogus = 0; 1950 mesh->ans_expired = 0; 1951 mesh->ans_cachedb = 0; 1952 memset(&mesh->ans_rcode[0], 0, sizeof(size_t)*UB_STATS_RCODE_NUM); 1953 memset(&mesh->rpz_action[0], 0, sizeof(size_t)*UB_STATS_RPZ_ACTION_NUM); 1954 mesh->ans_nodata = 0; 1955 } 1956 1957 size_t 1958 mesh_get_mem(struct mesh_area* mesh) 1959 { 1960 struct mesh_state* m; 1961 size_t s = sizeof(*mesh) + sizeof(struct timehist) + 1962 sizeof(struct th_buck)*mesh->histogram->num + 1963 sizeof(sldns_buffer) + sldns_buffer_capacity(mesh->qbuf_bak); 1964 RBTREE_FOR(m, struct mesh_state*, &mesh->all) { 1965 /* all, including m itself allocated in qstate region */ 1966 s += regional_get_mem(m->s.region); 1967 } 1968 return s; 1969 } 1970 1971 int 1972 mesh_detect_cycle(struct module_qstate* qstate, struct query_info* qinfo, 1973 uint16_t flags, int prime, int valrec) 1974 { 1975 struct mesh_area* mesh = qstate->env->mesh; 1976 struct mesh_state* dep_m = NULL; 1977 dep_m = mesh_area_find(mesh, NULL, qinfo, flags, prime, valrec); 1978 return mesh_detect_cycle_found(qstate, dep_m); 1979 } 1980 1981 void mesh_list_insert(struct mesh_state* m, struct mesh_state** fp, 1982 struct mesh_state** lp) 1983 { 1984 /* insert as last element */ 1985 m->prev = *lp; 1986 m->next = NULL; 1987 if(*lp) 1988 (*lp)->next = m; 1989 else *fp = m; 1990 *lp = m; 1991 } 1992 1993 void mesh_list_remove(struct mesh_state* m, struct mesh_state** fp, 1994 struct mesh_state** lp) 1995 { 1996 if(m->next) 1997 m->next->prev = m->prev; 1998 else *lp = m->prev; 1999 if(m->prev) 2000 m->prev->next = m->next; 2001 else *fp = m->next; 2002 } 2003 2004 void mesh_state_remove_reply(struct mesh_area* mesh, struct mesh_state* m, 2005 struct comm_point* cp) 2006 { 2007 struct mesh_reply* n, *prev = NULL; 2008 n = m->reply_list; 2009 /* when in mesh_cleanup, it sets the reply_list to NULL, so that 2010 * there is no accounting twice */ 2011 if(!n) return; /* nothing to remove, also no accounting needed */ 2012 while(n) { 2013 if(n->query_reply.c == cp) { 2014 /* unlink it */ 2015 if(prev) prev->next = n->next; 2016 else m->reply_list = n->next; 2017 /* delete it, but allocated in m region */ 2018 log_assert(mesh->num_reply_addrs > 0); 2019 mesh->num_reply_addrs--; 2020 2021 /* prev = prev; */ 2022 n = n->next; 2023 continue; 2024 } 2025 prev = n; 2026 n = n->next; 2027 } 2028 /* it was not detached (because it had a reply list), could be now */ 2029 if(!m->reply_list && !m->cb_list 2030 && m->super_set.count == 0) { 2031 mesh->num_detached_states++; 2032 } 2033 /* if not replies any more in mstate, it is no longer a reply_state */ 2034 if(!m->reply_list && !m->cb_list) { 2035 log_assert(mesh->num_reply_states > 0); 2036 mesh->num_reply_states--; 2037 } 2038 } 2039 2040 2041 static int 2042 apply_respip_action(struct module_qstate* qstate, 2043 const struct query_info* qinfo, struct respip_client_info* cinfo, 2044 struct respip_action_info* actinfo, struct reply_info* rep, 2045 struct ub_packed_rrset_key** alias_rrset, 2046 struct reply_info** encode_repp, struct auth_zones* az) 2047 { 2048 if(qinfo->qtype != LDNS_RR_TYPE_A && 2049 qinfo->qtype != LDNS_RR_TYPE_AAAA && 2050 qinfo->qtype != LDNS_RR_TYPE_ANY) 2051 return 1; 2052 2053 if(!respip_rewrite_reply(qinfo, cinfo, rep, encode_repp, actinfo, 2054 alias_rrset, 0, qstate->region, az, NULL)) 2055 return 0; 2056 2057 /* xxx_deny actions mean dropping the reply, unless the original reply 2058 * was redirected to response-ip data. */ 2059 if((actinfo->action == respip_deny || 2060 actinfo->action == respip_inform_deny) && 2061 *encode_repp == rep) 2062 *encode_repp = NULL; 2063 2064 return 1; 2065 } 2066 2067 void 2068 mesh_serve_expired_callback(void* arg) 2069 { 2070 struct mesh_state* mstate = (struct mesh_state*) arg; 2071 struct module_qstate* qstate = &mstate->s; 2072 struct mesh_reply* r; 2073 struct mesh_area* mesh = qstate->env->mesh; 2074 struct dns_msg* msg; 2075 struct mesh_cb* c; 2076 struct mesh_reply* prev = NULL; 2077 struct sldns_buffer* prev_buffer = NULL; 2078 struct sldns_buffer* r_buffer = NULL; 2079 struct reply_info* partial_rep = NULL; 2080 struct ub_packed_rrset_key* alias_rrset = NULL; 2081 struct reply_info* encode_rep = NULL; 2082 struct respip_action_info actinfo; 2083 struct query_info* lookup_qinfo = &qstate->qinfo; 2084 struct query_info qinfo_tmp; 2085 struct timeval tv = {0, 0}; 2086 int must_validate = (!(qstate->query_flags&BIT_CD) 2087 || qstate->env->cfg->ignore_cd) && qstate->env->need_to_validate; 2088 int i = 0; 2089 if(!qstate->serve_expired_data) return; 2090 verbose(VERB_ALGO, "Serve expired: Trying to reply with expired data"); 2091 comm_timer_delete(qstate->serve_expired_data->timer); 2092 qstate->serve_expired_data->timer = NULL; 2093 /* If is_drop or no_cache_lookup (modules that handle their own cache e.g., 2094 * subnetmod) ignore stale data from the main cache. */ 2095 if(qstate->no_cache_lookup || qstate->is_drop) { 2096 verbose(VERB_ALGO, 2097 "Serve expired: Not allowed to look into cache for stale"); 2098 return; 2099 } 2100 /* The following while is used instead of the `goto lookup_cache` 2101 * like in the worker. */ 2102 while(1) { 2103 fptr_ok(fptr_whitelist_serve_expired_lookup( 2104 qstate->serve_expired_data->get_cached_answer)); 2105 msg = (*qstate->serve_expired_data->get_cached_answer)(qstate, 2106 lookup_qinfo); 2107 if(!msg) 2108 return; 2109 /* Reset these in case we pass a second time from here. */ 2110 encode_rep = msg->rep; 2111 memset(&actinfo, 0, sizeof(actinfo)); 2112 actinfo.action = respip_none; 2113 alias_rrset = NULL; 2114 if((mesh->use_response_ip || mesh->use_rpz) && 2115 !partial_rep && !apply_respip_action(qstate, &qstate->qinfo, 2116 qstate->client_info, &actinfo, msg->rep, &alias_rrset, &encode_rep, 2117 qstate->env->auth_zones)) { 2118 return; 2119 } else if(partial_rep && 2120 !respip_merge_cname(partial_rep, &qstate->qinfo, msg->rep, 2121 qstate->client_info, must_validate, &encode_rep, qstate->region, 2122 qstate->env->auth_zones)) { 2123 return; 2124 } 2125 if(!encode_rep || alias_rrset) { 2126 if(!encode_rep) { 2127 /* Needs drop */ 2128 return; 2129 } else { 2130 /* A partial CNAME chain is found. */ 2131 partial_rep = encode_rep; 2132 } 2133 } 2134 /* We've found a partial reply ending with an 2135 * alias. Replace the lookup qinfo for the 2136 * alias target and lookup the cache again to 2137 * (possibly) complete the reply. As we're 2138 * passing the "base" reply, there will be no 2139 * more alias chasing. */ 2140 if(partial_rep) { 2141 memset(&qinfo_tmp, 0, sizeof(qinfo_tmp)); 2142 get_cname_target(alias_rrset, &qinfo_tmp.qname, 2143 &qinfo_tmp.qname_len); 2144 if(!qinfo_tmp.qname) { 2145 log_err("Serve expired: unexpected: invalid answer alias"); 2146 return; 2147 } 2148 qinfo_tmp.qtype = qstate->qinfo.qtype; 2149 qinfo_tmp.qclass = qstate->qinfo.qclass; 2150 lookup_qinfo = &qinfo_tmp; 2151 continue; 2152 } 2153 break; 2154 } 2155 2156 if(verbosity >= VERB_ALGO) 2157 log_dns_msg("Serve expired lookup", &qstate->qinfo, msg->rep); 2158 2159 for(r = mstate->reply_list; r; r = r->next) { 2160 i++; 2161 tv = r->start_time; 2162 2163 /* If address info is returned, it means the action should be an 2164 * 'inform' variant and the information should be logged. */ 2165 if(actinfo.addrinfo) { 2166 respip_inform_print(&actinfo, r->qname, 2167 qstate->qinfo.qtype, qstate->qinfo.qclass, 2168 r->local_alias, &r->query_reply.client_addr, 2169 r->query_reply.client_addrlen); 2170 } 2171 2172 /* Add EDE Stale Answer (RCF8914). Ignore global ede as this is 2173 * warning instead of an error */ 2174 if (r->edns.edns_present && qstate->env->cfg->ede_serve_expired && 2175 qstate->env->cfg->ede) { 2176 edns_opt_list_append_ede(&r->edns.opt_list_out, 2177 mstate->s.region, LDNS_EDE_STALE_ANSWER, NULL); 2178 } 2179 2180 r_buffer = r->query_reply.c->buffer; 2181 if(r->query_reply.c->tcp_req_info) 2182 r_buffer = r->query_reply.c->tcp_req_info->spool_buffer; 2183 mesh_send_reply(mstate, LDNS_RCODE_NOERROR, msg->rep, 2184 r, r_buffer, prev, prev_buffer); 2185 if(r->query_reply.c->tcp_req_info) 2186 tcp_req_info_remove_mesh_state(r->query_reply.c->tcp_req_info, mstate); 2187 prev = r; 2188 prev_buffer = r_buffer; 2189 } 2190 /* Account for each reply sent. */ 2191 if(i > 0) { 2192 mesh->ans_expired += i; 2193 if(actinfo.addrinfo && qstate->env->cfg->stat_extended && 2194 actinfo.rpz_used) { 2195 if(actinfo.rpz_disabled) 2196 qstate->env->mesh->rpz_action[RPZ_DISABLED_ACTION] += i; 2197 if(actinfo.rpz_cname_override) 2198 qstate->env->mesh->rpz_action[RPZ_CNAME_OVERRIDE_ACTION] += i; 2199 else 2200 qstate->env->mesh->rpz_action[ 2201 respip_action_to_rpz_action(actinfo.action)] += i; 2202 } 2203 } 2204 2205 /* Mesh area accounting */ 2206 if(mstate->reply_list) { 2207 mstate->reply_list = NULL; 2208 if(!mstate->reply_list && !mstate->cb_list) { 2209 log_assert(mesh->num_reply_states > 0); 2210 mesh->num_reply_states--; 2211 if(mstate->super_set.count == 0) { 2212 mesh->num_detached_states++; 2213 } 2214 } 2215 } 2216 2217 while((c = mstate->cb_list) != NULL) { 2218 /* take this cb off the list; so that the list can be 2219 * changed, eg. by adds from the callback routine */ 2220 if(!mstate->reply_list && mstate->cb_list && !c->next) { 2221 /* was a reply state, not anymore */ 2222 log_assert(qstate->env->mesh->num_reply_states > 0); 2223 qstate->env->mesh->num_reply_states--; 2224 } 2225 mstate->cb_list = c->next; 2226 if(!mstate->reply_list && !mstate->cb_list && 2227 mstate->super_set.count == 0) 2228 qstate->env->mesh->num_detached_states++; 2229 mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c, &tv); 2230 } 2231 } 2232 2233 int mesh_jostle_exceeded(struct mesh_area* mesh) 2234 { 2235 if(mesh->all.count < mesh->max_reply_states) 2236 return 0; 2237 return 1; 2238 } 2239