/*
 * libunbound/worker.c - worker thread or process that resolves
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the worker process or thread that performs
 * the DNS resolving and validation. The worker is started by a procedure
 * call; in the background it continues until exit, in the foreground it
 * returns from the procedure when the work is done.
 */
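/*
 * Usage sketch (illustrative only, assuming the public libunbound API from
 * unbound.h; mycallback and mydata stand for application-defined names):
 *
 *	struct ub_ctx* ctx = ub_ctx_create();
 *	struct ub_result* result = NULL;
 *	int async_id = 0;
 *
 *	(foreground: libworker_fg() below runs the event base until done)
 *	ub_resolve(ctx, "www.example.com", 1, 1, &result);
 *	ub_resolve_free(result);
 *
 *	(background: libworker_bg() below forks or creates a thread, and the
 *	answer is delivered to mycallback from ub_wait() or ub_process())
 *	ub_resolve_async(ctx, "www.example.com", 1, 1, mydata, mycallback,
 *		&async_id);
 *	ub_wait(ctx);
 *	ub_ctx_delete(ctx);
 *
 * Here rrtype 1 is A and rrclass 1 is IN.
 */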
#include "config.h"
#ifdef HAVE_SSL
#include <openssl/ssl.h>
#endif
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/unbound.h"
#include "libunbound/worker.h"
#include "libunbound/unbound-event.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/rrset.h"
#include "services/outbound_list.h"
#include "services/authzone.h"
#include "util/fptr_wlist.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/random.h"
#include "util/config_file.h"
#include "util/netevent.h"
#include "util/storage/lookup3.h"
#include "util/storage/slabhash.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"

#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif

#if defined(TARGET_OS_TV) || defined(TARGET_OS_WATCH)
#undef HAVE_FORK
#endif

/** handle new query command for bg worker */
static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);

/** delete libworker env */
static void
libworker_delete_env(struct libworker* w)
{
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		sldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		forwards_delete(w->env->fwds);
		hints_delete(w->env->hints);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
#ifdef HAVE_SSL
	SSL_CTX_free(w->sslctx);
#endif
	outside_network_delete(w->back);
}

/** delete libworker struct */
static void
libworker_delete(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete(w->base);
	free(w);
}

void
libworker_delete_event(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete_no_base(w->base);
	free(w);
}

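/*
 * Note on the setup below: the context cfglock and the shared alloc cache
 * are only used when the worker shares memory with the application, that
 * is for foreground workers (!w->is_bg) and for background workers that
 * run as a thread (w->is_bg_thread); a forked background worker operates
 * on its own copy of the context and needs no locking.
 */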
/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
{
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
	if(cfg->ssl_upstream || (cfg->tls_cert_bundle && cfg->tls_cert_bundle[0]) || cfg->tls_win_cert) {
		w->sslctx = connect_sslctx_create(NULL, NULL,
			cfg->tls_cert_bundle, cfg->tls_win_cert);
		if(!w->sslctx) {
			/* to make the setup fail after unlock */
			hints_delete(w->env->hints);
			w->env->hints = NULL;
		}
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}

	if(eb)
		w->base = comm_base_create_event(eb);
	else	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker_base = w->base;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0, cfg->ip_dscp,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
		cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
		cfg->delay_close, cfg->tls_use_sni, NULL);
	w->env->outnet = w->back;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->add_sub = &mesh_add_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	return w;
}

struct libworker* libworker_create_event(struct ub_ctx* ctx,
	struct ub_event_base* eb)
{
	return libworker_setup(ctx, 0, eb);
}

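/*
 * Background command handling: the application side writes serialized
 * commands to the qq_pipe tube and the handlers below decode them
 * (UB_LIBCMD_NEWQUERY, UB_LIBCMD_CANCEL, UB_LIBCMD_QUIT); serialized
 * answers travel back to the application over the rr_pipe tube.
 */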
/** handle cancel command for bg worker */
static void
handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
{
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_deserialize_cancel(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_cancel(w->ctx, buf, len);
	}
	if(!q) {
		/* probably simply lookup failed, i.e. the message had been
		 * processed and answered before the cancel arrived */
		return;
	}
	q->cancelled = 1;
	free(buf);
}

/** do control command coming into bg server */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
			/* fallthrough */
		case UB_LIBCMD_QUIT:
			free(msg);
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}

/** handle control command coming into server */
void
libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* msg, size_t len, int err, void* arg)
{
	struct libworker* w = (struct libworker*)arg;

	if(err != 0) {
		free(msg);
		/* it is of no use to go on, exit */
		comm_base_exit(w->base);
		return;
	}
	libworker_do_cmd(w, msg, len); /* also frees the buf */
}

/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	w->want_quit = 1;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}

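/*
 * Note: ctx->dothread below reflects the application's ub_ctx_async()
 * choice; when true the background worker runs as a thread in this
 * process, otherwise a separate process is forked. Where fork() is not
 * available (see the HAVE_FORK handling near the top of this file) the
 * fork mode fails with UB_FORKFAIL.
 */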
int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1, NULL);
		if(!w) return UB_NOMEM;
		w->is_bg_thread = 1;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				w = libworker_setup(ctx, 1, NULL);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				/* close non-used parts, so that the worker
				 * bgprocess gets 'pipe closed' when the
				 * main process exits */
				tube_close_read(ctx->qq_pipe);
				tube_close_write(ctx->rr_pipe);
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}

/** insert canonname */
static int
fill_canon(struct ub_result* res, uint8_t* s)
{
	char buf[255+2];
	dname_str(s, buf);
	res->canonname = strdup(buf);
	return res->canonname != 0;
}

/** fill data into result */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* data;
	res->ttl = 0;
	if(!answer) {
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		if(rep->rrset_count != 0)
			res->ttl = (int)rep->ttl;
		res->data = (char**)calloc(1, sizeof(char*));
		res->len = (int*)calloc(1, sizeof(int));
		return (res->data && res->len);
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->data || !res->len)
		return 0; /* out of memory */
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i])
			return 0; /* out of memory */
	}
	/* ttl for positive answers, from CNAME and answer RRs */
	if(data->count != 0) {
		size_t j;
		res->ttl = (int)data->ttl;
		for(j=0; j<rep->an_numrrsets; j++) {
			struct packed_rrset_data* d =
				(struct packed_rrset_data*)rep->rrsets[j]->
				entry.data;
			if((int)d->ttl < res->ttl)
				res->ttl = (int)d->ttl;
		}
	}
	/* ttl for negative answers */
	if(data->count == 0 && rep->rrset_count != 0)
		res->ttl = (int)rep->ttl;
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}

/** fill result from parsed message, on error fills servfail */
void
libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply_in_temp_region(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq, rep))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus ||
		msg_security == sec_status_secure_sentinel_fail)
		res->bogus = 1;
}

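/*
 * Illustrative note: fill_res() above leaves the result arrays NULL
 * terminated, so library callers can walk an answer roughly like this
 * (use_rdata stands for an application-defined routine):
 *
 *	int i;
 *	for(i = 0; result->data[i] != NULL; i++)
 *		use_rdata(result->data[i], result->len[i]);
 *
 * Each element is raw rdata with the 2-byte rdlength already stripped, as
 * done in the copy loop above.
 */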
/** fillup fg results */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	q->res->was_ratelimited = was_ratelimited;
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = sec_status_unchecked;
	q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
	q->msg_len = sldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}

void
libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* fg query is done; exit comm base */
	comm_base_exit(q->w->base);

	libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
}

/** setup qinfo and edns */
static int
setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
	struct query_info* qinfo, struct edns_data* edns)
{
	qinfo->qtype = (uint16_t)q->res->qtype;
	qinfo->qclass = (uint16_t)q->res->qclass;
	qinfo->local_alias = NULL;
	qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
	if(!qinfo->qname) {
		return 0;
	}
	edns->edns_present = 1;
	edns->ext_rcode = 0;
	edns->edns_version = 0;
	edns->bits = EDNS_DO;
	edns->opt_list = NULL;
	if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
		edns->udp_size = (uint16_t)sldns_buffer_capacity(
			w->back->udp_buff);
	else	edns->udp_size = 65535;
	return 1;
}

int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
{
	struct libworker* w = libworker_setup(ctx, 0, NULL);
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		libworker_delete(w);
		return UB_SYNTAX;
	}
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);

	/* wait for reply */
	comm_base_dispatch(w->base);

	libworker_delete(w);
	return UB_NOERROR;
}

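/*
 * Note: the event callback below reports security to the application as a
 * small integer, 2 for a DNSSEC secure answer, 1 for bogus and 0 otherwise,
 * matching the sec_status conversion in the function body.
 */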
void
libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	ub_event_callback_type cb = q->cb_event;
	void* cb_arg = q->cb_arg;
	int cancelled = q->cancelled;

	/* delete it now */
	struct ub_ctx* ctx = q->w->ctx;
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(!cancelled) {
		/* call callback */
		int sec = 0;
		if(s == sec_status_bogus)
			sec = 1;
		else if(s == sec_status_secure)
			sec = 2;
		(*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL),
			(buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited);
	}
}

int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
	int* async_id)
{
	struct libworker* w = ctx->event_worker;
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns))
		return UB_SYNTAX;
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	/* process new query */
	if(async_id)
		*async_id = q->querynum;
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_event_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);
	return UB_NOERROR;
}

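/*
 * Note: in the threaded background case below, the answer packet is copied
 * into q->msg under the cfglock and the ctx_query is left for the
 * application side to clean up when it processes the serialized answer; in
 * the forked case the packet is serialized directly and the query is
 * deleted here, since this process owns its own copy of the context.
 */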
/** add result to the bg worker result queue */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
	int err, char* reason, int was_ratelimited)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

	if(w->want_quit) {
		context_query_delete(q);
		return;
	}
	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		if(pkt) {
			q->msg_len = sldns_buffer_remaining(pkt);
			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg) {
				msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
			} else {
				msg = context_serialize_answer(q, err, NULL, &len);
			}
		} else {
			msg = context_serialize_answer(q, err, NULL, &len);
		}
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}

void
libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled || q->w->back->want_to_quit) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	if(!buf) {
		buf = q->w->env->scratch_buffer;
	}
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
}


/** handle new query command for bg worker */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
	}
	free(qinfo.qname);
}

void libworker_alloc_cleanup(void* arg)
{
	struct libworker* w = (struct libworker*)arg;
	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
}

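/*
 * Note: this is the env->send_query hook installed in libworker_setup();
 * the outbound_entry is allocated in the query state's region, so it is
 * released together with that region, and replies arrive through
 * libworker_handle_service_reply() below.
 */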
struct outbound_entry* libworker_send_query(struct query_info* qinfo,
	uint16_t flags, int dnssec, int want_dnssec, int nocaps,
	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
	size_t zonelen, int ssl_upstream, char* tls_auth_name,
	struct module_qstate* q)
{
	struct libworker* w = (struct libworker*)q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
		want_dnssec, nocaps, q->env->cfg->tcp_upstream, ssl_upstream,
		tls_auth_name, addr, addrlen, zone, zonelen, q,
		libworker_handle_service_reply, e, w->back->udp_buff, q->env);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}

int
libworker_handle_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct module_qstate* q = (struct module_qstate*)arg;
	struct libworker* lw = (struct libworker*)q->env->worker;
	struct outbound_entry e;
	e.qstate = q;
	e.qsent = NULL;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, &e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, &e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
	return 0;
}

int
libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct libworker* lw = (struct libworker*)e->qstate->env->worker;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
	return 0;
}

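/*
 * The stubs below appear to exist only so that the function pointer
 * whitelist (fptr_wlist) can be linked into the library build without the
 * daemon's worker code; they must never actually be called, hence the
 * log_assert(0) bodies.
 */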
/* --- fake callbacks for fptr_wlist to work --- */
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
	uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
	int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
	struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen),
	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen),
	int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
	struct module_qstate* ATTR_UNUSED(q))
{
	log_assert(0);
	return 0;
}

void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

#ifdef UB_ON_WINDOWS
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
	ATTR_UNUSED(arg)) {
	log_assert(0);
}

void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif /* UB_ON_WINDOWS */

#ifdef USE_DNSTAP
void dtio_tap_callback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
	void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif

#ifdef USE_DNSTAP
void dtio_mainfdcallback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
	void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif