/*
 * libunbound/worker.c - worker thread or process that resolves
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the worker process or thread that performs
 * the DNS resolving and validation. The worker is started from a
 * procedure call; when it runs in the background it continues until it
 * is told to exit, and when it runs in the foreground it returns from
 * the procedure when the work is done.
 */
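
/*
 * Usage sketch: these workers back the public libunbound API declared in
 * libunbound/unbound.h.  The foreground worker (libworker_fg) is used for
 * the blocking ub_resolve() style calls, the background worker
 * (libworker_bg) for the ub_resolve_async()/ub_process() path, and
 * libworker_create_event() for the application-supplied event base API in
 * unbound-event.h.  A minimal, hedged example of the blocking path, with
 * qtype 1 (A) and qclass 1 (IN), error handling omitted:
 *
 *	struct ub_ctx* ctx = ub_ctx_create();
 *	struct ub_result* result = NULL;
 *	if(ctx && ub_resolve(ctx, "www.example.com", 1, 1, &result) == 0
 *		&& result->havedata)
 *		printf("got %d bytes of rdata\n", result->len[0]);
 *	ub_resolve_free(result);
 *	ub_ctx_delete(ctx);
 */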
#include "config.h"
#ifdef HAVE_SSL
#include <openssl/ssl.h>
#endif
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/unbound.h"
#include "libunbound/worker.h"
#include "libunbound/unbound-event.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/rrset.h"
#include "services/outbound_list.h"
#include "services/authzone.h"
#include "util/fptr_wlist.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/random.h"
#include "util/config_file.h"
#include "util/netevent.h"
#include "util/proxy_protocol.h"
#include "util/storage/lookup3.h"
#include "util/storage/slabhash.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"
#ifdef USE_DNSTAP
#include "dnstap/dtstream.h"
#endif

#ifdef HAVE_TARGETCONDITIONALS_H
#include <TargetConditionals.h>
#endif

#if (defined(TARGET_OS_TV) && TARGET_OS_TV) || (defined(TARGET_OS_WATCH) && TARGET_OS_WATCH)
#undef HAVE_FORK
#endif

/** handle new query command for bg worker */
static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);

/** delete libworker env */
static void
libworker_delete_env(struct libworker* w)
{
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		sldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
#ifdef HAVE_SSL
	SSL_CTX_free(w->sslctx);
#endif
	outside_network_delete(w->back);
}

/** delete libworker struct */
static void
libworker_delete(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete(w->base);
	free(w);
}

void
libworker_delete_event(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete_no_base(w->base);
	free(w);
}

/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
{
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
#ifdef HAVE_SSL
	w->sslctx = connect_sslctx_create(NULL, NULL,
		cfg->tls_cert_bundle, cfg->tls_win_cert);
	if(!w->sslctx) {
		/* to make the setup fail after unlock */
		sldns_buffer_free(w->env->scratch_buffer);
		w->env->scratch_buffer = NULL;
	}
#endif
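	/* Locking note: the cfglock lock/unlock pairs in this function are
	 * only taken when the worker shares the context with the application,
	 * i.e. the foreground case and the background-thread case
	 * (!w->is_bg || w->is_bg_thread).  A forked background worker
	 * operates on its own copy of the context and needs no lock here. */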
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}

	if(eb)
		w->base = comm_base_create_event(eb);
	else	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker_base = w->base;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0, cfg->ip_dscp,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
		cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
		cfg->delay_close, cfg->tls_use_sni, NULL, cfg->udp_connect,
		cfg->max_reuse_tcp_queries, cfg->tcp_reuse_timeout,
		cfg->tcp_auth_query_timeout);
	w->env->outnet = w->back;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->add_sub = &mesh_add_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	pp_init(&sldns_write_uint16, &sldns_write_uint32);
	return w;
}

struct libworker* libworker_create_event(struct ub_ctx* ctx,
	struct ub_event_base* eb)
{
	return libworker_setup(ctx, 0, eb);
}
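
/*
 * Background command handling: the application front end sends serialized
 * commands (UB_LIBCMD_NEWQUERY, UB_LIBCMD_CANCEL, UB_LIBCMD_QUIT) to this
 * worker over ctx->qq_pipe; libworker_handle_control_cmd() receives them
 * and libworker_do_cmd() dispatches to the handlers below.  Results are
 * serialized answers (presumably tagged UB_LIBCMD_ANSWER) queued on
 * ctx->rr_pipe by add_bg_result().
 */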
/** handle cancel command for bg worker */
static void
handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
{
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_deserialize_cancel(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_cancel(w->ctx, buf, len);
	}
	if(!q) {
		/* probably simply lookup failed, i.e. the message had been
		 * processed and answered before the cancel arrived */
		return;
	}
	q->cancelled = 1;
	free(buf);
}

/** do control command coming into bg server */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
			/* fallthrough */
		case UB_LIBCMD_QUIT:
			free(msg);
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}

/** handle control command coming into server */
void
libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* msg, size_t len, int err, void* arg)
{
	struct libworker* w = (struct libworker*)arg;

	if(err != 0) {
		free(msg);
		/* it is of no use to go on, exit */
		comm_base_exit(w->base);
		return;
	}
	libworker_do_cmd(w, msg, len); /* also frees the buf */
}

/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	w->want_quit = 1;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}
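
/*
 * libworker_bg() below starts the background worker either as a thread
 * (ctx->dothread) or as a forked process.  On platforms without fork()
 * (for instance Windows, and tvOS/watchOS where HAVE_FORK is undefined
 * above) only the thread variant is available and the fork branch
 * reports UB_FORKFAIL.
 */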
int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1, NULL);
		if(!w) return UB_NOMEM;
		w->is_bg_thread = 1;
		ctx->thread_worker = w;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				w = libworker_setup(ctx, 1, NULL);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				/* close non-used parts, so that the worker
				 * bgprocess gets 'pipe closed' when the
				 * main process exits */
				tube_close_read(ctx->qq_pipe);
				tube_close_write(ctx->rr_pipe);
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}

/** insert canonname */
static int
fill_canon(struct ub_result* res, uint8_t* s)
{
	char buf[255+2];
	dname_str(s, buf);
	res->canonname = strdup(buf);
	return res->canonname != 0;
}

/** fill data into result */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* data;
	res->ttl = 0;
	if(!answer) {
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		if(rep->rrset_count != 0)
			res->ttl = (int)rep->ttl;
		res->data = (char**)calloc(1, sizeof(char*));
		if(!res->data)
			return 0; /* out of memory */
		res->len = (int*)calloc(1, sizeof(int));
		if(!res->len) {
			free(res->data);
			res->data = NULL;
			return 0; /* out of memory */
		}
		return 1;
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	if(!res->data)
		return 0; /* out of memory */
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->len) {
		free(res->data);
		res->data = NULL;
		return 0; /* out of memory */
	}
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i]) {
			size_t j;
			for(j=0; j<i; j++) {
				free(res->data[j]);
				res->data[j] = NULL;
			}
			free(res->data);
			res->data = NULL;
			free(res->len);
			res->len = NULL;
			return 0; /* out of memory */
		}
	}
	/* ttl for positive answers, from CNAME and answer RRs */
	if(data->count != 0) {
		size_t j;
		res->ttl = (int)data->ttl;
		for(j=0; j<rep->an_numrrsets; j++) {
			struct packed_rrset_data* d =
				(struct packed_rrset_data*)rep->rrsets[j]->
				entry.data;
			if((int)d->ttl < res->ttl)
				res->ttl = (int)d->ttl;
		}
	}
	/* ttl for negative answers */
	if(data->count == 0 && rep->rrset_count != 0)
		res->ttl = (int)rep->ttl;
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}
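
/*
 * The struct ub_result filled above follows the unbound.h contract:
 * data[] holds one malloced rdata item per RR (without the 2 byte
 * rdlength), len[] the matching lengths, both arrays terminated by a
 * NULL/0 entry; canonname is only set when the answer name differs from
 * the queried name, and ttl is the smallest TTL among the answer rrsets.
 * A minimal consumer sketch, with a hypothetical handle_rdata() helper:
 *
 *	int i;
 *	for(i = 0; result->data[i] != NULL; i++)
 *		handle_rdata(result->data[i], result->len[i]);
 */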
/** fill result from parsed message, on error fills servfail */
void
libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply_in_temp_region(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq, rep))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus ||
		msg_security == sec_status_secure_sentinel_fail)
		res->bogus = 1;
}

/** fillup fg results */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	q->res->was_ratelimited = was_ratelimited;
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = sec_status_unchecked;
	q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
	q->msg_len = sldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}

void
libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* fg query is done; exit comm base */
	comm_base_exit(q->w->base);

	libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
}

/** setup qinfo and edns */
static int
setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
	struct query_info* qinfo, struct edns_data* edns)
{
	qinfo->qtype = (uint16_t)q->res->qtype;
	qinfo->qclass = (uint16_t)q->res->qclass;
	qinfo->local_alias = NULL;
	qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
	if(!qinfo->qname) {
		return 0;
	}
	edns->edns_present = 1;
	edns->ext_rcode = 0;
	edns->edns_version = 0;
	edns->bits = EDNS_DO;
	edns->opt_list_in = NULL;
	edns->opt_list_out = NULL;
	edns->opt_list_inplace_cb_out = NULL;
	edns->padding_block_size = 0;
	edns->cookie_present = 0;
	edns->cookie_valid = 0;
	if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
		edns->udp_size = (uint16_t)sldns_buffer_capacity(
			w->back->udp_buff);
	else	edns->udp_size = 65535;
	return 1;
}
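
/*
 * Foreground resolution: libworker_fg() below builds the query info and
 * EDNS record, first tries to answer from local zones and auth zones
 * without going to the network, and otherwise registers a callback in the
 * mesh and runs comm_base_dispatch() until libworker_fg_done_cb() exits
 * the event loop with the answer.
 */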
int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
{
	struct libworker* w = libworker_setup(ctx, 0, NULL);
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		libworker_delete(w);
		return UB_SYNTAX;
	}
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_fg_done_cb, q, 0)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);

	/* wait for reply */
	comm_base_dispatch(w->base);

	libworker_delete(w);
	return UB_NOERROR;
}

void
libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	ub_event_callback_type cb = q->cb_event;
	void* cb_arg = q->cb_arg;
	int cancelled = q->cancelled;

	/* delete it now */
	struct ub_ctx* ctx = q->w->ctx;
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(!cancelled) {
		/* call callback */
		int sec = 0;
		if(s == sec_status_bogus)
			sec = 1;
		else if(s == sec_status_secure)
			sec = 2;
		(*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL),
			(buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited);
	}
}

int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
	int* async_id)
{
	struct libworker* w = ctx->event_worker;
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns))
		return UB_SYNTAX;
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	/* process new query */
	if(async_id)
		*async_id = q->querynum;
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_event_done_cb, q, 0)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);
	return UB_NOERROR;
}
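
/*
 * Result delivery for the background worker: add_bg_result() below
 * serializes the answer and queues it on ctx->rr_pipe.  When the worker
 * runs as a thread it shares the context, so the packet is copied into
 * q->msg under cfglock and the query entry is left for the front end to
 * remove; the forked worker owns a private copy of the query tree and
 * deletes its entry here after serializing.
 */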
/** add result to the bg worker result queue */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
	int err, char* reason, int was_ratelimited)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

	if(w->want_quit) {
		context_query_delete(q);
		return;
	}
	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		if(pkt) {
			q->msg_len = sldns_buffer_remaining(pkt);
			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg) {
				msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
			} else {
				msg = context_serialize_answer(q, err, NULL, &len);
			}
		} else {
			msg = context_serialize_answer(q, err, NULL, &len);
		}
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}

void
libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled || q->w->back->want_to_quit) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	if(!buf) {
		buf = q->w->env->scratch_buffer;
	}
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
}


/** handle new query command for bg worker */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q, 0)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
	}
	free(qinfo.qname);
}

void libworker_alloc_cleanup(void* arg)
{
	struct libworker* w = (struct libworker*)arg;
	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
}
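
/**
 * Send a query for the resolver modules (the module_env send_query hook
 * set up in libworker_setup).  Allocates an outbound_entry in the query
 * region and hands the query to the serviced-query machinery of the
 * outside network; replies come back via libworker_handle_service_reply().
 */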
struct outbound_entry* libworker_send_query(struct query_info* qinfo,
	uint16_t flags, int dnssec, int want_dnssec, int nocaps,
	int check_ratelimit,
	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
	size_t zonelen, int tcp_upstream, int ssl_upstream, char* tls_auth_name,
	struct module_qstate* q, int* was_ratelimited)
{
	struct libworker* w = (struct libworker*)q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
		want_dnssec, nocaps, check_ratelimit, tcp_upstream, ssl_upstream,
		tls_auth_name, addr, addrlen, zone, zonelen, q,
		libworker_handle_service_reply, e, w->back->udp_buff, q->env,
		was_ratelimited);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}

int
libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct libworker* lw = (struct libworker*)e->qstate->env->worker;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
	return 0;
}

/* --- fake callbacks for fptr_wlist to work --- */
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
	uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
	int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
	int ATTR_UNUSED(check_ratelimit),
	struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen),
	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen), int ATTR_UNUSED(tcp_upstream),
	int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
	struct module_qstate* ATTR_UNUSED(q), int* ATTR_UNUSED(was_ratelimited))
{
	log_assert(0);
	return 0;
}

void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

#ifdef UB_ON_WINDOWS
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
	ATTR_UNUSED(arg)) {
	log_assert(0);
}

void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif /* UB_ON_WINDOWS */

#ifdef USE_DNSTAP
void dtio_tap_callback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
	void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif

#ifdef USE_DNSTAP
void dtio_mainfdcallback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
	void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif