/*
 * libunbound/worker.c - worker thread or process that resolves
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the worker process or thread that performs
 * the DNS resolving and validation. The worker is called by a procedure;
 * if in the background it continues until exit, and if in the foreground
 * it returns from the procedure when done.
 */
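/*
 * Minimal usage sketch, kept in a comment so it does not affect the build,
 * of how an application reaches this code through the public API in
 * unbound.h. ub_resolve() is served by the foreground path (libworker_fg()
 * below); ub_resolve_async() uses the background worker started by
 * libworker_bg(), sketched further down. The hostname, the rrtype 1 (A),
 * the rrclass 1 (IN) and the error handling are illustrative only.
 *
 *   #include <stdio.h>
 *   #include <unbound.h>
 *
 *   int main(void)
 *   {
 *       struct ub_ctx* ctx = ub_ctx_create();
 *       struct ub_result* result = NULL;
 *       if(!ctx)
 *           return 1;
 *       // blocks in comm_base_dispatch() until the mesh answers
 *       if(ub_resolve(ctx, "www.example.com", 1, 1, &result) == 0) {
 *           printf("rcode %d havedata %d\n", result->rcode,
 *               result->havedata);
 *           ub_resolve_free(result);
 *       }
 *       ub_ctx_delete(ctx);
 *       return 0;
 *   }
 */
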
#include "config.h"
#ifdef HAVE_SSL
#include <openssl/ssl.h>
#endif
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/unbound.h"
#include "libunbound/worker.h"
#include "libunbound/unbound-event.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/rrset.h"
#include "services/outbound_list.h"
#include "services/authzone.h"
#include "util/fptr_wlist.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/random.h"
#include "util/config_file.h"
#include "util/netevent.h"
#include "util/storage/lookup3.h"
#include "util/storage/slabhash.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"

/** handle new query command for bg worker */
static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);

/** delete libworker env */
static void
libworker_delete_env(struct libworker* w)
{
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		sldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		forwards_delete(w->env->fwds);
		hints_delete(w->env->hints);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
#ifdef HAVE_SSL
	SSL_CTX_free(w->sslctx);
#endif
	outside_network_delete(w->back);
}

/** delete libworker struct */
static void
libworker_delete(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete(w->base);
	free(w);
}

void
libworker_delete_event(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete_no_base(w->base);
	free(w);
}

/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
{
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
	if(cfg->ssl_upstream || (cfg->tls_cert_bundle && cfg->tls_cert_bundle[0]) || cfg->tls_win_cert) {
		w->sslctx = connect_sslctx_create(NULL, NULL,
			cfg->tls_cert_bundle, cfg->tls_win_cert);
		if(!w->sslctx) {
			/* to make the setup fail after unlock */
			hints_delete(w->env->hints);
			w->env->hints = NULL;
		}
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}

	if(eb)
		w->base = comm_base_create_event(eb);
	else	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker_base = w->base;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
		cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
		cfg->delay_close, NULL);
	w->env->outnet = w->back;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->add_sub = &mesh_add_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	return w;
}

struct libworker* libworker_create_event(struct ub_ctx* ctx,
	struct ub_event_base* eb)
{
	return libworker_setup(ctx, 0, eb);
}
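/*
 * Background worker command protocol: the library side and the bg worker
 * talk over two pipes (tubes). Commands from the library
 * (UB_LIBCMD_NEWQUERY, UB_LIBCMD_CANCEL, UB_LIBCMD_QUIT) arrive on
 * ctx->qq_pipe and are dispatched by libworker_do_cmd() below; serialized
 * answers go back on ctx->rr_pipe via add_bg_result().
 */
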
/** handle cancel command for bg worker */
static void
handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
{
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_deserialize_cancel(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_cancel(w->ctx, buf, len);
	}
	if(!q) {
		/* probably simply lookup failed, i.e. the message had been
		 * processed and answered before the cancel arrived */
		return;
	}
	q->cancelled = 1;
	free(buf);
}

/** do control command coming into bg server */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
			/* fallthrough */
		case UB_LIBCMD_QUIT:
			free(msg);
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}

/** handle control command coming into server */
void
libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* msg, size_t len, int err, void* arg)
{
	struct libworker* w = (struct libworker*)arg;

	if(err != 0) {
		free(msg);
		/* it is of no use to go on, exit */
		comm_base_exit(w->base);
		return;
	}
	libworker_do_cmd(w, msg, len); /* also frees the buf */
}

/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	w->want_quit = 1;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}

int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1, NULL);
		if(!w) return UB_NOMEM;
		w->is_bg_thread = 1;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				w = libworker_setup(ctx, 1, NULL);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				/* close non-used parts, so that the worker
				 * bgprocess gets 'pipe closed' when the
				 * main process exits */
				tube_close_read(ctx->qq_pipe);
				tube_close_write(ctx->rr_pipe);
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}
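/*
 * Sketch of the asynchronous path that the background worker above serves,
 * using the public API in unbound.h: ub_resolve_async() hands a serialized
 * UB_LIBCMD_NEWQUERY to the bg worker, and the serialized answer comes back
 * over rr_pipe to the library side. The callback matches ub_callback_type;
 * the function names, hostname and error handling are illustrative.
 *
 *   #include <stdio.h>
 *   #include <unbound.h>
 *
 *   static void done(void* mydata, int err, struct ub_result* result)
 *   {
 *       (void)mydata;
 *       if(err == 0) {
 *           printf("async rcode %d\n", result->rcode);
 *           ub_resolve_free(result);
 *       }
 *   }
 *
 *   int example_async(struct ub_ctx* ctx)
 *   {
 *       int async_id = 0;
 *       // rrtype 1 (A), rrclass 1 (IN)
 *       int err = ub_resolve_async(ctx, "www.example.com", 1, 1,
 *           NULL, done, &async_id);
 *       if(err != 0)
 *           return err;
 *       return ub_wait(ctx); // or poll ub_fd() and call ub_process()
 *   }
 */
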
/** insert canonname */
static int
fill_canon(struct ub_result* res, uint8_t* s)
{
	char buf[255+2];
	dname_str(s, buf);
	res->canonname = strdup(buf);
	return res->canonname != 0;
}

/** fill data into result */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* data;
	res->ttl = 0;
	if(!answer) {
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		if(rep->rrset_count != 0)
			res->ttl = (int)rep->ttl;
		res->data = (char**)calloc(1, sizeof(char*));
		res->len = (int*)calloc(1, sizeof(int));
		return (res->data && res->len);
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->data || !res->len)
		return 0; /* out of memory */
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i])
			return 0; /* out of memory */
	}
	/* ttl for positive answers, from CNAME and answer RRs */
	if(data->count != 0) {
		size_t j;
		res->ttl = (int)data->ttl;
		for(j=0; j<rep->an_numrrsets; j++) {
			struct packed_rrset_data* d =
				(struct packed_rrset_data*)rep->rrsets[j]->
				entry.data;
			if((int)d->ttl < res->ttl)
				res->ttl = (int)d->ttl;
		}
	}
	/* ttl for negative answers */
	if(data->count == 0 && rep->rrset_count != 0)
		res->ttl = (int)rep->ttl;
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}

/** fill result from parsed message, on error fills servfail */
void
libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply_in_temp_region(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq, rep))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus ||
		msg_security == sec_status_secure_sentinel_fail)
		res->bogus = 1;
}
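/*
 * The arrays filled in by fill_res() hold raw rdata (the 2-byte rdlength
 * prefix is stripped) and are NULL/0 terminated. A caller-side sketch that
 * prints A records from a ub_result; the use of inet_ntop and the 4-byte
 * length check are illustrative, not taken from this file.
 *
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <netinet/in.h>
 *   #include <arpa/inet.h>
 *   #include <unbound.h>
 *
 *   static void print_a_records(struct ub_result* result)
 *   {
 *       int i;
 *       for(i = 0; result->data[i] != NULL; i++) {
 *           struct in_addr a;
 *           char txt[INET_ADDRSTRLEN];
 *           if(result->qtype != 1 || result->len[i] != 4)
 *               continue; // only 4-byte A rdata
 *           memcpy(&a, result->data[i], 4);
 *           printf("%s\n", inet_ntop(AF_INET, &a, txt, sizeof(txt)));
 *       }
 *   }
 */
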
/** fillup fg results */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	q->res->was_ratelimited = was_ratelimited;
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = sec_status_unchecked;
	q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
	q->msg_len = sldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}

void
libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* fg query is done; exit comm base */
	comm_base_exit(q->w->base);

	libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
}

/** setup qinfo and edns */
static int
setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
	struct query_info* qinfo, struct edns_data* edns)
{
	qinfo->qtype = (uint16_t)q->res->qtype;
	qinfo->qclass = (uint16_t)q->res->qclass;
	qinfo->local_alias = NULL;
	qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
	if(!qinfo->qname) {
		return 0;
	}
	edns->edns_present = 1;
	edns->ext_rcode = 0;
	edns->edns_version = 0;
	edns->bits = EDNS_DO;
	edns->opt_list = NULL;
	if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
		edns->udp_size = (uint16_t)sldns_buffer_capacity(
			w->back->udp_buff);
	else	edns->udp_size = 65535;
	return 1;
}

int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
{
	struct libworker* w = libworker_setup(ctx, 0, NULL);
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		libworker_delete(w);
		return UB_SYNTAX;
	}
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);

	/* wait for reply */
	comm_base_dispatch(w->base);

	libworker_delete(w);
	return UB_NOERROR;
}
void
libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	ub_event_callback_type cb = q->cb_event;
	void* cb_arg = q->cb_arg;
	int cancelled = q->cancelled;

	/* delete it now */
	struct ub_ctx* ctx = q->w->ctx;
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(!cancelled) {
		/* call callback */
		int sec = 0;
		if(s == sec_status_bogus)
			sec = 1;
		else if(s == sec_status_secure)
			sec = 2;
		(*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL),
			(buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus,
			was_ratelimited);
	}
}

int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
	int* async_id)
{
	struct libworker* w = ctx->event_worker;
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns))
		return UB_SYNTAX;
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	/* process new query */
	if(async_id)
		*async_id = q->querynum;
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_event_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);
	return UB_NOERROR;
}
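/*
 * libworker_attach_mesh() serves the event API declared in unbound-event.h;
 * its callback receives the raw wire packet rather than a parsed ub_result,
 * matching the (*cb)(...) invocation in libworker_event_done_cb() above,
 * where sec is 0 for unchecked/insecure, 1 for bogus, 2 for secure. A
 * sketch of such a callback; the function and variable names are
 * illustrative.
 *
 *   #include <stdio.h>
 *   #include <unbound-event.h>
 *
 *   static void my_event_cb(void* mydata, int rcode, void* packet,
 *       int packet_len, int sec, char* why_bogus, int was_ratelimited)
 *   {
 *       (void)mydata; (void)packet; (void)was_ratelimited;
 *       printf("rcode %d, %d wire bytes, sec %d %s\n", rcode,
 *           packet_len, sec, why_bogus?why_bogus:"");
 *   }
 */
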
/** add result to the bg worker result queue */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
	int err, char* reason, int was_ratelimited)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

	if(w->want_quit) {
		context_query_delete(q);
		return;
	}
	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		if(pkt) {
			q->msg_len = sldns_buffer_remaining(pkt);
			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg) {
				msg = context_serialize_answer(q, UB_NOMEM,
					NULL, &len);
			} else {
				msg = context_serialize_answer(q, err,
					NULL, &len);
			}
		} else {
			msg = context_serialize_answer(q, err, NULL, &len);
		}
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}

void
libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled || q->w->back->want_to_quit) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	if(!buf) {
		buf = q->w->env->scratch_buffer;
	}
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
}


/** handle new query command for bg worker */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
	}
	free(qinfo.qname);
}

void libworker_alloc_cleanup(void* arg)
{
	struct libworker* w = (struct libworker*)arg;
	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
}
struct outbound_entry* libworker_send_query(struct query_info* qinfo,
	uint16_t flags, int dnssec, int want_dnssec, int nocaps,
	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
	size_t zonelen, int ssl_upstream, char* tls_auth_name,
	struct module_qstate* q)
{
	struct libworker* w = (struct libworker*)q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
		want_dnssec, nocaps, q->env->cfg->tcp_upstream, ssl_upstream,
		tls_auth_name, addr, addrlen, zone, zonelen, q,
		libworker_handle_service_reply, e, w->back->udp_buff, q->env);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}

int
libworker_handle_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct module_qstate* q = (struct module_qstate*)arg;
	struct libworker* lw = (struct libworker*)q->env->worker;
	struct outbound_entry e;
	e.qstate = q;
	e.qsent = NULL;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, &e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, &e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
	return 0;
}

int
libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct libworker* lw = (struct libworker*)e->qstate->env->worker;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
	return 0;
}

/* --- fake callbacks for fptr_wlist to work --- */
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
	uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
	int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
	struct sockaddr_storage* ATTR_UNUSED(addr),
	socklen_t ATTR_UNUSED(addrlen),
	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen),
	int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
	struct module_qstate* ATTR_UNUSED(q))
{
	log_assert(0);
	return 0;
}

void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

#ifdef UB_ON_WINDOWS
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
	ATTR_UNUSED(arg)) {
	log_assert(0);
}

void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif /* UB_ON_WINDOWS */