/*
 * libunbound/libworker.c - worker thread or process that resolves
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the worker process or thread that performs
 * the DNS resolving and validation. The worker is called by a procedure
 * and if in the background continues until exit, if in the foreground
 * returns from the procedure when done.
 */
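/*
 * Illustrative sketch (assumed typical caller, not part of this file):
 * applications reach this worker through the public libunbound API in
 * unbound.h. The foreground path, ub_resolve() with rrtype 1 (A) and
 * rrclass 1 (IN), runs libworker_fg() below and returns when the answer
 * is complete:
 *
 *	struct ub_ctx* ctx = ub_ctx_create();
 *	struct ub_result* result = NULL;
 *	if(ub_resolve(ctx, "www.example.com", 1, 1, &result) == 0) {
 *		printf("rcode %d secure %d\n", result->rcode, result->secure);
 *		ub_resolve_free(result);
 *	}
 *	ub_ctx_delete(ctx);
 */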
#include "config.h"
#ifdef HAVE_SSL
#include <openssl/ssl.h>
#endif
#include "libunbound/libworker.h"
#include "libunbound/context.h"
#include "libunbound/unbound.h"
#include "libunbound/worker.h"
#include "libunbound/unbound-event.h"
#include "services/outside_network.h"
#include "services/mesh.h"
#include "services/localzone.h"
#include "services/cache/rrset.h"
#include "services/outbound_list.h"
#include "services/authzone.h"
#include "util/fptr_wlist.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/random.h"
#include "util/config_file.h"
#include "util/netevent.h"
#include "util/storage/lookup3.h"
#include "util/storage/slabhash.h"
#include "util/net_help.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"

/** handle new query command for bg worker */
static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);

/** delete libworker env */
static void
libworker_delete_env(struct libworker* w)
{
	if(w->env) {
		outside_network_quit_prepare(w->back);
		mesh_delete(w->env->mesh);
		context_release_alloc(w->ctx, w->env->alloc,
			!w->is_bg || w->is_bg_thread);
		sldns_buffer_free(w->env->scratch_buffer);
		regional_destroy(w->env->scratch);
		forwards_delete(w->env->fwds);
		hints_delete(w->env->hints);
		ub_randfree(w->env->rnd);
		free(w->env);
	}
#ifdef HAVE_SSL
	SSL_CTX_free(w->sslctx);
#endif
	outside_network_delete(w->back);
}

/** delete libworker struct */
static void
libworker_delete(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete(w->base);
	free(w);
}

void
libworker_delete_event(struct libworker* w)
{
	if(!w) return;
	libworker_delete_env(w);
	comm_base_delete_no_base(w->base);
	free(w);
}

/** setup fresh libworker struct */
static struct libworker*
libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
{
	unsigned int seed;
	struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
	struct config_file* cfg = ctx->env->cfg;
	int* ports;
	int numports;
	if(!w) return NULL;
	w->is_bg = is_bg;
	w->ctx = ctx;
	w->env = (struct module_env*)malloc(sizeof(*w->env));
	if(!w->env) {
		free(w);
		return NULL;
	}
	*w->env = *ctx->env;
	w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
	if(!w->env->alloc) {
		libworker_delete(w);
		return NULL;
	}
	w->thread_num = w->env->alloc->thread_num;
	alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
	w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
	w->env->fwds = forwards_create();
	if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
		forwards_delete(w->env->fwds);
		w->env->fwds = NULL;
	}
	w->env->hints = hints_create();
	if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
		hints_delete(w->env->hints);
		w->env->hints = NULL;
	}
	if(cfg->ssl_upstream || (cfg->tls_cert_bundle && cfg->tls_cert_bundle[0]) || cfg->tls_win_cert) {
		w->sslctx = connect_sslctx_create(NULL, NULL,
			cfg->tls_cert_bundle, cfg->tls_win_cert);
		if(!w->sslctx) {
			/* to make the setup fail after unlock */
			hints_delete(w->env->hints);
			w->env->hints = NULL;
		}
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
		!w->env->hints) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker = (struct worker*)w;
	w->env->probe_timer = NULL;
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid() ^
		(((unsigned int)w->thread_num)<<17);
	seed ^= (unsigned int)w->env->alloc->next_id;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	if(!(w->env->rnd = ub_initstate(seed, ctx->seed_rnd))) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		explicit_bzero(&seed, sizeof(seed));
		libworker_delete(w);
		return NULL;
	}
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	if(1) {
		/* primitive lockout for threading: if it overwrites another
		 * thread it is like wiping the cache (which is likely empty
		 * at the start) */
		/* note we are holding the ctx lock in normal threaded
		 * cases so that is solved properly, it is only for many ctx
		 * in different threads that this may clash */
		static int done_raninit = 0;
		if(!done_raninit) {
			done_raninit = 1;
			hash_set_raninit((uint32_t)ub_random(w->env->rnd));
		}
	}
	explicit_bzero(&seed, sizeof(seed));

	if(eb)
		w->base = comm_base_create_event(eb);
	else	w->base = comm_base_create(0);
	if(!w->base) {
		libworker_delete(w);
		return NULL;
	}
	w->env->worker_base = w->base;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_lock(&ctx->cfglock);
	}
	numports = cfg_condense_ports(cfg, &ports);
	if(numports == 0) {
		if(!w->is_bg || w->is_bg_thread) {
			lock_basic_unlock(&ctx->cfglock);
		}
		libworker_delete(w);
		return NULL;
	}
	w->back = outside_network_create(w->base, cfg->msg_buffer_size,
		(size_t)cfg->outgoing_num_ports, cfg->out_ifs,
		cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
		cfg->do_tcp?cfg->outgoing_num_tcp:0,
		w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
		ports, numports, cfg->unwanted_threshold,
		cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
		cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
		cfg->delay_close, NULL);
	w->env->outnet = w->back;
	if(!w->is_bg || w->is_bg_thread) {
		lock_basic_unlock(&ctx->cfglock);
	}
	free(ports);
	if(!w->back) {
		libworker_delete(w);
		return NULL;
	}
	w->env->mesh = mesh_create(&ctx->mods, w->env);
	if(!w->env->mesh) {
		libworker_delete(w);
		return NULL;
	}
	w->env->send_query = &libworker_send_query;
	w->env->detach_subs = &mesh_detach_subs;
	w->env->attach_sub = &mesh_attach_sub;
	w->env->add_sub = &mesh_add_sub;
	w->env->kill_sub = &mesh_state_delete;
	w->env->detect_cycle = &mesh_detect_cycle;
	comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
	return w;
}

struct libworker* libworker_create_event(struct ub_ctx* ctx,
	struct ub_event_base* eb)
{
	return libworker_setup(ctx, 0, eb);
}
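/*
 * Illustrative sketch (assumed typical caller, not part of this file):
 * the background worker below serves queries submitted with
 * ub_resolve_async(). Commands reach it serialized over the qq_pipe tube
 * and are dispatched by libworker_do_cmd() below; answers travel back
 * over rr_pipe (see add_bg_result()). my_cb is a placeholder callback of
 * type ub_callback_type:
 *
 *	int async_id = 0;
 *	ub_resolve_async(ctx, "www.example.com", 1, 1, NULL, my_cb, &async_id);
 *	ub_wait(ctx);
 */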
/** handle cancel command for bg worker */
static void
handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
{
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_deserialize_cancel(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_cancel(w->ctx, buf, len);
	}
	if(!q) {
		/* probably simply lookup failed, i.e. the message had been
		 * processed and answered before the cancel arrived */
		return;
	}
	q->cancelled = 1;
	free(buf);
}

/** do control command coming into bg server */
static void
libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
{
	switch(context_serial_getcmd(msg, len)) {
		default:
		case UB_LIBCMD_ANSWER:
			log_err("unknown command for bg worker %d",
				(int)context_serial_getcmd(msg, len));
			/* and fall through to quit */
			/* fallthrough */
		case UB_LIBCMD_QUIT:
			free(msg);
			comm_base_exit(w->base);
			break;
		case UB_LIBCMD_NEWQUERY:
			handle_newq(w, msg, len);
			break;
		case UB_LIBCMD_CANCEL:
			handle_cancel(w, msg, len);
			break;
	}
}

/** handle control command coming into server */
void
libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* msg, size_t len, int err, void* arg)
{
	struct libworker* w = (struct libworker*)arg;

	if(err != 0) {
		free(msg);
		/* it is of no use to go on, exit */
		comm_base_exit(w->base);
		return;
	}
	libworker_do_cmd(w, msg, len); /* also frees the buf */
}

/** the background thread func */
static void*
libworker_dobg(void* arg)
{
	/* setup */
	uint32_t m;
	struct libworker* w = (struct libworker*)arg;
	struct ub_ctx* ctx;
	if(!w) {
		log_err("libunbound bg worker init failed, nomem");
		return NULL;
	}
	ctx = w->ctx;
	log_thread_set(&w->thread_num);
#ifdef THREADS_DISABLED
	/* we are forked */
	w->is_bg_thread = 0;
	/* close non-used parts of the pipes */
	tube_close_write(ctx->qq_pipe);
	tube_close_read(ctx->rr_pipe);
#endif
	if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
		libworker_handle_control_cmd, w)) {
		log_err("libunbound bg worker init failed, no bglisten");
		return NULL;
	}
	if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
		log_err("libunbound bg worker init failed, no bgwrite");
		return NULL;
	}

	/* do the work */
	comm_base_dispatch(w->base);

	/* cleanup */
	m = UB_LIBCMD_QUIT;
	w->want_quit = 1;
	tube_remove_bg_listen(w->ctx->qq_pipe);
	tube_remove_bg_write(w->ctx->rr_pipe);
	libworker_delete(w);
	(void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
		(uint32_t)sizeof(m), 0);
#ifdef THREADS_DISABLED
	/* close pipes from forked process before exit */
	tube_close_read(ctx->qq_pipe);
	tube_close_write(ctx->rr_pipe);
#endif
	return NULL;
}

int libworker_bg(struct ub_ctx* ctx)
{
	struct libworker* w;
	/* fork or threadcreate */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->dothread) {
		lock_basic_unlock(&ctx->cfglock);
		w = libworker_setup(ctx, 1, NULL);
		if(!w) return UB_NOMEM;
		w->is_bg_thread = 1;
#ifdef ENABLE_LOCK_CHECKS
		w->thread_num = 1; /* for nicer DEBUG checklocks */
#endif
		ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
	} else {
		lock_basic_unlock(&ctx->cfglock);
#ifndef HAVE_FORK
		/* no fork on windows */
		return UB_FORKFAIL;
#else /* HAVE_FORK */
		switch((ctx->bg_pid=fork())) {
			case 0:
				w = libworker_setup(ctx, 1, NULL);
				if(!w) fatal_exit("out of memory");
				/* close non-used parts of the pipes */
				tube_close_write(ctx->qq_pipe);
				tube_close_read(ctx->rr_pipe);
				(void)libworker_dobg(w);
				exit(0);
				break;
			case -1:
				return UB_FORKFAIL;
			default:
				/* close non-used parts, so that the worker
				 * bgprocess gets 'pipe closed' when the
				 * main process exits */
				tube_close_read(ctx->qq_pipe);
				tube_close_write(ctx->rr_pipe);
				break;
		}
#endif /* HAVE_FORK */
	}
	return UB_NOERROR;
}

/** insert canonname */
static int
fill_canon(struct ub_result* res, uint8_t* s)
{
	char buf[255+2];
	dname_str(s, buf);
	res->canonname = strdup(buf);
	return res->canonname != 0;
}

/** fill data into result */
static int
fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
	uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* data;
	res->ttl = 0;
	if(!answer) {
		if(finalcname) {
			if(!fill_canon(res, finalcname))
				return 0; /* out of memory */
		}
		if(rep->rrset_count != 0)
			res->ttl = (int)rep->ttl;
		res->data = (char**)calloc(1, sizeof(char*));
		res->len = (int*)calloc(1, sizeof(int));
		return (res->data && res->len);
	}
	data = (struct packed_rrset_data*)answer->entry.data;
	if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
		if(!fill_canon(res, answer->rk.dname))
			return 0; /* out of memory */
	} else	res->canonname = NULL;
	res->data = (char**)calloc(data->count+1, sizeof(char*));
	res->len = (int*)calloc(data->count+1, sizeof(int));
	if(!res->data || !res->len)
		return 0; /* out of memory */
	for(i=0; i<data->count; i++) {
		/* remove rdlength from rdata */
		res->len[i] = (int)(data->rr_len[i] - 2);
		res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
		if(!res->data[i])
			return 0; /* out of memory */
	}
	/* ttl for positive answers, from CNAME and answer RRs */
	if(data->count != 0) {
		size_t j;
		res->ttl = (int)data->ttl;
		for(j=0; j<rep->an_numrrsets; j++) {
			struct packed_rrset_data* d =
				(struct packed_rrset_data*)rep->rrsets[j]->
				entry.data;
			if((int)d->ttl < res->ttl)
				res->ttl = (int)d->ttl;
		}
	}
	/* ttl for negative answers */
	if(data->count == 0 && rep->rrset_count != 0)
		res->ttl = (int)rep->ttl;
	res->data[data->count] = NULL;
	res->len[data->count] = 0;
	return 1;
}

/** fill result from parsed message, on error fills servfail */
void
libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
	struct regional* temp, enum sec_status msg_security)
{
	struct query_info rq;
	struct reply_info* rep;
	res->rcode = LDNS_RCODE_SERVFAIL;
	rep = parse_reply_in_temp_region(buf, temp, &rq);
	if(!rep) {
		log_err("cannot parse buf");
		return; /* error parsing buf, or out of memory */
	}
	if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
		reply_find_final_cname_target(&rq, rep), &rq, rep))
		return; /* out of memory */
	/* rcode, havedata, nxdomain, secure, bogus */
	res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
	if(res->data && res->data[0])
		res->havedata = 1;
	if(res->rcode == LDNS_RCODE_NXDOMAIN)
		res->nxdomain = 1;
	if(msg_security == sec_status_secure)
		res->secure = 1;
	if(msg_security == sec_status_bogus ||
		msg_security == sec_status_secure_sentinel_fail)
		res->bogus = 1;
}
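/*
 * Illustrative sketch (assumed client code, not part of this file):
 * fill_res() above stores the answer as NULL-terminated res->data[] and
 * res->len[] arrays with the 2-byte rdlength stripped from each rdata,
 * so a caller can walk a returned ub_result like this (use_rdata() is a
 * placeholder):
 *
 *	int i;
 *	for(i = 0; result->data[i] != NULL; i++)
 *		use_rdata(result->data[i], result->len[i]);
 */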
/** fillup fg results */
static void
libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	q->res->was_ratelimited = was_ratelimited;
	if(why_bogus)
		q->res->why_bogus = strdup(why_bogus);
	if(rcode != 0) {
		q->res->rcode = rcode;
		q->msg_security = s;
		return;
	}

	q->res->rcode = LDNS_RCODE_SERVFAIL;
	q->msg_security = 0;
	q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
	q->msg_len = sldns_buffer_limit(buf);
	if(!q->msg) {
		return; /* the error is in the rcode */
	}

	/* canonname and results */
	q->msg_security = s;
	libworker_enter_result(q->res, buf, q->w->env->scratch, s);
}

void
libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	/* fg query is done; exit comm base */
	comm_base_exit(q->w->base);

	libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
}

/** setup qinfo and edns */
static int
setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
	struct query_info* qinfo, struct edns_data* edns)
{
	qinfo->qtype = (uint16_t)q->res->qtype;
	qinfo->qclass = (uint16_t)q->res->qclass;
	qinfo->local_alias = NULL;
	qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
	if(!qinfo->qname) {
		return 0;
	}
	qinfo->local_alias = NULL;
	edns->edns_present = 1;
	edns->ext_rcode = 0;
	edns->edns_version = 0;
	edns->bits = EDNS_DO;
	edns->opt_list = NULL;
	if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
		edns->udp_size = (uint16_t)sldns_buffer_capacity(
			w->back->udp_buff);
	else	edns->udp_size = 65535;
	return 1;
}

int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
{
	struct libworker* w = libworker_setup(ctx, 0, NULL);
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		libworker_delete(w);
		return UB_SYNTAX;
	}
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		libworker_delete(w);
		free(qinfo.qname);
		return UB_NOERROR;
	}
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_fg_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);

	/* wait for reply */
	comm_base_dispatch(w->base);

	libworker_delete(w);
	return UB_NOERROR;
}
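/*
 * Event-based path (editorial note). The shared ctx->event_worker,
 * presumably created through libworker_create_event() above, enters
 * queries into its mesh via libworker_attach_mesh() below while the
 * application's own event base drives the resolution. On completion,
 * libworker_event_done_cb() hands the raw answer packet, the security
 * value (0 not validated as secure, 1 bogus, 2 secure) and the why_bogus
 * string to the registered ub_event_callback_type callback.
 */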
void
libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status s, char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;
	ub_event_callback_type cb = q->cb_event;
	void* cb_arg = q->cb_arg;
	int cancelled = q->cancelled;

	/* delete it now */
	struct ub_ctx* ctx = q->w->ctx;
	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(!cancelled) {
		/* call callback */
		int sec = 0;
		if(s == sec_status_bogus)
			sec = 1;
		else if(s == sec_status_secure)
			sec = 2;
		(*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL),
			(buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited);
	}
}

int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
	int* async_id)
{
	struct libworker* w = ctx->event_worker;
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	if(!w)
		return UB_INITFAIL;
	if(!setup_qinfo_edns(w, q, &qinfo, &edns))
		return UB_SYNTAX;
	qid = 0;
	qflags = BIT_RD;
	q->w = w;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		free(qinfo.qname);
		libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
			w->back->udp_buff, sec_status_insecure, NULL, 0);
		return UB_NOERROR;
	}
	/* process new query */
	if(async_id)
		*async_id = q->querynum;
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_event_done_cb, q)) {
		free(qinfo.qname);
		return UB_NOMEM;
	}
	free(qinfo.qname);
	return UB_NOERROR;
}

/** add result to the bg worker result queue */
static void
add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
	int err, char* reason, int was_ratelimited)
{
	uint8_t* msg = NULL;
	uint32_t len = 0;

	if(w->want_quit) {
		context_query_delete(q);
		return;
	}
	/* serialize and delete unneeded q */
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		if(pkt) {
			q->msg_len = sldns_buffer_remaining(pkt);
			q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
			if(!q->msg) {
				msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
			} else {
				msg = context_serialize_answer(q, err, NULL, &len);
			}
		} else {
			msg = context_serialize_answer(q, err, NULL, &len);
		}
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		if(reason)
			q->res->why_bogus = strdup(reason);
		q->res->was_ratelimited = was_ratelimited;
		msg = context_serialize_answer(q, err, pkt, &len);
		(void)rbtree_delete(&w->ctx->queries, q->node.key);
		w->ctx->num_async--;
		context_query_delete(q);
	}

	if(!msg) {
		log_err("out of memory for async answer");
		return;
	}
	if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
		log_err("out of memory for async answer");
		return;
	}
}

void
libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
	char* why_bogus, int was_ratelimited)
{
	struct ctx_query* q = (struct ctx_query*)arg;

	if(q->cancelled || q->w->back->want_to_quit) {
		if(q->w->is_bg_thread) {
			/* delete it now */
			struct ub_ctx* ctx = q->w->ctx;
			lock_basic_lock(&ctx->cfglock);
			(void)rbtree_delete(&ctx->queries, q->node.key);
			ctx->num_async--;
			context_query_delete(q);
			lock_basic_unlock(&ctx->cfglock);
		}
		/* cancelled, do not give answer */
		return;
	}
	q->msg_security = s;
	if(!buf) {
		buf = q->w->env->scratch_buffer;
	}
	if(rcode != 0) {
		error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
	}
	add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
}


/** handle new query command for bg worker */
static void
handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
{
	uint16_t qflags, qid;
	struct query_info qinfo;
	struct edns_data edns;
	struct ctx_query* q;
	if(w->is_bg_thread) {
		lock_basic_lock(&w->ctx->cfglock);
		q = context_lookup_new_query(w->ctx, buf, len);
		lock_basic_unlock(&w->ctx->cfglock);
	} else {
		q = context_deserialize_new_query(w->ctx, buf, len);
	}
	free(buf);
	if(!q) {
		log_err("failed to deserialize newq");
		return;
	}
	if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
		add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
		return;
	}
	qid = 0;
	qflags = BIT_RD;
	/* see if there is a fixed answer */
	sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
	sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
	if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
		w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
		NULL, 0, NULL, 0, NULL)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
		w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
		regional_free_all(w->env->scratch);
		q->msg_security = sec_status_insecure;
		add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
		free(qinfo.qname);
		return;
	}
	q->w = w;
	/* process new query */
	if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
		w->back->udp_buff, qid, libworker_bg_done_cb, q)) {
		add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
	}
	free(qinfo.qname);
}

void libworker_alloc_cleanup(void* arg)
{
	struct libworker* w = (struct libworker*)arg;
	slabhash_clear(&w->env->rrset_cache->table);
	slabhash_clear(w->env->msg_cache);
}

struct outbound_entry* libworker_send_query(struct query_info* qinfo,
	uint16_t flags, int dnssec, int want_dnssec, int nocaps,
	struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
	size_t zonelen, int ssl_upstream, char* tls_auth_name,
	struct module_qstate* q)
{
	struct libworker* w = (struct libworker*)q->env->worker;
	struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
		q->region, sizeof(*e));
	if(!e)
		return NULL;
	e->qstate = q;
	e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
		want_dnssec, nocaps, q->env->cfg->tcp_upstream, ssl_upstream,
		tls_auth_name, addr, addrlen, zone, zonelen, q,
		libworker_handle_service_reply, e, w->back->udp_buff, q->env);
	if(!e->qsent) {
		return NULL;
	}
	return e;
}

int
libworker_handle_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct module_qstate* q = (struct module_qstate*)arg;
	struct libworker* lw = (struct libworker*)q->env->worker;
	struct outbound_entry e;
	e.qstate = q;
	e.qsent = NULL;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, &e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, &e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, &e, reply_info, NETEVENT_NOERROR);
	return 0;
}

int
libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
	struct comm_reply* reply_info)
{
	struct outbound_entry* e = (struct outbound_entry*)arg;
	struct libworker* lw = (struct libworker*)e->qstate->env->worker;

	if(error != 0) {
		mesh_report_reply(lw->env->mesh, e, reply_info, error);
		return 0;
	}
	/* sanity check. */
	if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
		|| LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
			LDNS_PACKET_QUERY
		|| LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
		/* error becomes timeout for the module as if this reply
		 * never arrived. */
		mesh_report_reply(lw->env->mesh, e, reply_info,
			NETEVENT_TIMEOUT);
		return 0;
	}
	mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
	return 0;
}

/* --- fake callbacks for fptr_wlist to work --- */
void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
	uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
	int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int worker_handle_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(reply_info))
{
	log_assert(0);
	return 0;
}

int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
	void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	log_assert(0);
	return 0;
}

void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
	uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
	int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
	struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen),
	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen),
	int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
	struct module_qstate* ATTR_UNUSED(q))
{
	log_assert(0);
	return 0;
}

void
worker_alloc_cleanup(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_start_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

void worker_stop_accept(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
{
	log_assert(0);
	return 0;
}

int
codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
{
	log_assert(0);
	return 0;
}

void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
{
	log_assert(0);
}

#ifdef UB_ON_WINDOWS
void
worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
	ATTR_UNUSED(arg)) {
	log_assert(0);
}

void
wsvc_cron_cb(void* ATTR_UNUSED(arg))
{
	log_assert(0);
}
#endif /* UB_ON_WINDOWS */