/*
 * unbound.c - unbound validating resolver public API implementation
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains functions to resolve DNS queries and
 * validate the answers. Synchronously and asynchronously.
 *
 */

/* include the public api first, it should be able to stand alone */
#include "libunbound/unbound.h"
#include "libunbound/unbound-event.h"
#include "config.h"
#include <ctype.h>
#include "libunbound/context.h"
#include "libunbound/libworker.h"
#include "util/locks.h"
#include "util/config_file.h"
#include "util/alloc.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/log.h"
#include "util/random.h"
#include "util/net_help.h"
#include "util/tube.h"
#include "services/modstack.h"
#include "services/localzone.h"
#include "services/cache/infra.h"
#include "services/cache/rrset.h"
#include "ldns/sbuffer.h"
#ifdef HAVE_PTHREAD
#include <signal.h>
#endif

#if defined(UB_ON_WINDOWS) && defined (HAVE_WINDOWS_H)
#include <windows.h>
#include <iphlpapi.h>
#endif /* UB_ON_WINDOWS */

/** create context functionality, but no pipes */
static struct ub_ctx* ub_ctx_create_nopipe(void)
{
	struct ub_ctx* ctx;
	unsigned int seed;
#ifdef USE_WINSOCK
	int r;
	WSADATA wsa_data;
#endif

	log_init(NULL, 0, NULL); /* logs to stderr */
	log_ident_set("libunbound");
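	/* On Windows the winsock library has to be brought up before any
	 * socket or pipe calls; the matching WSACleanup() is done in
	 * ub_ctx_delete(). */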
#ifdef USE_WINSOCK
	if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
		log_err("could not init winsock. WSAStartup: %s",
			wsa_strerror(r));
		return NULL;
	}
#endif
	verbosity = 0; /* errors only */
	checklock_start();
	ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
	if(!ctx) {
		errno = ENOMEM;
		return NULL;
	}
	alloc_init(&ctx->superalloc, NULL, 0);
	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
	if(!(ctx->seed_rnd = ub_initstate(seed, NULL))) {
		seed = 0;
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	seed = 0;
	lock_basic_init(&ctx->qqpipe_lock);
	lock_basic_init(&ctx->rrpipe_lock);
	lock_basic_init(&ctx->cfglock);
	ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
	if(!ctx->env) {
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->cfg = config_create_forlib();
	if(!ctx->env->cfg) {
		free(ctx->env);
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->alloc = &ctx->superalloc;
	ctx->env->worker = NULL;
	ctx->env->need_to_validate = 0;
	modstack_init(&ctx->mods);
	rbtree_init(&ctx->queries, &context_query_cmp);
	return ctx;
}

struct ub_ctx*
ub_ctx_create(void)
{
	struct ub_ctx* ctx = ub_ctx_create_nopipe();
	if(!ctx)
		return NULL;
	if((ctx->qq_pipe = tube_create()) == NULL) {
		int e = errno;
		ub_randfree(ctx->seed_rnd);
		config_delete(ctx->env->cfg);
		modstack_desetup(&ctx->mods, ctx->env);
		free(ctx->env);
		free(ctx);
		errno = e;
		return NULL;
	}
	if((ctx->rr_pipe = tube_create()) == NULL) {
		int e = errno;
		tube_delete(ctx->qq_pipe);
		ub_randfree(ctx->seed_rnd);
		config_delete(ctx->env->cfg);
		modstack_desetup(&ctx->mods, ctx->env);
		free(ctx->env);
		free(ctx);
		errno = e;
		return NULL;
	}
	return ctx;
}
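/* ub_ctx_create() above sets up the qq/rr pipes that talk to the background
 * worker (the worker itself is only started on the first ub_resolve_async()
 * call); ub_ctx_create_event() below skips the pipes and processes queries
 * on a caller-supplied event base instead. */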
struct ub_ctx*
ub_ctx_create_event(struct event_base* eb)
{
	struct ub_ctx* ctx = ub_ctx_create_nopipe();
	if(!ctx)
		return NULL;
	/* no pipes, but we have the locks to make sure everything works */
	ctx->created_bg = 0;
	ctx->dothread = 1; /* the processing is in the same process,
		makes ub_cancel and ub_ctx_delete do the right thing */
	ctx->event_base = eb;
	return ctx;
}

/** delete q */
static void
delq(rbnode_t* n, void* ATTR_UNUSED(arg))
{
	struct ctx_query* q = (struct ctx_query*)n;
	context_query_delete(q);
}

/** stop the bg thread */
static void ub_stop_bg(struct ub_ctx* ctx)
{
	/* stop the bg thread */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->created_bg) {
		uint8_t* msg;
		uint32_t len;
		uint32_t cmd = UB_LIBCMD_QUIT;
		lock_basic_unlock(&ctx->cfglock);
		lock_basic_lock(&ctx->qqpipe_lock);
		(void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
			(uint32_t)sizeof(cmd), 0);
		lock_basic_unlock(&ctx->qqpipe_lock);
		lock_basic_lock(&ctx->rrpipe_lock);
		while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
			/* discard all results except a quit confirm */
			if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
				free(msg);
				break;
			}
			free(msg);
		}
		lock_basic_unlock(&ctx->rrpipe_lock);

		/* if bg worker is a thread, wait for it to exit, so that all
		 * resources are really gone. */
		lock_basic_lock(&ctx->cfglock);
		if(ctx->dothread) {
			lock_basic_unlock(&ctx->cfglock);
			ub_thread_join(ctx->bg_tid);
		} else {
			lock_basic_unlock(&ctx->cfglock);
		}
	}
	else {
		lock_basic_unlock(&ctx->cfglock);
	}
}

void
ub_ctx_delete(struct ub_ctx* ctx)
{
	struct alloc_cache* a, *na;
	int do_stop = 1;
	if(!ctx) return;

	/* see if bg thread is created and if threads have been killed */
	/* no locks, because those may be held by terminated threads */
	/* for processes the read pipe is closed and we see that on read */
#ifdef HAVE_PTHREAD
	if(ctx->created_bg && ctx->dothread) {
		if(pthread_kill(ctx->bg_tid, 0) == ESRCH) {
			/* thread has been killed */
			do_stop = 0;
		}
	}
#endif /* HAVE_PTHREAD */
	if(do_stop)
		ub_stop_bg(ctx);
	libworker_delete_event(ctx->event_worker);

	modstack_desetup(&ctx->mods, ctx->env);
	a = ctx->alloc_list;
	while(a) {
		na = a->super;
		a->super = &ctx->superalloc;
		alloc_clear(a);
		free(a);
		a = na;
	}
	local_zones_delete(ctx->local_zones);
	lock_basic_destroy(&ctx->qqpipe_lock);
	lock_basic_destroy(&ctx->rrpipe_lock);
	lock_basic_destroy(&ctx->cfglock);
	tube_delete(ctx->qq_pipe);
	tube_delete(ctx->rr_pipe);
	if(ctx->env) {
		slabhash_delete(ctx->env->msg_cache);
		rrset_cache_delete(ctx->env->rrset_cache);
		infra_delete(ctx->env->infra_cache);
		config_delete(ctx->env->cfg);
		free(ctx->env);
	}
	ub_randfree(ctx->seed_rnd);
	alloc_clear(&ctx->superalloc);
	traverse_postorder(&ctx->queries, delq, NULL);
	free(ctx);
#ifdef USE_WINSOCK
	WSACleanup();
#endif
}

int
ub_ctx_set_option(struct ub_ctx* ctx, const char* opt, const char* val)
{
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_AFTERFINAL;
	}
	if(!config_set_option(ctx->env->cfg, opt, val)) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_SYNTAX;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_get_option(struct ub_ctx* ctx, const char* opt, char** str)
{
	int r;
	lock_basic_lock(&ctx->cfglock);
	r = config_get_option_collate(ctx->env->cfg, opt, str);
	lock_basic_unlock(&ctx->cfglock);
	if(r == 0) r = UB_NOERROR;
	else if(r == 1) r = UB_SYNTAX;
	else if(r == 2) r = UB_NOMEM;
	return r;
}

int
ub_ctx_config(struct ub_ctx* ctx, const char* fname)
{
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_AFTERFINAL;
	}
	if(!config_read(ctx->env->cfg, fname, NULL)) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_SYNTAX;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}
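/* A trust anchor for ub_ctx_add_ta() below is a zone-file style DS or
 * DNSKEY RR in string form, for instance (sketch only, fields to be
 * filled in): ". IN DS <keytag> <algorithm> <digesttype> <hexdigest>". */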
int
ub_ctx_add_ta(struct ub_ctx* ctx, const char* ta)
{
	char* dup = strdup(ta);
	if(!dup) return UB_NOMEM;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_AFTERFINAL;
	}
	if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_list, dup)) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_add_ta_file(struct ub_ctx* ctx, const char* fname)
{
	char* dup = strdup(fname);
	if(!dup) return UB_NOMEM;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_AFTERFINAL;
	}
	if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_file_list, dup)) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_trustedkeys(struct ub_ctx* ctx, const char* fname)
{
	char* dup = strdup(fname);
	if(!dup) return UB_NOMEM;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_AFTERFINAL;
	}
	if(!cfg_strlist_insert(&ctx->env->cfg->trusted_keys_file_list, dup)) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_debuglevel(struct ub_ctx* ctx, int d)
{
	lock_basic_lock(&ctx->cfglock);
	verbosity = d;
	ctx->env->cfg->verbosity = d;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int ub_ctx_debugout(struct ub_ctx* ctx, void* out)
{
	lock_basic_lock(&ctx->cfglock);
	log_file((FILE*)out);
	ctx->logfile_override = 1;
	ctx->log_out = out;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_async(struct ub_ctx* ctx, int dothread)
{
#ifdef THREADS_DISABLED
	if(dothread) /* cannot do threading */
		return UB_NOERROR;
#endif
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_AFTERFINAL;
	}
	ctx->dothread = dothread;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_poll(struct ub_ctx* ctx)
{
	/* no need to hold lock while testing for readability. */
	return tube_poll(ctx->rr_pipe);
}

int
ub_fd(struct ub_ctx* ctx)
{
	return tube_read_fd(ctx->rr_pipe);
}

/** process answer from bg worker; returns 0 on a malformed message,
 * 2 if the caller must invoke the callback (cb, cbarg, err and res are
 * then filled in), and 1 otherwise */
static int
process_answer_detail(struct ub_ctx* ctx, uint8_t* msg, uint32_t len,
	ub_callback_t* cb, void** cbarg, int* err,
	struct ub_result** res)
{
	struct ctx_query* q;
	if(context_serial_getcmd(msg, len) != UB_LIBCMD_ANSWER) {
		log_err("error: bad data from bg worker %d",
			(int)context_serial_getcmd(msg, len));
		return 0;
	}

	lock_basic_lock(&ctx->cfglock);
	q = context_deserialize_answer(ctx, msg, len, err);
	if(!q) {
		lock_basic_unlock(&ctx->cfglock);
		/* probably simply the lookup that failed, i.e.
		 * response returned before cancel was sent out, so noerror */
		return 1;
	}
	log_assert(q->async);

	/* grab cb while locked */
	if(q->cancelled) {
		*cb = NULL;
		*cbarg = NULL;
	} else {
		*cb = q->cb;
		*cbarg = q->cb_arg;
	}
	if(*err) {
		*res = NULL;
		ub_resolve_free(q->res);
	} else {
		/* parse the message, extract rcode, fill result */
		sldns_buffer* buf = sldns_buffer_new(q->msg_len);
		struct regional* region = regional_create();
		*res = q->res;
		(*res)->rcode = LDNS_RCODE_SERVFAIL;
		if(region && buf) {
			sldns_buffer_clear(buf);
			sldns_buffer_write(buf, q->msg, q->msg_len);
			sldns_buffer_flip(buf);
			libworker_enter_result(*res, buf, region,
				q->msg_security);
		}
		(*res)->answer_packet = q->msg;
		(*res)->answer_len = (int)q->msg_len;
		q->msg = NULL;
		sldns_buffer_free(buf);
		regional_destroy(region);
	}
	q->res = NULL;
	/* delete the q from list */
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(*cb) return 2;
	ub_resolve_free(*res);
	return 1;
}

/** process answer from bg worker */
static int
process_answer(struct ub_ctx* ctx, uint8_t* msg, uint32_t len)
{
	int err;
	ub_callback_t cb;
	void* cbarg;
	struct ub_result* res;
	int r;

	r = process_answer_detail(ctx, msg, len, &cb, &cbarg, &err, &res);

	/* no locks held while calling callback, so that library is
	 * re-entrant. */
	if(r == 2)
		(*cb)(cbarg, err, res);

	return r;
}

int
ub_process(struct ub_ctx* ctx)
{
	int r;
	uint8_t* msg;
	uint32_t len;
	while(1) {
		msg = NULL;
		lock_basic_lock(&ctx->rrpipe_lock);
		r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
		lock_basic_unlock(&ctx->rrpipe_lock);
		if(r == 0)
			return UB_PIPE;
		else if(r == -1)
			break;
		if(!process_answer(ctx, msg, len)) {
			free(msg);
			return UB_PIPE;
		}
		free(msg);
	}
	return UB_NOERROR;
}

int
ub_wait(struct ub_ctx* ctx)
{
	int err;
	ub_callback_t cb;
	void* cbarg;
	struct ub_result* res;
	int r;
	uint8_t* msg;
	uint32_t len;
	/* this is basically the same loop as _process(), but with changes.
	 * holds the rrpipe lock and waits with tube_wait */
	while(1) {
		lock_basic_lock(&ctx->rrpipe_lock);
		lock_basic_lock(&ctx->cfglock);
		if(ctx->num_async == 0) {
			lock_basic_unlock(&ctx->cfglock);
			lock_basic_unlock(&ctx->rrpipe_lock);
			break;
		}
		lock_basic_unlock(&ctx->cfglock);

		/* keep rrpipe locked, while
		 * 	o waiting for pipe readable
		 * 	o parsing message
		 * 	o possibly decrementing num_async
		 * do callback without lock
		 */
		r = tube_wait(ctx->rr_pipe);
		if(r) {
			r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
			if(r == 0) {
				lock_basic_unlock(&ctx->rrpipe_lock);
				return UB_PIPE;
			}
			if(r == -1) {
				lock_basic_unlock(&ctx->rrpipe_lock);
				continue;
			}
			r = process_answer_detail(ctx, msg, len,
				&cb, &cbarg, &err, &res);
			lock_basic_unlock(&ctx->rrpipe_lock);
			free(msg);
			if(r == 0)
				return UB_PIPE;
			if(r == 2)
				(*cb)(cbarg, err, res);
		} else {
			lock_basic_unlock(&ctx->rrpipe_lock);
		}
	}
	return UB_NOERROR;
}

int
ub_resolve(struct ub_ctx* ctx, const char* name, int rrtype,
	int rrclass, struct ub_result** result)
{
	struct ctx_query* q;
	int r;
	*result = NULL;

	lock_basic_lock(&ctx->cfglock);
	if(!ctx->finalized) {
		r = context_finalize(ctx);
		if(r) {
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	}
	/* create new ctx_query and attempt to add to the list */
	lock_basic_unlock(&ctx->cfglock);
	q = context_new(ctx, name, rrtype, rrclass, NULL, NULL);
	if(!q)
		return UB_NOMEM;
	/* become a resolver thread for a bit */

	r = libworker_fg(ctx, q);
	if(r) {
		lock_basic_lock(&ctx->cfglock);
		(void)rbtree_delete(&ctx->queries, q->node.key);
		context_query_delete(q);
		lock_basic_unlock(&ctx->cfglock);
		return r;
	}
	q->res->answer_packet = q->msg;
	q->res->answer_len = (int)q->msg_len;
	q->msg = NULL;
	*result = q->res;
	q->res = NULL;

	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_resolve_event(struct ub_ctx* ctx, const char* name, int rrtype,
	int rrclass, void* mydata, ub_event_callback_t callback, int* async_id)
{
	struct ctx_query* q;
	int r;

	if(async_id)
		*async_id = 0;
	lock_basic_lock(&ctx->cfglock);
	if(!ctx->finalized) {
		int r = context_finalize(ctx);
		if(r) {
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	}
	lock_basic_unlock(&ctx->cfglock);
	if(!ctx->event_worker) {
		ctx->event_worker = libworker_create_event(ctx,
			ctx->event_base);
		if(!ctx->event_worker) {
			return UB_INITFAIL;
		}
	}

	/* create new ctx_query and attempt to add to the list */
	q = context_new(ctx, name, rrtype, rrclass, (ub_callback_t)callback,
		mydata);
	if(!q)
		return UB_NOMEM;

	/* attach to mesh */
	if((r=libworker_attach_mesh(ctx, q, async_id)) != 0)
		return r;
	return UB_NOERROR;
}

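/*
 * Usage sketch for the asynchronous interface (illustrative only, not part
 * of the library): callbacks run from ub_wait()/ub_process() in the calling
 * thread, not from the background worker. Names such as my_cb are
 * hypothetical.
 *
 *	static void my_cb(void* my_arg, int err, struct ub_result* result)
 *	{
 *		if(err == 0 && result) {
 *			... inspect result->havedata, result->data ...
 *			ub_resolve_free(result);
 *		}
 *	}
 *	...
 *	int async_id = 0;
 *	ub_resolve_async(ctx, "www.example.com", 1, 1, NULL, my_cb, &async_id);
 *	ub_wait(ctx);  (or select()/poll() on ub_fd() and call ub_process())
 */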
int
ub_resolve_async(struct ub_ctx* ctx, const char* name, int rrtype,
	int rrclass, void* mydata, ub_callback_t callback, int* async_id)
{
	struct ctx_query* q;
	uint8_t* msg = NULL;
	uint32_t len = 0;

	if(async_id)
		*async_id = 0;
	lock_basic_lock(&ctx->cfglock);
	if(!ctx->finalized) {
		int r = context_finalize(ctx);
		if(r) {
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	}
	if(!ctx->created_bg) {
		int r;
		ctx->created_bg = 1;
		lock_basic_unlock(&ctx->cfglock);
		r = libworker_bg(ctx);
		if(r) {
			lock_basic_lock(&ctx->cfglock);
			ctx->created_bg = 0;
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	} else {
		lock_basic_unlock(&ctx->cfglock);
	}

	/* create new ctx_query and attempt to add to the list */
	q = context_new(ctx, name, rrtype, rrclass, callback, mydata);
	if(!q)
		return UB_NOMEM;

	/* write over pipe to background worker */
	lock_basic_lock(&ctx->cfglock);
	msg = context_serialize_new_query(q, &len);
	if(!msg) {
		(void)rbtree_delete(&ctx->queries, q->node.key);
		ctx->num_async--;
		context_query_delete(q);
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOMEM;
	}
	if(async_id)
		*async_id = q->querynum;
	lock_basic_unlock(&ctx->cfglock);

	lock_basic_lock(&ctx->qqpipe_lock);
	if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
		lock_basic_unlock(&ctx->qqpipe_lock);
		free(msg);
		return UB_PIPE;
	}
	lock_basic_unlock(&ctx->qqpipe_lock);
	free(msg);
	return UB_NOERROR;
}

int
ub_cancel(struct ub_ctx* ctx, int async_id)
{
	struct ctx_query* q;
	uint8_t* msg = NULL;
	uint32_t len = 0;
	lock_basic_lock(&ctx->cfglock);
	q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
	if(!q || !q->async) {
		/* it is not there, so nothing to do */
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOID;
	}
	log_assert(q->async);
	q->cancelled = 1;

	/* delete it */
	if(!ctx->dothread) { /* if forked */
		(void)rbtree_delete(&ctx->queries, q->node.key);
		ctx->num_async--;
		msg = context_serialize_cancel(q, &len);
		context_query_delete(q);
		lock_basic_unlock(&ctx->cfglock);
		if(!msg) {
			return UB_NOMEM;
		}
		/* send cancel to background worker */
		lock_basic_lock(&ctx->qqpipe_lock);
		if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
			lock_basic_unlock(&ctx->qqpipe_lock);
			free(msg);
			return UB_PIPE;
		}
		lock_basic_unlock(&ctx->qqpipe_lock);
		free(msg);
	} else {
		lock_basic_unlock(&ctx->cfglock);
	}
	return UB_NOERROR;
}

void
ub_resolve_free(struct ub_result* result)
{
	char** p;
	if(!result) return;
	free(result->qname);
	if(result->canonname != result->qname)
		free(result->canonname);
	if(result->data)
		for(p = result->data; *p; p++)
			free(*p);
	free(result->data);
	free(result->len);
	free(result->answer_packet);
	free(result->why_bogus);
	free(result);
}

const char*
ub_strerror(int err)
{
	switch(err) {
		case UB_NOERROR: return "no error";
		case UB_SOCKET: return "socket io error";
		case UB_NOMEM: return "out of memory";
		case UB_SYNTAX: return "syntax error";
		case UB_SERVFAIL: return "server failure";
		case UB_FORKFAIL: return "could not fork";
		case UB_INITFAIL: return "initialization failure";
		case UB_AFTERFINAL: return "setting change after finalize";
		case UB_PIPE: return "error in pipe communication with async";
		case UB_READFILE: return "error reading file";
		case UB_NOID: return "error async_id does not exist";
		default: return "unknown error";
	}
}
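/*
 * Forwarding sketch (illustrative only): point the context at an upstream
 * recursive server, or clear forwarding again to recurse from the root.
 * The address is an example.
 *
 *	ub_ctx_set_fwd(ctx, "192.0.2.53");
 *	ub_ctx_set_fwd(ctx, NULL);
 */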
int
ub_ctx_set_fwd(struct ub_ctx* ctx, const char* addr)
{
	struct sockaddr_storage storage;
	socklen_t stlen;
	struct config_stub* s;
	char* dupl;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		errno=EINVAL;
		return UB_AFTERFINAL;
	}
	if(!addr) {
		/* disable fwd mode - the root stub should be first. */
		if(ctx->env->cfg->forwards &&
			strcmp(ctx->env->cfg->forwards->name, ".") == 0) {
			s = ctx->env->cfg->forwards;
			ctx->env->cfg->forwards = s->next;
			s->next = NULL;
			config_delstubs(s);
		}
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOERROR;
	}
	lock_basic_unlock(&ctx->cfglock);

	/* check syntax for addr */
	if(!extstrtoaddr(addr, &storage, &stlen)) {
		errno=EINVAL;
		return UB_SYNTAX;
	}

	/* it parses, add root stub in front of list */
	lock_basic_lock(&ctx->cfglock);
	if(!ctx->env->cfg->forwards ||
		strcmp(ctx->env->cfg->forwards->name, ".") != 0) {
		s = calloc(1, sizeof(*s));
		if(!s) {
			lock_basic_unlock(&ctx->cfglock);
			errno=ENOMEM;
			return UB_NOMEM;
		}
		s->name = strdup(".");
		if(!s->name) {
			free(s);
			lock_basic_unlock(&ctx->cfglock);
			errno=ENOMEM;
			return UB_NOMEM;
		}
		s->next = ctx->env->cfg->forwards;
		ctx->env->cfg->forwards = s;
	} else {
		log_assert(ctx->env->cfg->forwards);
		s = ctx->env->cfg->forwards;
	}
	dupl = strdup(addr);
	if(!dupl) {
		lock_basic_unlock(&ctx->cfglock);
		errno=ENOMEM;
		return UB_NOMEM;
	}
	if(!cfg_strlist_insert(&s->addrs, dupl)) {
		free(dupl);
		lock_basic_unlock(&ctx->cfglock);
		errno=ENOMEM;
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}
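/* ub_ctx_resolvconf() below reads "nameserver" entries from a resolv.conf
 * style file (NULL means /etc/resolv.conf, or the adapter DNS servers from
 * GetNetworkParams() on Windows) and installs them with ub_ctx_set_fwd().
 * If the file lists no servers, 127.0.0.1 is used per resolv.conf(5). */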
int
ub_ctx_resolvconf(struct ub_ctx* ctx, const char* fname)
{
	FILE* in;
	int numserv = 0;
	char buf[1024];
	char* parse, *addr;
	int r;

	if(fname == NULL) {
#if !defined(UB_ON_WINDOWS) || !defined(HAVE_WINDOWS_H)
		fname = "/etc/resolv.conf";
#else
		FIXED_INFO *info;
		ULONG buflen = sizeof(*info);
		IP_ADDR_STRING *ptr;

		info = (FIXED_INFO *) malloc(sizeof (FIXED_INFO));
		if (info == NULL)
			return UB_READFILE;

		if (GetNetworkParams(info, &buflen) == ERROR_BUFFER_OVERFLOW) {
			free(info);
			info = (FIXED_INFO *) malloc(buflen);
			if (info == NULL)
				return UB_READFILE;
		}

		if (GetNetworkParams(info, &buflen) == NO_ERROR) {
			int retval=0;
			ptr = &(info->DnsServerList);
			while (ptr) {
				numserv++;
				if((retval=ub_ctx_set_fwd(ctx,
					ptr->IpAddress.String))!=0) {
					free(info);
					return retval;
				}
				ptr = ptr->Next;
			}
			free(info);
			if (numserv==0)
				return UB_READFILE;
			return UB_NOERROR;
		}
		free(info);
		return UB_READFILE;
#endif /* WINDOWS */
	}
	in = fopen(fname, "r");
	if(!in) {
		/* error in errno! perror(fname) */
		return UB_READFILE;
	}
	while(fgets(buf, (int)sizeof(buf), in)) {
		buf[sizeof(buf)-1] = 0;
		parse=buf;
		while(*parse == ' ' || *parse == '\t')
			parse++;
		if(strncmp(parse, "nameserver", 10) == 0) {
			numserv++;
			parse += 10; /* skip 'nameserver' */
			/* skip whitespace */
			while(*parse == ' ' || *parse == '\t')
				parse++;
			addr = parse;
			/* skip [0-9a-fA-F.:]*, i.e. IP4 and IP6 address */
			while(isxdigit((unsigned char)*parse) || *parse=='.'
				|| *parse==':')
				parse++;
			/* terminate after the address, remove newline */
			*parse = 0;

			if((r = ub_ctx_set_fwd(ctx, addr)) != UB_NOERROR) {
				fclose(in);
				return r;
			}
		}
	}
	fclose(in);
	if(numserv == 0) {
		/* from resolv.conf(5) if none given, use localhost */
		return ub_ctx_set_fwd(ctx, "127.0.0.1");
	}
	return UB_NOERROR;
}
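/* ub_ctx_hosts() below loads /etc/hosts style entries (NULL picks the
 * system hosts file) as local A/AAAA data, so those names are answered
 * from the context itself. */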
int
ub_ctx_hosts(struct ub_ctx* ctx, const char* fname)
{
	FILE* in;
	char buf[1024], ldata[1024];
	char* parse, *addr, *name, *ins;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		errno=EINVAL;
		return UB_AFTERFINAL;
	}
	lock_basic_unlock(&ctx->cfglock);
	if(fname == NULL) {
#if defined(UB_ON_WINDOWS) && defined(HAVE_WINDOWS_H)
		/*
		 * If this is Windows NT/XP/2K it's in
		 * %WINDIR%\system32\drivers\etc\hosts.
		 * If this is Windows 95/98/Me it's in %WINDIR%\hosts.
		 */
		name = getenv("WINDIR");
		if (name != NULL) {
			int retval=0;
			snprintf(buf, sizeof(buf), "%s%s", name,
				"\\system32\\drivers\\etc\\hosts");
			if((retval=ub_ctx_hosts(ctx, buf)) !=0 ) {
				snprintf(buf, sizeof(buf), "%s%s", name,
					"\\hosts");
				retval=ub_ctx_hosts(ctx, buf);
			}
			/* the getenv() result is owned by the environment
			 * and must not be freed */
			return retval;
		}
		return UB_READFILE;
#else
		fname = "/etc/hosts";
#endif /* WIN32 */
	}
	in = fopen(fname, "r");
	if(!in) {
		/* error in errno! perror(fname) */
		return UB_READFILE;
	}
	while(fgets(buf, (int)sizeof(buf), in)) {
		buf[sizeof(buf)-1] = 0;
		parse=buf;
		while(*parse == ' ' || *parse == '\t')
			parse++;
		if(*parse == '#')
			continue; /* skip comment */
		/* format: <addr> spaces <name> spaces <name> ... */
		addr = parse;
		/* skip addr */
		while(isxdigit((unsigned char)*parse) || *parse == '.' || *parse == ':')
			parse++;
		if(*parse == '\n' || *parse == 0)
			continue;
		if(*parse == '%')
			continue; /* ignore macOSX fe80::1%lo0 localhost */
		if(*parse != ' ' && *parse != '\t') {
			/* must have whitespace after address */
			fclose(in);
			errno=EINVAL;
			return UB_SYNTAX;
		}
		*parse++ = 0; /* end delimiter for addr ... */
		/* go to names and add them */
		while(*parse) {
			while(*parse == ' ' || *parse == '\t' || *parse=='\n')
				parse++;
			if(*parse == 0 || *parse == '#')
				break;
			/* skip name, allows (too) many printable characters */
			name = parse;
			while('!' <= *parse && *parse <= '~')
				parse++;
			if(*parse)
				*parse++ = 0; /* end delimiter for name */
			snprintf(ldata, sizeof(ldata), "%s %s %s",
				name, str_is_ip6(addr)?"AAAA":"A", addr);
			ins = strdup(ldata);
			if(!ins) {
				/* out of memory */
				fclose(in);
				errno=ENOMEM;
				return UB_NOMEM;
			}
			lock_basic_lock(&ctx->cfglock);
			if(!cfg_strlist_insert(&ctx->env->cfg->local_data,
				ins)) {
				lock_basic_unlock(&ctx->cfglock);
				fclose(in);
				free(ins);
				errno=ENOMEM;
				return UB_NOMEM;
			}
			lock_basic_unlock(&ctx->cfglock);
		}
	}
	fclose(in);
	return UB_NOERROR;
}

/** finalize the context, if not already finalized */
static int ub_ctx_finalize(struct ub_ctx* ctx)
{
	int res = 0;
	lock_basic_lock(&ctx->cfglock);
	if (!ctx->finalized) {
		res = context_finalize(ctx);
	}
	lock_basic_unlock(&ctx->cfglock);
	return res;
}

/* Print local zones and RR data */
int ub_ctx_print_local_zones(struct ub_ctx* ctx)
{
	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	local_zones_print(ctx->local_zones);

	return UB_NOERROR;
}

/* Add a new zone */
int ub_ctx_zone_add(struct ub_ctx* ctx, const char *zone_name,
	const char *zone_type)
{
	enum localzone_type t;
	struct local_zone* z;
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;

	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	if(!local_zone_str2type(zone_type, &t)) {
		return UB_SYNTAX;
	}

	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
		return UB_SYNTAX;
	}

	lock_rw_wrlock(&ctx->local_zones->lock);
	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN))) {
		/* already present in tree */
		lock_rw_wrlock(&z->lock);
		z->type = t; /* update type anyway */
		lock_rw_unlock(&z->lock);
		lock_rw_unlock(&ctx->local_zones->lock);
		free(nm);
		return UB_NOERROR;
	}
	if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN, t)) {
		lock_rw_unlock(&ctx->local_zones->lock);
		return UB_NOMEM;
	}
	lock_rw_unlock(&ctx->local_zones->lock);
	return UB_NOERROR;
}

/* Remove zone */
int ub_ctx_zone_remove(struct ub_ctx* ctx, const char *zone_name)
{
	struct local_zone* z;
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;

	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
		return UB_SYNTAX;
	}

	lock_rw_wrlock(&ctx->local_zones->lock);
	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN))) {
		/* present in tree */
		local_zones_del_zone(ctx->local_zones, z);
	}
	lock_rw_unlock(&ctx->local_zones->lock);
	free(nm);
	return UB_NOERROR;
}
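/*
 * Local data sketch (illustrative only, example names): create a zone and a
 * record that the context answers from local data instead of resolving
 * upstream:
 *
 *	ub_ctx_zone_add(ctx, "example.test.", "static");
 *	ub_ctx_data_add(ctx, "www.example.test. IN A 192.0.2.1");
 */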
/* Add new RR data */
int ub_ctx_data_add(struct ub_ctx* ctx, const char *data)
{
	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	res = local_zones_add_RR(ctx->local_zones, data);
	return (!res) ? UB_NOMEM : UB_NOERROR;
}

/* Remove RR data */
int ub_ctx_data_remove(struct ub_ctx* ctx, const char *data)
{
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;
	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	if(!parse_dname(data, &nm, &nmlen, &nmlabs))
		return UB_SYNTAX;

	local_zones_del_data(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN);

	free(nm);
	return UB_NOERROR;
}

const char* ub_version(void)
{
	return PACKAGE_VERSION;
}

int
ub_ctx_set_event(struct ub_ctx* ctx, struct event_base* base) {
	if (!ctx || !ctx->event_base || !base) {
		return UB_INITFAIL;
	}
	if (ctx->event_base == base) {
		/* already set */
		return UB_NOERROR;
	}

	lock_basic_lock(&ctx->cfglock);
	/* destroy the current worker - safe to pass in NULL */
	libworker_delete_event(ctx->event_worker);
	ctx->event_worker = NULL;
	ctx->event_base = base;
	ctx->created_bg = 0;
	ctx->dothread = 1;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}