/*
 * unbound.c - unbound validating resolver public API implementation
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains functions to resolve DNS queries and
 * validate the answers, both synchronously and asynchronously.
 */
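
/*
 * Typical synchronous use of this API, as an illustrative sketch only
 * (not part of the library code; error handling abbreviated). The qtype
 * and qclass arguments are plain integers, here 1 (A) and 1 (IN):
 *
 *	struct ub_ctx* ctx = ub_ctx_create();
 *	struct ub_result* result = NULL;
 *	int err = ub_resolve(ctx, "www.nlnetlabs.nl", 1, 1, &result);
 *	if(err != 0)
 *		printf("resolve error: %s\n", ub_strerror(err));
 *	else if(result->havedata)
 *		printf("%s has an address\n", result->qname);
 *	ub_resolve_free(result);
 *	ub_ctx_delete(ctx);
 *
 * Asynchronous use goes through ub_resolve_async() with ub_process() or
 * ub_wait(); see the sketch above those functions further down.
 */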

/* include the public api first, it should be able to stand alone */
#include "libunbound/unbound.h"
#include "libunbound/unbound-event.h"
#include "config.h"
#include <ctype.h>
#include "libunbound/context.h"
#include "libunbound/libworker.h"
#include "util/locks.h"
#include "util/config_file.h"
#include "util/alloc.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/log.h"
#include "util/random.h"
#include "util/net_help.h"
#include "util/tube.h"
#include "util/ub_event.h"
#include "util/edns.h"
#include "services/modstack.h"
#include "services/localzone.h"
#include "services/cache/infra.h"
#include "services/cache/rrset.h"
#include "services/authzone.h"
#include "services/listen_dnsport.h"
#include "sldns/sbuffer.h"
#ifdef HAVE_PTHREAD
#include <signal.h>
#endif
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_TIME_H
#include <time.h>
#endif

#if defined(UB_ON_WINDOWS) && defined (HAVE_WINDOWS_H)
#include <windows.h>
#include <iphlpapi.h>
#endif /* UB_ON_WINDOWS */

/** store that the logfile has a debug override */
int ctx_logfile_overridden = 0;

/** create context functionality, but no pipes */
static struct ub_ctx* ub_ctx_create_nopipe(void)
{
	struct ub_ctx* ctx;
#ifdef USE_WINSOCK
	int r;
	WSADATA wsa_data;
#endif

	checklock_start();
	if(!ctx_logfile_overridden)
		log_init(NULL, 0, NULL); /* logs to stderr */
	log_ident_set("libunbound");
#ifdef USE_WINSOCK
	if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
		log_err("could not init winsock. WSAStartup: %s",
			wsa_strerror(r));
		return NULL;
	}
#endif
	verbosity = NO_VERBOSE; /* errors only */
	checklock_start();
	ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
	if(!ctx) {
		errno = ENOMEM;
		return NULL;
	}
	alloc_init(&ctx->superalloc, NULL, 0);
	if(!(ctx->seed_rnd = ub_initstate(NULL))) {
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	lock_basic_init(&ctx->qqpipe_lock);
	lock_basic_init(&ctx->rrpipe_lock);
	lock_basic_init(&ctx->cfglock);
	ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
	if(!ctx->env) {
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->cfg = config_create_forlib();
	if(!ctx->env->cfg) {
		free(ctx->env);
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	/* init edns_known_options */
	if(!edns_known_options_init(ctx->env)) {
		config_delete(ctx->env->cfg);
		free(ctx->env);
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->auth_zones = auth_zones_create();
	if(!ctx->env->auth_zones) {
		edns_known_options_delete(ctx->env);
		config_delete(ctx->env->cfg);
		free(ctx->env);
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}
	ctx->env->edns_strings = edns_strings_create();
	if(!ctx->env->edns_strings) {
		auth_zones_delete(ctx->env->auth_zones);
		edns_known_options_delete(ctx->env);
		config_delete(ctx->env->cfg);
		free(ctx->env);
		ub_randfree(ctx->seed_rnd);
		free(ctx);
		errno = ENOMEM;
		return NULL;
	}

	ctx->env->alloc = &ctx->superalloc;
	ctx->env->worker = NULL;
	ctx->env->need_to_validate = 0;
	modstack_init(&ctx->mods);
	rbtree_init(&ctx->queries, &context_query_cmp);
	return ctx;
}

struct ub_ctx*
ub_ctx_create(void)
{
	struct ub_ctx* ctx = ub_ctx_create_nopipe();
	if(!ctx)
		return NULL;
	if((ctx->qq_pipe = tube_create()) == NULL) {
		int e = errno;
		ub_randfree(ctx->seed_rnd);
		config_delete(ctx->env->cfg);
		modstack_desetup(&ctx->mods, ctx->env);
		listen_desetup_locks();
		edns_known_options_delete(ctx->env);
		edns_strings_delete(ctx->env->edns_strings);
		free(ctx->env);
		free(ctx);
		errno = e;
		return NULL;
	}
	if((ctx->rr_pipe = tube_create()) == NULL) {
		int e = errno;
		tube_delete(ctx->qq_pipe);
		ub_randfree(ctx->seed_rnd);
		config_delete(ctx->env->cfg);
		modstack_desetup(&ctx->mods, ctx->env);
		listen_desetup_locks();
		edns_known_options_delete(ctx->env);
		edns_strings_delete(ctx->env->edns_strings);
		free(ctx->env);
		free(ctx);
		errno = e;
		return NULL;
	}
	return ctx;
}

struct ub_ctx*
ub_ctx_create_ub_event(struct ub_event_base* ueb)
{
	struct ub_ctx* ctx = ub_ctx_create_nopipe();
	if(!ctx)
		return NULL;
	/* no pipes, but we have the locks to make sure everything works */
	ctx->created_bg = 0;
	ctx->dothread = 1; /* the processing is in the same process,
		makes ub_cancel and ub_ctx_delete do the right thing */
	ctx->event_base = ueb;
	return ctx;
}

struct ub_ctx*
ub_ctx_create_event(struct event_base* eb)
{
	struct ub_ctx* ctx = ub_ctx_create_nopipe();
	if(!ctx)
		return NULL;
	/* no pipes, but we have the locks to make sure everything works */
	ctx->created_bg = 0;
	ctx->dothread = 1; /* the processing is in the same process,
		makes ub_cancel and ub_ctx_delete do the right thing */
	ctx->event_base = ub_libevent_event_base(eb);
	if (!ctx->event_base) {
		ub_ctx_delete(ctx);
		return NULL;
	}
	ctx->event_base_malloced = 1;
	return ctx;
}

/** delete q */
static void
delq(rbnode_type* n, void* ATTR_UNUSED(arg))
{
	struct ctx_query* q = (struct ctx_query*)n;
	context_query_delete(q);
}

/** stop the bg thread */
static void ub_stop_bg(struct ub_ctx* ctx)
{
	/* stop the bg thread */
	lock_basic_lock(&ctx->cfglock);
	if(ctx->created_bg) {
		uint8_t* msg;
		uint32_t len;
		uint32_t cmd = UB_LIBCMD_QUIT;
		lock_basic_unlock(&ctx->cfglock);
		lock_basic_lock(&ctx->qqpipe_lock);
		(void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
			(uint32_t)sizeof(cmd), 0);
		lock_basic_unlock(&ctx->qqpipe_lock);
		lock_basic_lock(&ctx->rrpipe_lock);
		while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
			/* discard all results except a quit confirm */
			if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
				free(msg);
				break;
			}
			free(msg);
		}
		lock_basic_unlock(&ctx->rrpipe_lock);

		/* if bg worker is a thread, wait for it to exit, so that all
		 * resources are really gone. */
		lock_basic_lock(&ctx->cfglock);
		if(ctx->dothread) {
			lock_basic_unlock(&ctx->cfglock);
			ub_thread_join(ctx->bg_tid);
		} else {
			lock_basic_unlock(&ctx->cfglock);
#ifndef UB_ON_WINDOWS
			if(waitpid(ctx->bg_pid, NULL, 0) == -1) {
				if(verbosity > 2)
					log_err("waitpid: %s", strerror(errno));
			}
#endif
		}
	}
	else {
		lock_basic_unlock(&ctx->cfglock);
	}
}

void
ub_ctx_delete(struct ub_ctx* ctx)
{
	struct alloc_cache* a, *na;
	int do_stop = 1;
	if(!ctx) return;

	/* if the delete is called but it has forked, and before the fork
	 * the context was finalized, then the bg worker is not stopped
	 * from here. There is one worker, but two contexts that refer to
	 * it and only one should clean up, the one with getpid == pipe_pid.*/
	if(ctx->created_bg && ctx->pipe_pid != getpid()) {
		do_stop = 0;
#ifndef USE_WINSOCK
		/* Stop events from getting deregistered, if the backend is
		 * epoll, the epoll fd is the same as the other process.
		 * That process should deregister them. */
		if(ctx->qq_pipe->listen_com)
			ctx->qq_pipe->listen_com->event_added = 0;
		if(ctx->qq_pipe->res_com)
			ctx->qq_pipe->res_com->event_added = 0;
		if(ctx->rr_pipe->listen_com)
			ctx->rr_pipe->listen_com->event_added = 0;
		if(ctx->rr_pipe->res_com)
			ctx->rr_pipe->res_com->event_added = 0;
#endif
	}
	/* see if bg thread is created and if threads have been killed */
	/* no locks, because those may be held by terminated threads */
	/* for processes the read pipe is closed and we see that on read */
#ifdef HAVE_PTHREAD
	if(ctx->created_bg && ctx->dothread && do_stop) {
		if(pthread_kill(ctx->bg_tid, 0) == ESRCH) {
			/* thread has been killed */
			do_stop = 0;
		}
	}
#endif /* HAVE_PTHREAD */
	if(do_stop)
		ub_stop_bg(ctx);
	if(ctx->created_bg && ctx->pipe_pid != getpid() && ctx->thread_worker) {
		/* This delete is happening from a different process. Delete
		 * the thread worker from this process memory space. The
		 * thread is not there to do so, so it is freed here. */
		struct ub_event_base* evbase = comm_base_internal(
			ctx->thread_worker->base);
		libworker_delete_event(ctx->thread_worker);
		ctx->thread_worker = NULL;
#ifdef USE_MINI_EVENT
		ub_event_base_free(evbase);
#else
		/* cannot event_base_free, because the epoll_fd cleanup
		 * in libevent could stop the original event_base in the
		 * other process from working. */
		free(evbase);
#endif
	}
	libworker_delete_event(ctx->event_worker);

	modstack_desetup(&ctx->mods, ctx->env);
	a = ctx->alloc_list;
	while(a) {
		na = a->super;
		a->super = &ctx->superalloc;
		alloc_clear(a);
		free(a);
		a = na;
	}
	local_zones_delete(ctx->local_zones);
	lock_basic_destroy(&ctx->qqpipe_lock);
	lock_basic_destroy(&ctx->rrpipe_lock);
	lock_basic_destroy(&ctx->cfglock);
	tube_delete(ctx->qq_pipe);
	tube_delete(ctx->rr_pipe);
	if(ctx->env) {
		slabhash_delete(ctx->env->msg_cache);
		rrset_cache_delete(ctx->env->rrset_cache);
		infra_delete(ctx->env->infra_cache);
		config_delete(ctx->env->cfg);
		edns_known_options_delete(ctx->env);
		edns_strings_delete(ctx->env->edns_strings);
		auth_zones_delete(ctx->env->auth_zones);
		free(ctx->env);
	}
	ub_randfree(ctx->seed_rnd);
	alloc_clear(&ctx->superalloc);
	listen_desetup_locks();
	traverse_postorder(&ctx->queries, delq, NULL);
	if(ctx_logfile_overridden) {
		log_file(NULL);
		ctx_logfile_overridden = 0;
	}
	if(ctx->event_base_malloced)
		free(ctx->event_base);
	free(ctx);
#ifdef USE_WINSOCK
	WSACleanup();
#endif
}

int
ub_ctx_set_option(struct ub_ctx* ctx, const char* opt, const char* val)
{
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_AFTERFINAL;
	}
	if(!config_set_option(ctx->env->cfg, opt, val)) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_SYNTAX;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_get_option(struct ub_ctx* ctx, const char* opt, char** str)
{
	int r;
	lock_basic_lock(&ctx->cfglock);
	r = config_get_option_collate(ctx->env->cfg, opt, str);
	lock_basic_unlock(&ctx->cfglock);
	if(r == 0) r = UB_NOERROR;
	else if(r == 1) r = UB_SYNTAX;
	else if(r == 2) r = UB_NOMEM;
	return r;
}

int
ub_ctx_config(struct ub_ctx* ctx, const char* fname)
{
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_AFTERFINAL;
	}
	if(!config_read(ctx->env->cfg, fname, NULL)) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_SYNTAX;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_add_ta(struct ub_ctx* ctx, const char* ta)
{
	char* dup = strdup(ta);
	if(!dup) return UB_NOMEM;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_AFTERFINAL;
	}
	if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_list, dup)) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_add_ta_file(struct ub_ctx* ctx, const char* fname)
{
	char* dup = strdup(fname);
	if(!dup) return UB_NOMEM;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_AFTERFINAL;
	}
	if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_file_list, dup)) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int ub_ctx_add_ta_autr(struct ub_ctx* ctx, const char* fname)
{
	char* dup = strdup(fname);
	if(!dup) return UB_NOMEM;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_AFTERFINAL;
	}
	if(!cfg_strlist_insert(&ctx->env->cfg->auto_trust_anchor_file_list,
		dup)) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_trustedkeys(struct ub_ctx* ctx, const char* fname)
{
	char* dup = strdup(fname);
	if(!dup) return UB_NOMEM;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		free(dup);
		return UB_AFTERFINAL;
	}
	if(!cfg_strlist_insert(&ctx->env->cfg->trusted_keys_file_list, dup)) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_debuglevel(struct ub_ctx* ctx, int d)
{
	lock_basic_lock(&ctx->cfglock);
	verbosity = d;
	ctx->env->cfg->verbosity = d;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int ub_ctx_debugout(struct ub_ctx* ctx, void* out)
{
	lock_basic_lock(&ctx->cfglock);
	log_file((FILE*)out);
	ctx_logfile_overridden = 1;
	ctx->logfile_override = 1;
	ctx->log_out = out;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_async(struct ub_ctx* ctx, int dothread)
{
#ifdef THREADS_DISABLED
	if(dothread) /* cannot do threading */
		return UB_NOERROR;
#endif
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		return UB_AFTERFINAL;
	}
	ctx->dothread = dothread;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_poll(struct ub_ctx* ctx)
{
	/* no need to hold lock while testing for readability. */
	return tube_poll(ctx->rr_pipe);
}

int
ub_fd(struct ub_ctx* ctx)
{
	return tube_read_fd(ctx->rr_pipe);
}

/** process answer from bg worker */
static int
process_answer_detail(struct ub_ctx* ctx, uint8_t* msg, uint32_t len,
	ub_callback_type* cb, void** cbarg, int* err,
	struct ub_result** res)
{
	struct ctx_query* q;
	if(context_serial_getcmd(msg, len) != UB_LIBCMD_ANSWER) {
		log_err("error: bad data from bg worker %d",
			(int)context_serial_getcmd(msg, len));
		return 0;
	}

	lock_basic_lock(&ctx->cfglock);
	q = context_deserialize_answer(ctx, msg, len, err);
	if(!q) {
		lock_basic_unlock(&ctx->cfglock);
		/* probably simply the lookup that failed, i.e.
		 * response returned before cancel was sent out, so noerror */
		return 1;
	}
	log_assert(q->async);

	/* grab cb while locked */
	if(q->cancelled) {
		*cb = NULL;
		*cbarg = NULL;
	} else {
		*cb = q->cb;
		*cbarg = q->cb_arg;
	}
	if(*err) {
		*res = NULL;
		ub_resolve_free(q->res);
	} else {
		/* parse the message, extract rcode, fill result */
		sldns_buffer* buf = sldns_buffer_new(q->msg_len);
		struct regional* region = regional_create();
		*res = q->res;
		(*res)->rcode = LDNS_RCODE_SERVFAIL;
		if(region && buf) {
			sldns_buffer_clear(buf);
			sldns_buffer_write(buf, q->msg, q->msg_len);
			sldns_buffer_flip(buf);
			libworker_enter_result(*res, buf, region,
				q->msg_security);
		}
		(*res)->answer_packet = q->msg;
		(*res)->answer_len = (int)q->msg_len;
		q->msg = NULL;
		sldns_buffer_free(buf);
		regional_destroy(region);
	}
	q->res = NULL;
	/* delete the q from list */
	(void)rbtree_delete(&ctx->queries, q->node.key);
	ctx->num_async--;
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);

	if(*cb) return 2;
	ub_resolve_free(*res);
	return 1;
}

/** process answer from bg worker */
static int
process_answer(struct ub_ctx* ctx, uint8_t* msg, uint32_t len)
{
	int err;
	ub_callback_type cb;
	void* cbarg;
	struct ub_result* res;
	int r;

	r = process_answer_detail(ctx, msg, len, &cb, &cbarg, &err, &res);

	/* no locks held while calling callback, so that library is
	 * re-entrant. */
	if(r == 2)
		(*cb)(cbarg, err, res);

	return r;
}

int
ub_process(struct ub_ctx* ctx)
{
	int r;
	uint8_t* msg;
	uint32_t len;
	while(1) {
		msg = NULL;
		lock_basic_lock(&ctx->rrpipe_lock);
		r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
		lock_basic_unlock(&ctx->rrpipe_lock);
		if(r == 0)
			return UB_PIPE;
		else if(r == -1)
			break;
		if(!process_answer(ctx, msg, len)) {
			free(msg);
			return UB_PIPE;
		}
		free(msg);
	}
	return UB_NOERROR;
}
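
/*
 * Illustrative sketch (not part of the library code) of how the
 * asynchronous interface around ub_process()/ub_wait() is typically used.
 * The callback matches ub_callback_type; the name, qtype 1 (A) and
 * qclass 1 (IN) are example values:
 *
 *	static void mycallback(void* mydata, int err, struct ub_result* result)
 *	{
 *		(void)mydata;
 *		if(err != 0) {
 *			printf("resolve error: %s\n", ub_strerror(err));
 *			return;
 *		}
 *		if(result->havedata)
 *			printf("%s has an address\n", result->qname);
 *		ub_resolve_free(result);
 *	}
 *
 *	int async_id = 0;
 *	ub_resolve_async(ctx, "www.nlnetlabs.nl", 1, 1, NULL, mycallback,
 *		&async_id);
 *	ub_wait(ctx);	(or poll ub_fd(ctx) and call ub_process(ctx))
 */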

int
ub_wait(struct ub_ctx* ctx)
{
	int err;
	ub_callback_type cb;
	void* cbarg;
	struct ub_result* res;
	int r;
	uint8_t* msg;
	uint32_t len;
	/* this is basically the same loop as _process(), but with changes.
	 * holds the rrpipe lock and waits with tube_wait */
	while(1) {
		lock_basic_lock(&ctx->rrpipe_lock);
		lock_basic_lock(&ctx->cfglock);
		if(ctx->num_async == 0) {
			lock_basic_unlock(&ctx->cfglock);
			lock_basic_unlock(&ctx->rrpipe_lock);
			break;
		}
		lock_basic_unlock(&ctx->cfglock);

		/* keep rrpipe locked, while
		 *   o waiting for pipe readable
		 *   o parsing message
		 *   o possibly decrementing num_async
		 * do callback without lock
		 */
		r = tube_wait(ctx->rr_pipe);
		if(r) {
			r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
			if(r == 0) {
				lock_basic_unlock(&ctx->rrpipe_lock);
				return UB_PIPE;
			}
			if(r == -1) {
				lock_basic_unlock(&ctx->rrpipe_lock);
				continue;
			}
			r = process_answer_detail(ctx, msg, len,
				&cb, &cbarg, &err, &res);
			lock_basic_unlock(&ctx->rrpipe_lock);
			free(msg);
			if(r == 0)
				return UB_PIPE;
			if(r == 2)
				(*cb)(cbarg, err, res);
		} else {
			lock_basic_unlock(&ctx->rrpipe_lock);
		}
	}
	return UB_NOERROR;
}

int
ub_resolve(struct ub_ctx* ctx, const char* name, int rrtype,
	int rrclass, struct ub_result** result)
{
	struct ctx_query* q;
	int r;
	*result = NULL;

	lock_basic_lock(&ctx->cfglock);
	if(!ctx->finalized) {
		r = context_finalize(ctx);
		if(r) {
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	}
	/* create new ctx_query and attempt to add to the list */
	lock_basic_unlock(&ctx->cfglock);
	q = context_new(ctx, name, rrtype, rrclass, NULL, NULL, NULL);
	if(!q)
		return UB_NOMEM;
	/* become a resolver thread for a bit */

	r = libworker_fg(ctx, q);
	if(r) {
		lock_basic_lock(&ctx->cfglock);
		(void)rbtree_delete(&ctx->queries, q->node.key);
		context_query_delete(q);
		lock_basic_unlock(&ctx->cfglock);
		return r;
	}
	q->res->answer_packet = q->msg;
	q->res->answer_len = (int)q->msg_len;
	q->msg = NULL;
	*result = q->res;
	q->res = NULL;

	lock_basic_lock(&ctx->cfglock);
	(void)rbtree_delete(&ctx->queries, q->node.key);
	context_query_delete(q);
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_resolve_event(struct ub_ctx* ctx, const char* name, int rrtype,
	int rrclass, void* mydata, ub_event_callback_type callback,
	int* async_id)
{
	struct ctx_query* q;
	int r;

	if(async_id)
		*async_id = 0;
	lock_basic_lock(&ctx->cfglock);
	if(!ctx->finalized) {
		r = context_finalize(ctx);
		if(r) {
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	}
	lock_basic_unlock(&ctx->cfglock);
	if(!ctx->event_worker) {
		ctx->event_worker = libworker_create_event(ctx,
			ctx->event_base);
		if(!ctx->event_worker) {
			return UB_INITFAIL;
		}
	}

	/* set time in case answer comes from cache */
	ub_comm_base_now(ctx->event_worker->base);

	/* create new ctx_query and attempt to add to the list */
	q = context_new(ctx, name, rrtype, rrclass, NULL, callback, mydata);
	if(!q)
		return UB_NOMEM;

	/* attach to mesh */
	if((r=libworker_attach_mesh(ctx, q, async_id)) != 0)
		return r;
	return UB_NOERROR;
}


int
ub_resolve_async(struct ub_ctx* ctx, const char* name, int rrtype,
	int rrclass, void* mydata, ub_callback_type callback, int* async_id)
{
	struct ctx_query* q;
	uint8_t* msg = NULL;
	uint32_t len = 0;

	if(async_id)
		*async_id = 0;
	lock_basic_lock(&ctx->cfglock);
	if(!ctx->finalized) {
		int r = context_finalize(ctx);
		if(r) {
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	}
	if(!ctx->created_bg) {
		int r;
		ctx->created_bg = 1;
		lock_basic_unlock(&ctx->cfglock);
		r = libworker_bg(ctx);
		if(r) {
			lock_basic_lock(&ctx->cfglock);
			ctx->created_bg = 0;
			lock_basic_unlock(&ctx->cfglock);
			return r;
		}
	} else {
		lock_basic_unlock(&ctx->cfglock);
	}

	/* create new ctx_query and attempt to add to the list */
	q = context_new(ctx, name, rrtype, rrclass, callback, NULL, mydata);
	if(!q)
		return UB_NOMEM;

	/* write over pipe to background worker */
	lock_basic_lock(&ctx->cfglock);
	msg = context_serialize_new_query(q, &len);
	if(!msg) {
		(void)rbtree_delete(&ctx->queries, q->node.key);
		ctx->num_async--;
		context_query_delete(q);
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOMEM;
	}
	if(async_id)
		*async_id = q->querynum;
	lock_basic_unlock(&ctx->cfglock);

	lock_basic_lock(&ctx->qqpipe_lock);
	if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
		lock_basic_unlock(&ctx->qqpipe_lock);
		free(msg);
		return UB_PIPE;
	}
	lock_basic_unlock(&ctx->qqpipe_lock);
	free(msg);
	return UB_NOERROR;
}

int
ub_cancel(struct ub_ctx* ctx, int async_id)
{
	struct ctx_query* q;
	uint8_t* msg = NULL;
	uint32_t len = 0;
	lock_basic_lock(&ctx->cfglock);
	q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
	if(!q || !q->async) {
		/* it is not there, so nothing to do */
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOID;
	}
	log_assert(q->async);
	q->cancelled = 1;

	/* delete it */
	if(!ctx->dothread) { /* if forked */
		(void)rbtree_delete(&ctx->queries, q->node.key);
		ctx->num_async--;
		msg = context_serialize_cancel(q, &len);
		context_query_delete(q);
		lock_basic_unlock(&ctx->cfglock);
		if(!msg) {
			return UB_NOMEM;
		}
		/* send cancel to background worker */
		lock_basic_lock(&ctx->qqpipe_lock);
		if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
			lock_basic_unlock(&ctx->qqpipe_lock);
			free(msg);
			return UB_PIPE;
		}
		lock_basic_unlock(&ctx->qqpipe_lock);
		free(msg);
	} else {
		lock_basic_unlock(&ctx->cfglock);
	}
	return UB_NOERROR;
}

void
ub_resolve_free(struct ub_result* result)
{
	char** p;
	if(!result) return;
	free(result->qname);
	if(result->canonname != result->qname)
		free(result->canonname);
	if(result->data)
		for(p = result->data; *p; p++)
			free(*p);
	free(result->data);
	free(result->len);
	free(result->answer_packet);
	free(result->why_bogus);
	free(result);
}

const char*
ub_strerror(int err)
{
	switch(err) {
	case UB_NOERROR: return "no error";
	case UB_SOCKET: return "socket io error";
	case UB_NOMEM: return "out of memory";
	case UB_SYNTAX: return "syntax error";
	case UB_SERVFAIL: return "server failure";
	case UB_FORKFAIL: return "could not fork";
	case UB_INITFAIL: return "initialization failure";
	case UB_AFTERFINAL: return "setting change after finalize";
	case UB_PIPE: return "error in pipe communication with async";
	case UB_READFILE: return "error reading file";
	case UB_NOID: return "error async_id does not exist";
	default: return "unknown error";
	}
}
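
/*
 * Illustrative sketch (not part of the library code) of the forwarder
 * setup below; the address is an example from the TEST-NET-1 range:
 *
 *	ub_ctx_set_fwd(ctx, "192.0.2.53");	sends queries to that server
 *	ub_ctx_set_fwd(ctx, NULL);		removes the root "." forward again
 */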

int
ub_ctx_set_fwd(struct ub_ctx* ctx, const char* addr)
{
	struct sockaddr_storage storage;
	socklen_t stlen;
	struct config_stub* s;
	char* dupl;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		errno=EINVAL;
		return UB_AFTERFINAL;
	}
	if(!addr) {
		/* disable fwd mode - the root stub should be first. */
		if(ctx->env->cfg->forwards &&
			strcmp(ctx->env->cfg->forwards->name, ".") == 0) {
			s = ctx->env->cfg->forwards;
			ctx->env->cfg->forwards = s->next;
			s->next = NULL;
			config_delstubs(s);
		}
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOERROR;
	}
	lock_basic_unlock(&ctx->cfglock);

	/* check syntax for addr */
	if(!extstrtoaddr(addr, &storage, &stlen, UNBOUND_DNS_PORT)) {
		errno=EINVAL;
		return UB_SYNTAX;
	}

	/* it parses, add root stub in front of list */
	lock_basic_lock(&ctx->cfglock);
	if(!ctx->env->cfg->forwards ||
		strcmp(ctx->env->cfg->forwards->name, ".") != 0) {
		s = calloc(1, sizeof(*s));
		if(!s) {
			lock_basic_unlock(&ctx->cfglock);
			errno=ENOMEM;
			return UB_NOMEM;
		}
		s->name = strdup(".");
		if(!s->name) {
			free(s);
			lock_basic_unlock(&ctx->cfglock);
			errno=ENOMEM;
			return UB_NOMEM;
		}
		s->next = ctx->env->cfg->forwards;
		ctx->env->cfg->forwards = s;
	} else {
		log_assert(ctx->env->cfg->forwards);
		s = ctx->env->cfg->forwards;
	}
	dupl = strdup(addr);
	if(!dupl) {
		lock_basic_unlock(&ctx->cfglock);
		errno=ENOMEM;
		return UB_NOMEM;
	}
	if(!cfg_strlist_insert(&s->addrs, dupl)) {
		lock_basic_unlock(&ctx->cfglock);
		errno=ENOMEM;
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int ub_ctx_set_tls(struct ub_ctx* ctx, int tls)
{
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		errno=EINVAL;
		return UB_AFTERFINAL;
	}
	ctx->env->cfg->ssl_upstream = tls;
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int ub_ctx_set_stub(struct ub_ctx* ctx, const char* zone, const char* addr,
	int isprime)
{
	char* a;
	struct config_stub **prev, *elem;

	/* check syntax for zone name */
	if(zone) {
		uint8_t* nm;
		int nmlabs;
		size_t nmlen;
		if(!parse_dname(zone, &nm, &nmlen, &nmlabs)) {
			errno=EINVAL;
			return UB_SYNTAX;
		}
		free(nm);
	} else {
		zone = ".";
	}

	/* check syntax for addr (if not NULL) */
	if(addr) {
		struct sockaddr_storage storage;
		socklen_t stlen;
		if(!extstrtoaddr(addr, &storage, &stlen, UNBOUND_DNS_PORT)) {
			errno=EINVAL;
			return UB_SYNTAX;
		}
	}

	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		errno=EINVAL;
		return UB_AFTERFINAL;
	}

	/* arguments all right, now find or add the stub */
	prev = &ctx->env->cfg->stubs;
	elem = cfg_stub_find(&prev, zone);
	if(!elem && !addr) {
		/* not found and we want to delete, nothing to do */
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOERROR;
	} else if(elem && !addr) {
		/* found, and we want to delete */
		*prev = elem->next;
		config_delstub(elem);
		lock_basic_unlock(&ctx->cfglock);
		return UB_NOERROR;
	} else if(!elem) {
		/* not found, create the stub entry */
		elem=(struct config_stub*)calloc(1, sizeof(struct config_stub));
		if(elem) elem->name = strdup(zone);
		if(!elem || !elem->name) {
			free(elem);
			lock_basic_unlock(&ctx->cfglock);
			errno = ENOMEM;
			return UB_NOMEM;
		}
		elem->next = ctx->env->cfg->stubs;
		ctx->env->cfg->stubs = elem;
	}

	/* add the address to the list and set settings */
	elem->isprime = isprime;
	a = strdup(addr);
	if(!a) {
		lock_basic_unlock(&ctx->cfglock);
		errno = ENOMEM;
		return UB_NOMEM;
	}
	if(!cfg_strlist_insert(&elem->addrs, a)) {
		lock_basic_unlock(&ctx->cfglock);
		errno = ENOMEM;
		return UB_NOMEM;
	}
	lock_basic_unlock(&ctx->cfglock);
	return UB_NOERROR;
}

int
ub_ctx_resolvconf(struct ub_ctx* ctx, const char* fname)
{
	FILE* in;
	int numserv = 0;
	char buf[1024];
	char* parse, *addr;
	int r;

	if(fname == NULL) {
#if !defined(UB_ON_WINDOWS) || !defined(HAVE_WINDOWS_H)
		fname = "/etc/resolv.conf";
#else
		FIXED_INFO *info;
		ULONG buflen = sizeof(*info);
		IP_ADDR_STRING *ptr;

		info = (FIXED_INFO *) malloc(sizeof (FIXED_INFO));
		if (info == NULL)
			return UB_READFILE;

		if (GetNetworkParams(info, &buflen) == ERROR_BUFFER_OVERFLOW) {
			free(info);
			info = (FIXED_INFO *) malloc(buflen);
			if (info == NULL)
				return UB_READFILE;
		}

		if (GetNetworkParams(info, &buflen) == NO_ERROR) {
			int retval=0;
			ptr = &(info->DnsServerList);
			while (ptr) {
				numserv++;
				if((retval=ub_ctx_set_fwd(ctx,
					ptr->IpAddress.String))!=0) {
					free(info);
					return retval;
				}
				ptr = ptr->Next;
			}
			free(info);
			if (numserv==0)
				return UB_READFILE;
			return UB_NOERROR;
		}
		free(info);
		return UB_READFILE;
#endif /* WINDOWS */
	}
	in = fopen(fname, "r");
	if(!in) {
		/* error in errno! perror(fname) */
		return UB_READFILE;
	}
	while(fgets(buf, (int)sizeof(buf), in)) {
		buf[sizeof(buf)-1] = 0;
		parse=buf;
		while(*parse == ' ' || *parse == '\t')
			parse++;
		if(strncmp(parse, "nameserver", 10) == 0) {
			numserv++;
			parse += 10; /* skip 'nameserver' */
			/* skip whitespace */
			while(*parse == ' ' || *parse == '\t')
				parse++;
			addr = parse;
			/* skip [0-9a-fA-F.:]*, i.e. IP4 and IP6 address */
			while(isxdigit((unsigned char)*parse) || *parse=='.' || *parse==':')
				parse++;
			/* terminate after the address, remove newline */
			*parse = 0;

			if((r = ub_ctx_set_fwd(ctx, addr)) != UB_NOERROR) {
				fclose(in);
				return r;
			}
		}
	}
	fclose(in);
	if(numserv == 0) {
		/* from resolv.conf(5) if none given, use localhost */
		return ub_ctx_set_fwd(ctx, "127.0.0.1");
	}
	return UB_NOERROR;
}

int
ub_ctx_hosts(struct ub_ctx* ctx, const char* fname)
{
	FILE* in;
	char buf[1024], ldata[2048];
	char* parse, *addr, *name, *ins;
	lock_basic_lock(&ctx->cfglock);
	if(ctx->finalized) {
		lock_basic_unlock(&ctx->cfglock);
		errno=EINVAL;
		return UB_AFTERFINAL;
	}
	lock_basic_unlock(&ctx->cfglock);
	if(fname == NULL) {
#if defined(UB_ON_WINDOWS) && defined(HAVE_WINDOWS_H)
		/*
		 * If this is Windows NT/XP/2K it's in
		 * %WINDIR%\system32\drivers\etc\hosts.
		 * If this is Windows 95/98/Me it's in %WINDIR%\hosts.
1233 */ 1234 name = getenv("WINDIR"); 1235 if (name != NULL) { 1236 int retval=0; 1237 snprintf(buf, sizeof(buf), "%s%s", name, 1238 "\\system32\\drivers\\etc\\hosts"); 1239 if((retval=ub_ctx_hosts(ctx, buf)) !=0 ) { 1240 snprintf(buf, sizeof(buf), "%s%s", name, 1241 "\\hosts"); 1242 retval=ub_ctx_hosts(ctx, buf); 1243 } 1244 return retval; 1245 } 1246 return UB_READFILE; 1247 #else 1248 fname = "/etc/hosts"; 1249 #endif /* WIN32 */ 1250 } 1251 in = fopen(fname, "r"); 1252 if(!in) { 1253 /* error in errno! perror(fname) */ 1254 return UB_READFILE; 1255 } 1256 while(fgets(buf, (int)sizeof(buf), in)) { 1257 buf[sizeof(buf)-1] = 0; 1258 parse=buf; 1259 while(*parse == ' ' || *parse == '\t') 1260 parse++; 1261 if(*parse == '#') 1262 continue; /* skip comment */ 1263 /* format: <addr> spaces <name> spaces <name> ... */ 1264 addr = parse; 1265 /* skip addr */ 1266 while(isxdigit((unsigned char)*parse) || *parse == '.' || *parse == ':') 1267 parse++; 1268 if(*parse == '\r') 1269 parse++; 1270 if(*parse == '\n' || *parse == 0) 1271 continue; 1272 if(*parse == '%') 1273 continue; /* ignore macOSX fe80::1%lo0 localhost */ 1274 if(*parse != ' ' && *parse != '\t') { 1275 /* must have whitespace after address */ 1276 fclose(in); 1277 errno=EINVAL; 1278 return UB_SYNTAX; 1279 } 1280 *parse++ = 0; /* end delimiter for addr ... */ 1281 /* go to names and add them */ 1282 while(*parse) { 1283 while(*parse == ' ' || *parse == '\t' || *parse=='\n' 1284 || *parse=='\r') 1285 parse++; 1286 if(*parse == 0 || *parse == '#') 1287 break; 1288 /* skip name, allows (too) many printable characters */ 1289 name = parse; 1290 while('!' <= *parse && *parse <= '~') 1291 parse++; 1292 if(*parse) 1293 *parse++ = 0; /* end delimiter for name */ 1294 snprintf(ldata, sizeof(ldata), "%s %s %s", 1295 name, str_is_ip6(addr)?"AAAA":"A", addr); 1296 ins = strdup(ldata); 1297 if(!ins) { 1298 /* out of memory */ 1299 fclose(in); 1300 errno=ENOMEM; 1301 return UB_NOMEM; 1302 } 1303 lock_basic_lock(&ctx->cfglock); 1304 if(!cfg_strlist_insert(&ctx->env->cfg->local_data, 1305 ins)) { 1306 lock_basic_unlock(&ctx->cfglock); 1307 fclose(in); 1308 errno=ENOMEM; 1309 return UB_NOMEM; 1310 } 1311 lock_basic_unlock(&ctx->cfglock); 1312 } 1313 } 1314 fclose(in); 1315 return UB_NOERROR; 1316 } 1317 1318 /** finalize the context, if not already finalized */ 1319 static int ub_ctx_finalize(struct ub_ctx* ctx) 1320 { 1321 int res = 0; 1322 lock_basic_lock(&ctx->cfglock); 1323 if (!ctx->finalized) { 1324 res = context_finalize(ctx); 1325 } 1326 lock_basic_unlock(&ctx->cfglock); 1327 return res; 1328 } 1329 1330 /* Print local zones and RR data */ 1331 int ub_ctx_print_local_zones(struct ub_ctx* ctx) 1332 { 1333 int res = ub_ctx_finalize(ctx); 1334 if (res) return res; 1335 1336 local_zones_print(ctx->local_zones); 1337 1338 return UB_NOERROR; 1339 } 1340 1341 /* Add a new zone */ 1342 int ub_ctx_zone_add(struct ub_ctx* ctx, const char *zone_name, 1343 const char *zone_type) 1344 { 1345 enum localzone_type t; 1346 struct local_zone* z; 1347 uint8_t* nm; 1348 int nmlabs; 1349 size_t nmlen; 1350 1351 int res = ub_ctx_finalize(ctx); 1352 if (res) return res; 1353 1354 if(!local_zone_str2type(zone_type, &t)) { 1355 return UB_SYNTAX; 1356 } 1357 1358 if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) { 1359 return UB_SYNTAX; 1360 } 1361 1362 lock_rw_wrlock(&ctx->local_zones->lock); 1363 if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs, 1364 LDNS_RR_CLASS_IN))) { 1365 /* already present in tree */ 1366 lock_rw_wrlock(&z->lock); 1367 z->type = t; 
		lock_rw_unlock(&z->lock);
		lock_rw_unlock(&ctx->local_zones->lock);
		free(nm);
		return UB_NOERROR;
	}
	if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN, t)) {
		lock_rw_unlock(&ctx->local_zones->lock);
		return UB_NOMEM;
	}
	lock_rw_unlock(&ctx->local_zones->lock);
	return UB_NOERROR;
}

/* Remove zone */
int ub_ctx_zone_remove(struct ub_ctx* ctx, const char *zone_name)
{
	struct local_zone* z;
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;

	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
		return UB_SYNTAX;
	}

	lock_rw_wrlock(&ctx->local_zones->lock);
	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN))) {
		/* present in tree */
		local_zones_del_zone(ctx->local_zones, z);
	}
	lock_rw_unlock(&ctx->local_zones->lock);
	free(nm);
	return UB_NOERROR;
}

/* Add new RR data */
int ub_ctx_data_add(struct ub_ctx* ctx, const char *data)
{
	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	res = local_zones_add_RR(ctx->local_zones, data);
	return (!res) ? UB_NOMEM : UB_NOERROR;
}

/* Remove RR data */
int ub_ctx_data_remove(struct ub_ctx* ctx, const char *data)
{
	uint8_t* nm;
	int nmlabs;
	size_t nmlen;
	int res = ub_ctx_finalize(ctx);
	if (res) return res;

	if(!parse_dname(data, &nm, &nmlen, &nmlabs))
		return UB_SYNTAX;

	local_zones_del_data(ctx->local_zones, nm, nmlen, nmlabs,
		LDNS_RR_CLASS_IN);

	free(nm);
	return UB_NOERROR;
}

const char* ub_version(void)
{
	return PACKAGE_VERSION;
}

int
ub_ctx_set_event(struct ub_ctx* ctx, struct event_base* base) {
	struct ub_event_base* new_base;

	if (!ctx || !ctx->event_base || !base) {
		return UB_INITFAIL;
	}
	if (ub_libevent_get_event_base(ctx->event_base) == base) {
		/* already set */
		return UB_NOERROR;
	}

	lock_basic_lock(&ctx->cfglock);
	/* destroy the current worker - safe to pass in NULL */
	libworker_delete_event(ctx->event_worker);
	ctx->event_worker = NULL;
	new_base = ub_libevent_event_base(base);
	if (new_base)
		ctx->event_base = new_base;
	ctx->created_bg = 0;
	ctx->dothread = 1;
	lock_basic_unlock(&ctx->cfglock);
	return new_base ? UB_NOERROR : UB_INITFAIL;
}
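
/*
 * Illustrative sketch (not part of the library code) of the local zone
 * and local data calls above; the zone name, type and record are
 * made-up example values:
 *
 *	ub_ctx_zone_add(ctx, "example.com.", "static");
 *	ub_ctx_data_add(ctx, "www.example.com. IN A 192.0.2.1");
 *	ub_ctx_data_remove(ctx, "www.example.com.");
 *	ub_ctx_zone_remove(ctx, "example.com.");
 */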