1 /*
2 * libunbound/libworker.c - worker thread or process that resolves
3 *
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 /**
37 * \file
38 *
39 * This file contains the worker process or thread that performs
40 * the DNS resolving and validation. The worker is called by a procedure
41 * and if in the background continues until exit, if in the foreground
42 * returns from the procedure when done.
43 */
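/*
 * Illustrative sketch of how an application reaches this code through the
 * public libunbound API in unbound.h; the hostname and result handling are
 * placeholders, not part of this file. The synchronous ub_resolve() call
 * runs a foreground worker (libworker_fg below); the asynchronous
 * ub_resolve_async() call queues the query to a background worker set up
 * by libworker_bg() below. Here rrtype 1 is A and rrclass 1 is IN.
 *
 *	struct ub_ctx* ctx = ub_ctx_create();
 *	struct ub_result* result = NULL;
 *	int err;
 *	if(!ctx)
 *		return 1;
 *	err = ub_resolve(ctx, "www.example.com", 1, 1, &result);
 *	if(err == 0 && result->havedata) {
 *		... inspect result->data[i] and result->len[i] ...
 *	}
 *	ub_resolve_free(result);
 *	ub_ctx_delete(ctx);
 */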
44 #include "config.h"
45 #ifdef HAVE_SSL
46 #include <openssl/ssl.h>
47 #endif
48 #include "libunbound/libworker.h"
49 #include "libunbound/context.h"
50 #include "libunbound/unbound.h"
51 #include "libunbound/worker.h"
52 #include "libunbound/unbound-event.h"
53 #include "services/outside_network.h"
54 #include "services/mesh.h"
55 #include "services/localzone.h"
56 #include "services/cache/rrset.h"
57 #include "services/outbound_list.h"
58 #include "services/authzone.h"
59 #include "util/fptr_wlist.h"
60 #include "util/module.h"
61 #include "util/regional.h"
62 #include "util/random.h"
63 #include "util/config_file.h"
64 #include "util/netevent.h"
65 #include "util/proxy_protocol.h"
66 #include "util/storage/lookup3.h"
67 #include "util/storage/slabhash.h"
68 #include "util/net_help.h"
69 #include "util/data/dname.h"
70 #include "util/data/msgreply.h"
71 #include "util/data/msgencode.h"
72 #include "util/tube.h"
73 #include "sldns/sbuffer.h"
74 #include "sldns/str2wire.h"
75 #ifdef USE_DNSTAP
76 #include "dnstap/dtstream.h"
77 #endif
78
79 #ifdef HAVE_TARGETCONDITIONALS_H
80 #include <TargetConditionals.h>
81 #endif
82
83 #if (defined(TARGET_OS_TV) && TARGET_OS_TV) || (defined(TARGET_OS_WATCH) && TARGET_OS_WATCH)
84 #undef HAVE_FORK
85 #endif
86
87 /** handle new query command for bg worker */
88 static void handle_newq(struct libworker* w, uint8_t* buf, uint32_t len);
89
90 /** delete libworker env */
91 static void
92 libworker_delete_env(struct libworker* w)
93 {
94 if(w->env) {
95 outside_network_quit_prepare(w->back);
96 mesh_delete(w->env->mesh);
97 context_release_alloc(w->ctx, w->env->alloc,
98 !w->is_bg || w->is_bg_thread);
99 sldns_buffer_free(w->env->scratch_buffer);
100 regional_destroy(w->env->scratch);
101 ub_randfree(w->env->rnd);
102 free(w->env);
103 }
104 #ifdef HAVE_SSL
105 SSL_CTX_free(w->sslctx);
106 #endif
107 outside_network_delete(w->back);
108 }
109
110 /** delete libworker struct */
111 static void
112 libworker_delete(struct libworker* w)
113 {
114 if(!w) return;
115 libworker_delete_env(w);
116 comm_base_delete(w->base);
117 free(w);
118 }
119
120 void
121 libworker_delete_event(struct libworker* w)
122 {
123 if(!w) return;
124 libworker_delete_env(w);
125 comm_base_delete_no_base(w->base);
126 free(w);
127 }
128
129 /** setup fresh libworker struct */
130 static struct libworker*
131 libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
132 {
133 struct libworker* w = (struct libworker*)calloc(1, sizeof(*w));
134 struct config_file* cfg = ctx->env->cfg;
135 int* ports;
136 int numports;
137 if(!w) return NULL;
138 w->is_bg = is_bg;
139 w->ctx = ctx;
140 w->env = (struct module_env*)malloc(sizeof(*w->env));
141 if(!w->env) {
142 free(w);
143 return NULL;
144 }
145 *w->env = *ctx->env;
146 w->env->alloc = context_obtain_alloc(ctx, !w->is_bg || w->is_bg_thread);
147 if(!w->env->alloc) {
148 libworker_delete(w);
149 return NULL;
150 }
151 w->thread_num = w->env->alloc->thread_num;
152 alloc_set_id_cleanup(w->env->alloc, &libworker_alloc_cleanup, w);
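	/* Note on locking in this function: ctx->cfglock is only taken when
	 * the worker shares context memory with the application, i.e. for a
	 * foreground worker (!w->is_bg) or a background worker running as a
	 * thread (w->is_bg_thread). A background worker created by fork()
	 * works on its own copy of the context, so no locking is needed. */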
153 if(!w->is_bg || w->is_bg_thread) {
154 lock_basic_lock(&ctx->cfglock);
155 }
156 w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
157 w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
158 #ifdef HAVE_SSL
159 w->sslctx = connect_sslctx_create(NULL, NULL,
160 cfg->tls_cert_bundle, cfg->tls_win_cert);
161 if(!w->sslctx) {
162 /* to make the setup fail after unlock */
163 sldns_buffer_free(w->env->scratch_buffer);
164 w->env->scratch_buffer = NULL;
165 }
166 #endif
167 if(!w->is_bg || w->is_bg_thread) {
168 lock_basic_unlock(&ctx->cfglock);
169 }
170 if(!w->env->scratch || !w->env->scratch_buffer) {
171 libworker_delete(w);
172 return NULL;
173 }
174 w->env->worker = (struct worker*)w;
175 w->env->probe_timer = NULL;
176 if(!w->is_bg || w->is_bg_thread) {
177 lock_basic_lock(&ctx->cfglock);
178 }
179 if(!(w->env->rnd = ub_initstate(ctx->seed_rnd))) {
180 if(!w->is_bg || w->is_bg_thread) {
181 lock_basic_unlock(&ctx->cfglock);
182 }
183 libworker_delete(w);
184 return NULL;
185 }
186 if(!w->is_bg || w->is_bg_thread) {
187 lock_basic_unlock(&ctx->cfglock);
188 }
189 if(1) {
190 /* primitive lockout for threading: if it overwrites another
191 * thread it is like wiping the cache (which is likely empty
192 * at the start) */
193 /* note we are holding the ctx lock in normal threaded
194 * cases so that is solved properly, it is only for many ctx
195 * in different threads that this may clash */
196 static int done_raninit = 0;
197 if(!done_raninit) {
198 done_raninit = 1;
199 hash_set_raninit((uint32_t)ub_random(w->env->rnd));
200 }
201 }
202
203 if(eb)
204 w->base = comm_base_create_event(eb);
205 else w->base = comm_base_create(0);
206 if(!w->base) {
207 libworker_delete(w);
208 return NULL;
209 }
210 w->env->worker_base = w->base;
211 if(!w->is_bg || w->is_bg_thread) {
212 lock_basic_lock(&ctx->cfglock);
213 }
214 numports = cfg_condense_ports(cfg, &ports);
215 if(numports == 0) {
216 if(!w->is_bg || w->is_bg_thread) {
217 lock_basic_unlock(&ctx->cfglock);
218 }
219 libworker_delete(w);
220 return NULL;
221 }
222 w->back = outside_network_create(w->base, cfg->msg_buffer_size,
223 (size_t)cfg->outgoing_num_ports, cfg->out_ifs,
224 cfg->num_out_ifs, cfg->do_ip4, cfg->do_ip6,
225 cfg->do_tcp?cfg->outgoing_num_tcp:0, cfg->ip_dscp,
226 w->env->infra_cache, w->env->rnd, cfg->use_caps_bits_for_id,
227 ports, numports, cfg->unwanted_threshold,
228 cfg->outgoing_tcp_mss, &libworker_alloc_cleanup, w,
229 cfg->do_udp || cfg->udp_upstream_without_downstream, w->sslctx,
230 cfg->delay_close, cfg->tls_use_sni, NULL, cfg->udp_connect,
231 cfg->max_reuse_tcp_queries, cfg->tcp_reuse_timeout,
232 cfg->tcp_auth_query_timeout);
233 w->env->outnet = w->back;
234 if(!w->is_bg || w->is_bg_thread) {
235 lock_basic_unlock(&ctx->cfglock);
236 }
237 free(ports);
238 if(!w->back) {
239 libworker_delete(w);
240 return NULL;
241 }
242 w->env->mesh = mesh_create(&ctx->mods, w->env);
243 if(!w->env->mesh) {
244 libworker_delete(w);
245 return NULL;
246 }
247 w->env->send_query = &libworker_send_query;
248 w->env->detach_subs = &mesh_detach_subs;
249 w->env->attach_sub = &mesh_attach_sub;
250 w->env->add_sub = &mesh_add_sub;
251 w->env->kill_sub = &mesh_state_delete;
252 w->env->detect_cycle = &mesh_detect_cycle;
253 comm_base_timept(w->base, &w->env->now, &w->env->now_tv);
254 pp_init(&sldns_write_uint16, &sldns_write_uint32);
255 return w;
256 }
257
258 struct libworker* libworker_create_event(struct ub_ctx* ctx,
259 struct ub_event_base* eb)
260 {
261 return libworker_setup(ctx, 0, eb);
262 }
263
264 /** handle cancel command for bg worker */
265 static void
266 handle_cancel(struct libworker* w, uint8_t* buf, uint32_t len)
267 {
268 struct ctx_query* q;
269 if(w->is_bg_thread) {
270 lock_basic_lock(&w->ctx->cfglock);
271 q = context_deserialize_cancel(w->ctx, buf, len);
272 lock_basic_unlock(&w->ctx->cfglock);
273 } else {
274 q = context_deserialize_cancel(w->ctx, buf, len);
275 }
276 if(!q) {
277 /* probably simply lookup failed, i.e. the message had been
278 * processed and answered before the cancel arrived */
279 return;
280 }
281 q->cancelled = 1;
282 free(buf);
283 }
284
285 /** do control command coming into bg server */
286 static void
287 libworker_do_cmd(struct libworker* w, uint8_t* msg, uint32_t len)
288 {
289 switch(context_serial_getcmd(msg, len)) {
290 default:
291 case UB_LIBCMD_ANSWER:
292 log_err("unknown command for bg worker %d",
293 (int)context_serial_getcmd(msg, len));
294 /* and fall through to quit */
295 ATTR_FALLTHROUGH
296 /* fallthrough */
297 case UB_LIBCMD_QUIT:
298 free(msg);
299 comm_base_exit(w->base);
300 break;
301 case UB_LIBCMD_NEWQUERY:
302 handle_newq(w, msg, len);
303 break;
304 case UB_LIBCMD_CANCEL:
305 handle_cancel(w, msg, len);
306 break;
307 }
308 }
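/* The background worker is driven entirely by these serialized commands:
 * the application side writes them into ctx->qq_pipe (picked up by the tube
 * listen setup in libworker_dobg below) and reads answers back from
 * ctx->rr_pipe. The UB_LIBCMD_* values and the (de)serialization helpers
 * used here come from libunbound/context.h. */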
309
310 /** handle control command coming into server */
311 void
312 libworker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
313 uint8_t* msg, size_t len, int err, void* arg)
314 {
315 struct libworker* w = (struct libworker*)arg;
316
317 if(err != 0) {
318 free(msg);
319 /* it is of no use to go on, exit */
320 comm_base_exit(w->base);
321 return;
322 }
323 libworker_do_cmd(w, msg, len); /* also frees the buf */
324 }
325
326 /** the background thread func */
327 static void*
328 libworker_dobg(void* arg)
329 {
330 /* setup */
331 uint32_t m;
332 struct libworker* w = (struct libworker*)arg;
333 struct ub_ctx* ctx;
334 if(!w) {
335 log_err("libunbound bg worker init failed, nomem");
336 return NULL;
337 }
338 ctx = w->ctx;
339 log_thread_set(&w->thread_num);
340 #ifdef THREADS_DISABLED
341 /* we are forked */
342 w->is_bg_thread = 0;
343 /* close non-used parts of the pipes */
344 tube_close_write(ctx->qq_pipe);
345 tube_close_read(ctx->rr_pipe);
346 #endif
347 if(!tube_setup_bg_listen(ctx->qq_pipe, w->base,
348 libworker_handle_control_cmd, w)) {
349 log_err("libunbound bg worker init failed, no bglisten");
350 return NULL;
351 }
352 if(!tube_setup_bg_write(ctx->rr_pipe, w->base)) {
353 log_err("libunbound bg worker init failed, no bgwrite");
354 return NULL;
355 }
356
357 /* do the work */
358 comm_base_dispatch(w->base);
359
360 /* cleanup */
361 m = UB_LIBCMD_QUIT;
362 w->want_quit = 1;
363 tube_remove_bg_listen(w->ctx->qq_pipe);
364 tube_remove_bg_write(w->ctx->rr_pipe);
365 libworker_delete(w);
366 (void)tube_write_msg(ctx->rr_pipe, (uint8_t*)&m,
367 (uint32_t)sizeof(m), 0);
368 #ifdef THREADS_DISABLED
369 /* close pipes from forked process before exit */
370 tube_close_read(ctx->qq_pipe);
371 tube_close_write(ctx->rr_pipe);
372 #endif
373 return NULL;
374 }
375
376 int libworker_bg(struct ub_ctx* ctx)
377 {
378 struct libworker* w;
379 /* fork or threadcreate */
380 lock_basic_lock(&ctx->cfglock);
381 if(ctx->dothread) {
382 lock_basic_unlock(&ctx->cfglock);
383 w = libworker_setup(ctx, 1, NULL);
384 if(!w) return UB_NOMEM;
385 w->is_bg_thread = 1;
386 ctx->thread_worker = w;
387 #ifdef ENABLE_LOCK_CHECKS
388 w->thread_num = 1; /* for nicer DEBUG checklocks */
389 #endif
390 ub_thread_create(&ctx->bg_tid, libworker_dobg, w);
391 } else {
392 lock_basic_unlock(&ctx->cfglock);
393 #ifndef HAVE_FORK
394 /* no fork on windows */
395 return UB_FORKFAIL;
396 #else /* HAVE_FORK */
397 switch((ctx->bg_pid=fork())) {
398 case 0:
399 w = libworker_setup(ctx, 1, NULL);
400 if(!w) fatal_exit("out of memory");
401 /* close non-used parts of the pipes */
402 tube_close_write(ctx->qq_pipe);
403 tube_close_read(ctx->rr_pipe);
404 (void)libworker_dobg(w);
405 exit(0);
406 break;
407 case -1:
408 return UB_FORKFAIL;
409 default:
410 /* close non-used parts, so that the worker
411 * bgprocess gets 'pipe closed' when the
412 * main process exits */
413 tube_close_read(ctx->qq_pipe);
414 tube_close_write(ctx->rr_pipe);
415 break;
416 }
417 #endif /* HAVE_FORK */
418 }
419 return UB_NOERROR;
420 }
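/*
 * Illustrative application-side counterpart of the background worker; the
 * names are the public unbound.h API, while the callback body, hostname and
 * error handling are placeholders.
 *
 *	static void my_done(void* mydata, int err, struct ub_result* result)
 *	{
 *		if(err == 0) {
 *			... use result ...
 *			ub_resolve_free(result);
 *		}
 *	}
 *
 *	int async_id = 0;
 *	ub_resolve_async(ctx, "www.example.com", 1, 1, NULL, my_done, &async_id);
 *	ub_process(ctx);	reads answers that arrive on ctx->rr_pipe
 *	(or ub_wait(ctx) to block until all outstanding queries are done)
 */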
421
422 /** insert canonname */
423 static int
424 fill_canon(struct ub_result* res, uint8_t* s)
425 {
426 char buf[255+2];
427 dname_str(s, buf);
428 res->canonname = strdup(buf);
429 return res->canonname != 0;
430 }
431
432 /** fill data into result */
433 static int
434 fill_res(struct ub_result* res, struct ub_packed_rrset_key* answer,
435 uint8_t* finalcname, struct query_info* rq, struct reply_info* rep)
436 {
437 size_t i;
438 struct packed_rrset_data* data;
439 res->ttl = 0;
440 if(!answer) {
441 if(finalcname) {
442 if(!fill_canon(res, finalcname))
443 return 0; /* out of memory */
444 }
445 if(rep->rrset_count != 0)
446 res->ttl = (int)rep->ttl;
447 res->data = (char**)calloc(1, sizeof(char*));
448 if(!res->data)
449 return 0; /* out of memory */
450 res->len = (int*)calloc(1, sizeof(int));
451 if(!res->len) {
452 free(res->data);
453 res->data = NULL;
454 return 0; /* out of memory */
455 }
456 return 1;
457 }
458 data = (struct packed_rrset_data*)answer->entry.data;
459 if(query_dname_compare(rq->qname, answer->rk.dname) != 0) {
460 if(!fill_canon(res, answer->rk.dname))
461 return 0; /* out of memory */
462 } else res->canonname = NULL;
463 res->data = (char**)calloc(data->count+1, sizeof(char*));
464 if(!res->data)
465 return 0; /* out of memory */
466 res->len = (int*)calloc(data->count+1, sizeof(int));
467 if(!res->len) {
468 free(res->data);
469 res->data = NULL;
470 return 0; /* out of memory */
471 }
472 for(i=0; i<data->count; i++) {
473 /* remove rdlength from rdata */
474 res->len[i] = (int)(data->rr_len[i] - 2);
475 res->data[i] = memdup(data->rr_data[i]+2, (size_t)res->len[i]);
476 if(!res->data[i]) {
477 size_t j;
478 for(j=0; j<i; j++) {
479 free(res->data[j]);
480 res->data[j] = NULL;
481 }
482 free(res->data);
483 res->data = NULL;
484 free(res->len);
485 res->len = NULL;
486 return 0; /* out of memory */
487 }
488 }
489 /* ttl for positive answers, from CNAME and answer RRs */
490 if(data->count != 0) {
491 size_t j;
492 res->ttl = (int)data->ttl;
493 for(j=0; j<rep->an_numrrsets; j++) {
494 struct packed_rrset_data* d =
495 (struct packed_rrset_data*)rep->rrsets[j]->
496 entry.data;
497 if((int)d->ttl < res->ttl)
498 res->ttl = (int)d->ttl;
499 }
500 }
501 /* ttl for negative answers */
502 if(data->count == 0 && rep->rrset_count != 0)
503 res->ttl = (int)rep->ttl;
504 res->data[data->count] = NULL;
505 res->len[data->count] = 0;
506 return 1;
507 }
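/*
 * Illustrative sketch of how a caller walks the arrays that fill_res()
 * produces: res->data is a NULL-terminated array of raw rdata (with the
 * 2-byte rdlength already stripped) and res->len holds the matching
 * lengths, so an A record entry is 4 bytes. The printing below is a
 * placeholder, not code in this file.
 *
 *	int i;
 *	char ip[64];
 *	for(i = 0; result->data[i] != NULL; i++) {
 *		if(result->len[i] == 4 &&
 *			inet_ntop(AF_INET, result->data[i], ip, sizeof(ip)))
 *			printf("%s\n", ip);
 *	}
 */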
508
509 /** fill result from parsed message, on error fills servfail */
510 void
511 libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
512 struct regional* temp, enum sec_status msg_security)
513 {
514 struct query_info rq;
515 struct reply_info* rep;
516 res->rcode = LDNS_RCODE_SERVFAIL;
517 rep = parse_reply_in_temp_region(buf, temp, &rq);
518 if(!rep) {
519 log_err("cannot parse buf");
520 return; /* error parsing buf, or out of memory */
521 }
522 if(!fill_res(res, reply_find_answer_rrset(&rq, rep),
523 reply_find_final_cname_target(&rq, rep), &rq, rep))
524 return; /* out of memory */
525 /* rcode, havedata, nxdomain, secure, bogus */
526 res->rcode = (int)FLAGS_GET_RCODE(rep->flags);
527 if(res->data && res->data[0])
528 res->havedata = 1;
529 if(res->rcode == LDNS_RCODE_NXDOMAIN)
530 res->nxdomain = 1;
531 if(msg_security == sec_status_secure)
532 res->secure = 1;
533 if(msg_security == sec_status_bogus ||
534 msg_security == sec_status_secure_sentinel_fail)
535 res->bogus = 1;
536 }
537
538 /** fillup fg results */
539 static void
540 libworker_fillup_fg(struct ctx_query* q, int rcode, sldns_buffer* buf,
541 enum sec_status s, char* why_bogus, int was_ratelimited)
542 {
543 q->res->was_ratelimited = was_ratelimited;
544 if(why_bogus)
545 q->res->why_bogus = strdup(why_bogus);
546 if(rcode != 0) {
547 q->res->rcode = rcode;
548 q->msg_security = s;
549 return;
550 }
551
552 q->res->rcode = LDNS_RCODE_SERVFAIL;
553 q->msg_security = sec_status_unchecked;
554 q->msg = memdup(sldns_buffer_begin(buf), sldns_buffer_limit(buf));
555 q->msg_len = sldns_buffer_limit(buf);
556 if(!q->msg) {
557 return; /* the error is in the rcode */
558 }
559
560 /* canonname and results */
561 q->msg_security = s;
562 libworker_enter_result(q->res, buf, q->w->env->scratch, s);
563 }
564
565 void
566 libworker_fg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
567 char* why_bogus, int was_ratelimited)
568 {
569 struct ctx_query* q = (struct ctx_query*)arg;
570 /* fg query is done; exit comm base */
571 comm_base_exit(q->w->base);
572
573 libworker_fillup_fg(q, rcode, buf, s, why_bogus, was_ratelimited);
574 }
575
576 /** setup qinfo and edns */
577 static int
578 setup_qinfo_edns(struct libworker* w, struct ctx_query* q,
579 struct query_info* qinfo, struct edns_data* edns)
580 {
581 qinfo->qtype = (uint16_t)q->res->qtype;
582 qinfo->qclass = (uint16_t)q->res->qclass;
583 qinfo->local_alias = NULL;
584 qinfo->qname = sldns_str2wire_dname(q->res->qname, &qinfo->qname_len);
585 if(!qinfo->qname) {
586 return 0;
587 }
588 edns->edns_present = 1;
589 edns->ext_rcode = 0;
590 edns->edns_version = 0;
591 edns->bits = EDNS_DO;
592 edns->opt_list_in = NULL;
593 edns->opt_list_out = NULL;
594 edns->opt_list_inplace_cb_out = NULL;
595 edns->padding_block_size = 0;
596 edns->cookie_present = 0;
597 edns->cookie_valid = 0;
598 if(sldns_buffer_capacity(w->back->udp_buff) < 65535)
599 edns->udp_size = (uint16_t)sldns_buffer_capacity(
600 w->back->udp_buff);
601 else edns->udp_size = 65535;
602 return 1;
603 }
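/* Note: queries made through the library always carry EDNS0 with the DO bit
 * set, so the mesh can perform DNSSEC validation and report a security
 * status for the result; the advertised UDP size is capped at the outgoing
 * buffer size. Cookies and padding are not used on this code path. */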
604
605 int libworker_fg(struct ub_ctx* ctx, struct ctx_query* q)
606 {
607 struct libworker* w = libworker_setup(ctx, 0, NULL);
608 uint16_t qflags, qid;
609 struct query_info qinfo;
610 struct edns_data edns;
611 if(!w)
612 return UB_INITFAIL;
613 if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
614 libworker_delete(w);
615 return UB_SYNTAX;
616 }
617 qid = 0;
618 qflags = BIT_RD;
619 q->w = w;
620 /* see if there is a fixed answer */
621 sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
622 sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
623 if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
624 w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
625 NULL, 0, NULL, 0, NULL)) {
626 regional_free_all(w->env->scratch);
627 libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
628 w->back->udp_buff, sec_status_insecure, NULL, 0);
629 libworker_delete(w);
630 free(qinfo.qname);
631 return UB_NOERROR;
632 }
633 if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
634 w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
635 regional_free_all(w->env->scratch);
636 libworker_fillup_fg(q, LDNS_RCODE_NOERROR,
637 w->back->udp_buff, sec_status_insecure, NULL, 0);
638 libworker_delete(w);
639 free(qinfo.qname);
640 return UB_NOERROR;
641 }
642 /* process new query */
643 if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
644 w->back->udp_buff, qid, libworker_fg_done_cb, q, 0)) {
645 free(qinfo.qname);
646 return UB_NOMEM;
647 }
648 free(qinfo.qname);
649
650 /* wait for reply */
651 comm_base_dispatch(w->base);
652
653 libworker_delete(w);
654 return UB_NOERROR;
655 }
656
657 void
658 libworker_event_done_cb(void* arg, int rcode, sldns_buffer* buf,
659 enum sec_status s, char* why_bogus, int was_ratelimited)
660 {
661 struct ctx_query* q = (struct ctx_query*)arg;
662 ub_event_callback_type cb = q->cb_event;
663 void* cb_arg = q->cb_arg;
664 int cancelled = q->cancelled;
665
666 /* delete it now */
667 struct ub_ctx* ctx = q->w->ctx;
668 lock_basic_lock(&ctx->cfglock);
669 (void)rbtree_delete(&ctx->queries, q->node.key);
670 ctx->num_async--;
671 context_query_delete(q);
672 lock_basic_unlock(&ctx->cfglock);
673
674 if(!cancelled) {
675 /* call callback */
676 int sec = 0;
677 if(s == sec_status_bogus)
678 sec = 1;
679 else if(s == sec_status_secure)
680 sec = 2;
681 (*cb)(cb_arg, rcode, (buf?(void*)sldns_buffer_begin(buf):NULL),
682 (buf?(int)sldns_buffer_limit(buf):0), sec, why_bogus, was_ratelimited);
683 }
684 }
685
686 int libworker_attach_mesh(struct ub_ctx* ctx, struct ctx_query* q,
687 int* async_id)
688 {
689 struct libworker* w = ctx->event_worker;
690 uint16_t qflags, qid;
691 struct query_info qinfo;
692 struct edns_data edns;
693 if(!w)
694 return UB_INITFAIL;
695 if(!setup_qinfo_edns(w, q, &qinfo, &edns))
696 return UB_SYNTAX;
697 qid = 0;
698 qflags = BIT_RD;
699 q->w = w;
700 /* see if there is a fixed answer */
701 sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
702 sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
703 if(local_zones_answer(ctx->local_zones, w->env, &qinfo, &edns,
704 w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
705 NULL, 0, NULL, 0, NULL)) {
706 regional_free_all(w->env->scratch);
707 free(qinfo.qname);
708 libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
709 w->back->udp_buff, sec_status_insecure, NULL, 0);
710 return UB_NOERROR;
711 }
712 if(ctx->env->auth_zones && auth_zones_answer(ctx->env->auth_zones,
713 w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
714 regional_free_all(w->env->scratch);
715 free(qinfo.qname);
716 libworker_event_done_cb(q, LDNS_RCODE_NOERROR,
717 w->back->udp_buff, sec_status_insecure, NULL, 0);
718 return UB_NOERROR;
719 }
720 /* process new query */
721 if(async_id)
722 *async_id = q->querynum;
723 if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
724 w->back->udp_buff, qid, libworker_event_done_cb, q, 0)) {
725 free(qinfo.qname);
726 return UB_NOMEM;
727 }
728 free(qinfo.qname);
729 return UB_NOERROR;
730 }
731
732 /** add result to the bg worker result queue */
733 static void
734 add_bg_result(struct libworker* w, struct ctx_query* q, sldns_buffer* pkt,
735 int err, char* reason, int was_ratelimited)
736 {
737 uint8_t* msg = NULL;
738 uint32_t len = 0;
739
740 if(w->want_quit) {
741 context_query_delete(q);
742 return;
743 }
744 /* serialize and delete unneeded q */
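	/* In the threaded case the ctx_query is shared with the frontend, so
	 * only the result fields are filled in here (under the cfglock) and
	 * the serialized answer identifies the query; the frontend deletes
	 * the query when it processes the answer. In the forked case memory
	 * is not shared, so the whole reply packet is serialized into the
	 * pipe message and the query is removed from this process here. */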
745 if(w->is_bg_thread) {
746 lock_basic_lock(&w->ctx->cfglock);
747 if(reason)
748 q->res->why_bogus = strdup(reason);
749 q->res->was_ratelimited = was_ratelimited;
750 if(pkt) {
751 q->msg_len = sldns_buffer_remaining(pkt);
752 q->msg = memdup(sldns_buffer_begin(pkt), q->msg_len);
753 if(!q->msg) {
754 msg = context_serialize_answer(q, UB_NOMEM, NULL, &len);
755 } else {
756 msg = context_serialize_answer(q, err, NULL, &len);
757 }
758 } else {
759 msg = context_serialize_answer(q, err, NULL, &len);
760 }
761 lock_basic_unlock(&w->ctx->cfglock);
762 } else {
763 if(reason)
764 q->res->why_bogus = strdup(reason);
765 q->res->was_ratelimited = was_ratelimited;
766 msg = context_serialize_answer(q, err, pkt, &len);
767 (void)rbtree_delete(&w->ctx->queries, q->node.key);
768 w->ctx->num_async--;
769 context_query_delete(q);
770 }
771
772 if(!msg) {
773 log_err("out of memory for async answer");
774 return;
775 }
776 if(!tube_queue_item(w->ctx->rr_pipe, msg, len)) {
777 log_err("out of memory for async answer");
778 return;
779 }
780 }
781
782 void
783 libworker_bg_done_cb(void* arg, int rcode, sldns_buffer* buf, enum sec_status s,
784 char* why_bogus, int was_ratelimited)
785 {
786 struct ctx_query* q = (struct ctx_query*)arg;
787
788 if(q->cancelled || q->w->back->want_to_quit) {
789 if(q->w->is_bg_thread) {
790 /* delete it now */
791 struct ub_ctx* ctx = q->w->ctx;
792 lock_basic_lock(&ctx->cfglock);
793 (void)rbtree_delete(&ctx->queries, q->node.key);
794 ctx->num_async--;
795 context_query_delete(q);
796 lock_basic_unlock(&ctx->cfglock);
797 }
798 /* cancelled, do not give answer */
799 return;
800 }
801 q->msg_security = s;
802 if(!buf) {
803 buf = q->w->env->scratch_buffer;
804 }
805 if(rcode != 0) {
806 error_encode(buf, rcode, NULL, 0, BIT_RD, NULL);
807 }
808 add_bg_result(q->w, q, buf, UB_NOERROR, why_bogus, was_ratelimited);
809 }
810
811
812 /** handle new query command for bg worker */
813 static void
814 handle_newq(struct libworker* w, uint8_t* buf, uint32_t len)
815 {
816 uint16_t qflags, qid;
817 struct query_info qinfo;
818 struct edns_data edns;
819 struct ctx_query* q;
820 if(w->is_bg_thread) {
821 lock_basic_lock(&w->ctx->cfglock);
822 q = context_lookup_new_query(w->ctx, buf, len);
823 lock_basic_unlock(&w->ctx->cfglock);
824 } else {
825 q = context_deserialize_new_query(w->ctx, buf, len);
826 }
827 free(buf);
828 if(!q) {
829 log_err("failed to deserialize newq");
830 return;
831 }
832 if(!setup_qinfo_edns(w, q, &qinfo, &edns)) {
833 add_bg_result(w, q, NULL, UB_SYNTAX, NULL, 0);
834 return;
835 }
836 qid = 0;
837 qflags = BIT_RD;
838 /* see if there is a fixed answer */
839 sldns_buffer_write_u16_at(w->back->udp_buff, 0, qid);
840 sldns_buffer_write_u16_at(w->back->udp_buff, 2, qflags);
841 if(local_zones_answer(w->ctx->local_zones, w->env, &qinfo, &edns,
842 w->back->udp_buff, w->env->scratch, NULL, NULL, 0, NULL, 0,
843 NULL, 0, NULL, 0, NULL)) {
844 regional_free_all(w->env->scratch);
845 q->msg_security = sec_status_insecure;
846 add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
847 free(qinfo.qname);
848 return;
849 }
850 if(w->ctx->env->auth_zones && auth_zones_answer(w->ctx->env->auth_zones,
851 w->env, &qinfo, &edns, NULL, w->back->udp_buff, w->env->scratch)) {
852 regional_free_all(w->env->scratch);
853 q->msg_security = sec_status_insecure;
854 add_bg_result(w, q, w->back->udp_buff, UB_NOERROR, NULL, 0);
855 free(qinfo.qname);
856 return;
857 }
858 q->w = w;
859 /* process new query */
860 if(!mesh_new_callback(w->env->mesh, &qinfo, qflags, &edns,
861 w->back->udp_buff, qid, libworker_bg_done_cb, q, 0)) {
862 add_bg_result(w, q, NULL, UB_NOMEM, NULL, 0);
863 }
864 free(qinfo.qname);
865 }
866
867 void libworker_alloc_cleanup(void* arg)
868 {
869 struct libworker* w = (struct libworker*)arg;
870 slabhash_clear(&w->env->rrset_cache->table);
871 slabhash_clear(w->env->msg_cache);
872 }
873
874 struct outbound_entry* libworker_send_query(struct query_info* qinfo,
875 uint16_t flags, int dnssec, int want_dnssec, int nocaps,
876 int check_ratelimit,
877 struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
878 size_t zonelen, int tcp_upstream, int ssl_upstream, char* tls_auth_name,
879 struct module_qstate* q, int* was_ratelimited)
880 {
881 struct libworker* w = (struct libworker*)q->env->worker;
882 struct outbound_entry* e = (struct outbound_entry*)regional_alloc(
883 q->region, sizeof(*e));
884 if(!e)
885 return NULL;
886 e->qstate = q;
887 e->qsent = outnet_serviced_query(w->back, qinfo, flags, dnssec,
888 want_dnssec, nocaps, check_ratelimit, tcp_upstream, ssl_upstream,
889 tls_auth_name, addr, addrlen, zone, zonelen, q,
890 libworker_handle_service_reply, e, w->back->udp_buff, q->env,
891 was_ratelimited);
892 if(!e->qsent) {
893 return NULL;
894 }
895 return e;
896 }
897
898 int
899 libworker_handle_service_reply(struct comm_point* c, void* arg, int error,
900 struct comm_reply* reply_info)
901 {
902 struct outbound_entry* e = (struct outbound_entry*)arg;
903 struct libworker* lw = (struct libworker*)e->qstate->env->worker;
904
905 if(error != 0) {
906 mesh_report_reply(lw->env->mesh, e, reply_info, error);
907 return 0;
908 }
909 /* sanity check. */
910 if(!LDNS_QR_WIRE(sldns_buffer_begin(c->buffer))
911 || LDNS_OPCODE_WIRE(sldns_buffer_begin(c->buffer)) !=
912 LDNS_PACKET_QUERY
913 || LDNS_QDCOUNT(sldns_buffer_begin(c->buffer)) > 1) {
914 /* error becomes timeout for the module as if this reply
915 * never arrived. */
916 mesh_report_reply(lw->env->mesh, e, reply_info,
917 NETEVENT_TIMEOUT);
918 return 0;
919 }
920 mesh_report_reply(lw->env->mesh, e, reply_info, NETEVENT_NOERROR);
921 return 0;
922 }
923
924 /* --- fake callbacks for fptr_wlist to work --- */
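/* These stubs exist only to satisfy the function pointer whitelist
 * (util/fptr_wlist), which is shared with the daemon and references the
 * daemon's worker callbacks by name. They must never be reached from the
 * library, hence the log_assert(0). */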
925 void worker_handle_control_cmd(struct tube* ATTR_UNUSED(tube),
926 uint8_t* ATTR_UNUSED(buffer), size_t ATTR_UNUSED(len),
927 int ATTR_UNUSED(error), void* ATTR_UNUSED(arg))
928 {
929 log_assert(0);
930 }
931
932 int worker_handle_request(struct comm_point* ATTR_UNUSED(c),
933 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
934 struct comm_reply* ATTR_UNUSED(repinfo))
935 {
936 log_assert(0);
937 return 0;
938 }
939
940 int worker_handle_service_reply(struct comm_point* ATTR_UNUSED(c),
941 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
942 struct comm_reply* ATTR_UNUSED(reply_info))
943 {
944 log_assert(0);
945 return 0;
946 }
947
948 int remote_accept_callback(struct comm_point* ATTR_UNUSED(c),
949 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
950 struct comm_reply* ATTR_UNUSED(repinfo))
951 {
952 log_assert(0);
953 return 0;
954 }
955
956 int remote_control_callback(struct comm_point* ATTR_UNUSED(c),
957 void* ATTR_UNUSED(arg), int ATTR_UNUSED(error),
958 struct comm_reply* ATTR_UNUSED(repinfo))
959 {
960 log_assert(0);
961 return 0;
962 }
963
964 void worker_sighandler(int ATTR_UNUSED(sig), void* ATTR_UNUSED(arg))
965 {
966 log_assert(0);
967 }
968
969 struct outbound_entry* worker_send_query(struct query_info* ATTR_UNUSED(qinfo),
970 uint16_t ATTR_UNUSED(flags), int ATTR_UNUSED(dnssec),
971 int ATTR_UNUSED(want_dnssec), int ATTR_UNUSED(nocaps),
972 int ATTR_UNUSED(check_ratelimit),
973 struct sockaddr_storage* ATTR_UNUSED(addr), socklen_t ATTR_UNUSED(addrlen),
974 uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen), int ATTR_UNUSED(tcp_upstream),
975 int ATTR_UNUSED(ssl_upstream), char* ATTR_UNUSED(tls_auth_name),
976 struct module_qstate* ATTR_UNUSED(q), int* ATTR_UNUSED(was_ratelimited))
977 {
978 log_assert(0);
979 return 0;
980 }
981
982 void
983 worker_alloc_cleanup(void* ATTR_UNUSED(arg))
984 {
985 log_assert(0);
986 }
987
988 void worker_stat_timer_cb(void* ATTR_UNUSED(arg))
989 {
990 log_assert(0);
991 }
992
993 void worker_probe_timer_cb(void* ATTR_UNUSED(arg))
994 {
995 log_assert(0);
996 }
997
998 void worker_start_accept(void* ATTR_UNUSED(arg))
999 {
1000 log_assert(0);
1001 }
1002
1003 void worker_stop_accept(void* ATTR_UNUSED(arg))
1004 {
1005 log_assert(0);
1006 }
1007
1008 int order_lock_cmp(const void* ATTR_UNUSED(e1), const void* ATTR_UNUSED(e2))
1009 {
1010 log_assert(0);
1011 return 0;
1012 }
1013
1014 int
1015 codeline_cmp(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1016 {
1017 log_assert(0);
1018 return 0;
1019 }
1020
1021 int replay_var_compare(const void* ATTR_UNUSED(a), const void* ATTR_UNUSED(b))
1022 {
1023 log_assert(0);
1024 return 0;
1025 }
1026
1027 void remote_get_opt_ssl(char* ATTR_UNUSED(str), void* ATTR_UNUSED(arg))
1028 {
1029 log_assert(0);
1030 }
1031
1032 #ifdef UB_ON_WINDOWS
1033 void
1034 worker_win_stop_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev), void*
1035 ATTR_UNUSED(arg)) {
1036 log_assert(0);
1037 }
1038
1039 void
1040 wsvc_cron_cb(void* ATTR_UNUSED(arg))
1041 {
1042 log_assert(0);
1043 }
1044 #endif /* UB_ON_WINDOWS */
1045
1046 #ifdef USE_DNSTAP
1047 void dtio_tap_callback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
1048 void* ATTR_UNUSED(arg))
1049 {
1050 log_assert(0);
1051 }
1052 #endif
1053
1054 #ifdef USE_DNSTAP
1055 void dtio_mainfdcallback(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
1056 void* ATTR_UNUSED(arg))
1057 {
1058 log_assert(0);
1059 }
1060 #endif
1061
1062 #ifdef HAVE_NGTCP2
1063 void doq_client_event_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
1064 void* ATTR_UNUSED(arg))
1065 {
1066 log_assert(0);
1067 }
1068 #endif
1069
1070 #ifdef HAVE_NGTCP2
1071 void doq_client_timer_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(ev),
1072 void* ATTR_UNUSED(arg))
1073 {
1074 log_assert(0);
1075 }
1076 #endif
1077