/*
 * services/authzone.c - authoritative zone that is locally hosted.
 *
 * Copyright (c) 2017, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the functions for an authority zone. This zone
 * is queried by the iterator, just like a stub or forward zone, but then
 * the data is locally held.
 */

#include "config.h"
#include "services/authzone.h"
#include "util/data/dname.h"
#include "util/data/msgparse.h"
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/data/packed_rrset.h"
#include "util/regional.h"
#include "util/net_help.h"
#include "util/netevent.h"
#include "util/config_file.h"
#include "util/log.h"
#include "util/module.h"
#include "util/random.h"
#include "services/cache/dns.h"
#include "services/outside_network.h"
#include "services/listen_dnsport.h"
#include "services/mesh.h"
#include "sldns/rrdef.h"
#include "sldns/pkthdr.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"
#include "sldns/wire2str.h"
#include "sldns/parseutil.h"
#include "sldns/keyraw.h"
#include "validator/val_nsec3.h"
#include "validator/val_secalgo.h"
#include <ctype.h>

/** bytes to use for NSEC3 hash buffer. 20 for sha1 */
#define N3HASHBUFLEN 32
/** max number of CNAMEs we are willing to follow (in one answer) */
#define MAX_CNAME_CHAIN 8
/** timeout for probe packets for SOA */
#define AUTH_PROBE_TIMEOUT 100 /* msec */
/** when to stop with SOA probes (when exponential timeouts exceed this) */
#define AUTH_PROBE_TIMEOUT_STOP 1000 /* msec */
/** auth transfer timeout for TCP connections, in msec */
#define AUTH_TRANSFER_TIMEOUT 10000 /* msec */
/** auth transfer max backoff for failed transfers and probes */
#define AUTH_TRANSFER_MAX_BACKOFF 86400 /* sec */
/** auth http port number */
#define AUTH_HTTP_PORT 80
/** auth https port number */
#define AUTH_HTTPS_PORT 443
/** max depth for nested $INCLUDEs */
#define MAX_INCLUDE_DEPTH 10

/** pick up nextprobe task to start waiting to perform transfer actions */
static void xfr_set_timeout(struct auth_xfer* xfr, struct module_env* env,
	int failure, int lookup_only);
/** move to sending the probe packets, next if fails. task_probe */
static void xfr_probe_send_or_end(struct auth_xfer* xfr,
	struct module_env* env);
/** pick up probe task with specified(or NULL) destination first,
 * or transfer task if nothing to probe, or false if already in progress */
static int xfr_start_probe(struct auth_xfer* xfr, struct module_env* env,
	struct auth_master* spec);
/** delete xfer structure (not its tree entry) */
static void auth_xfer_delete(struct auth_xfer* xfr);

/** create new dns_msg */
static struct dns_msg*
msg_create(struct regional* region, struct query_info* qinfo)
{
	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!msg)
		return NULL;
	msg->qinfo.qname = regional_alloc_init(region, qinfo->qname,
		qinfo->qname_len);
	if(!msg->qinfo.qname)
		return NULL;
	msg->qinfo.qname_len = qinfo->qname_len;
	msg->qinfo.qtype = qinfo->qtype;
	msg->qinfo.qclass = qinfo->qclass;
	msg->qinfo.local_alias = NULL;
	/* non-packed reply_info, because it needs to grow the array */
	msg->rep = (struct reply_info*)regional_alloc_zero(region,
		sizeof(struct reply_info)-sizeof(struct rrset_ref));
	if(!msg->rep)
		return NULL;
	msg->rep->flags = (uint16_t)(BIT_QR | BIT_AA);
	msg->rep->authoritative = 1;
	msg->rep->qdcount = 1;
	/* rrsets is NULL, no rrsets yet */
	return msg;
}

/** grow rrset array by one in msg */
static int
msg_grow_array(struct regional* region, struct dns_msg* msg)
{
	if(msg->rep->rrsets == NULL) {
		msg->rep->rrsets = regional_alloc_zero(region,
			sizeof(struct ub_packed_rrset_key*)*(msg->rep->rrset_count+1));
		if(!msg->rep->rrsets)
			return 0;
	} else {
		struct ub_packed_rrset_key** rrsets_old = msg->rep->rrsets;
		msg->rep->rrsets = regional_alloc_zero(region,
			sizeof(struct ub_packed_rrset_key*)*(msg->rep->rrset_count+1));
		if(!msg->rep->rrsets)
			return 0;
		memmove(msg->rep->rrsets, rrsets_old,
			sizeof(struct ub_packed_rrset_key*)*msg->rep->rrset_count);
	}
	return 1;
}

/** get ttl of rrset */
static time_t
get_rrset_ttl(struct ub_packed_rrset_key* k)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)
		k->entry.data;
	return d->ttl;
}

/** Copy rrset into region from domain-datanode and packet rrset */
static struct ub_packed_rrset_key*
auth_packed_rrset_copy_region(struct auth_zone* z, struct auth_data* node,
	struct auth_rrset* rrset, struct regional* region, time_t adjust)
{
	struct ub_packed_rrset_key key;
	memset(&key, 0, sizeof(key));
	key.entry.key = &key;
	key.entry.data = rrset->data;
	key.rk.dname = node->name;
	key.rk.dname_len = node->namelen;
	key.rk.type = htons(rrset->type);
	key.rk.rrset_class = htons(z->dclass);
	key.entry.hash = rrset_key_hash(&key.rk);
	return packed_rrset_copy_region(&key, region, adjust);
}

/** fix up msg->rep TTL and prefetch ttl */
static void
msg_ttl(struct dns_msg* msg)
{
	if(msg->rep->rrset_count == 0) return;
	if(msg->rep->rrset_count == 1) {
		msg->rep->ttl = get_rrset_ttl(msg->rep->rrsets[0]);
		msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
	} else if(get_rrset_ttl(msg->rep->rrsets[msg->rep->rrset_count-1]) <
		msg->rep->ttl) {
		msg->rep->ttl = get_rrset_ttl(msg->rep->rrsets[
			msg->rep->rrset_count-1]);
		msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
	}
}

/** see if rrset is a duplicate in the answer message */
static int
msg_rrset_duplicate(struct dns_msg* msg, uint8_t* nm, size_t nmlen,
	uint16_t type, uint16_t dclass)
{
	size_t i;
	for(i=0; i<msg->rep->rrset_count; i++) {
		struct ub_packed_rrset_key* k = msg->rep->rrsets[i];
		if(ntohs(k->rk.type) == type && k->rk.dname_len == nmlen &&
			ntohs(k->rk.rrset_class) == dclass &&
			query_dname_compare(k->rk.dname, nm) == 0)
			return 1;
	}
	return 0;
}

/** add rrset to answer section (no authority or additional rrsets yet) */
static int
msg_add_rrset_an(struct auth_zone* z, struct regional* region,
	struct dns_msg* msg, struct auth_data* node, struct auth_rrset* rrset)
{
	log_assert(msg->rep->ns_numrrsets == 0);
	log_assert(msg->rep->ar_numrrsets == 0);
	if(!rrset)
		return 1;
	if(msg_rrset_duplicate(msg, node->name, node->namelen, rrset->type,
		z->dclass))
		return 1;
	/* grow array */
	if(!msg_grow_array(region, msg))
		return 0;
	/* copy it */
	if(!(msg->rep->rrsets[msg->rep->rrset_count] =
		auth_packed_rrset_copy_region(z, node, rrset, region, 0)))
		return 0;
	msg->rep->rrset_count++;
	msg->rep->an_numrrsets++;
	msg_ttl(msg);
	return 1;
}

/** add rrset to authority section (no additional section rrsets yet) */
static int
msg_add_rrset_ns(struct auth_zone* z, struct regional* region,
	struct dns_msg* msg, struct auth_data* node, struct auth_rrset* rrset)
{
	log_assert(msg->rep->ar_numrrsets == 0);
	if(!rrset)
		return 1;
	if(msg_rrset_duplicate(msg, node->name, node->namelen, rrset->type,
		z->dclass))
		return 1;
	/* grow array */
	if(!msg_grow_array(region, msg))
		return 0;
	/* copy it */
	if(!(msg->rep->rrsets[msg->rep->rrset_count] =
		auth_packed_rrset_copy_region(z, node, rrset, region, 0)))
		return 0;
	msg->rep->rrset_count++;
	msg->rep->ns_numrrsets++;
	msg_ttl(msg);
	return 1;
}

/** add rrset to additional section */
static int
msg_add_rrset_ar(struct auth_zone* z, struct regional* region,
	struct dns_msg* msg, struct auth_data* node, struct auth_rrset* rrset)
{
	if(!rrset)
		return 1;
	if(msg_rrset_duplicate(msg, node->name, node->namelen, rrset->type,
		z->dclass))
		return 1;
	/* grow array */
	if(!msg_grow_array(region, msg))
		return 0;
	/* copy it */
	if(!(msg->rep->rrsets[msg->rep->rrset_count] =
		auth_packed_rrset_copy_region(z, node, rrset, region, 0)))
		return 0;
	msg->rep->rrset_count++;
	msg->rep->ar_numrrsets++;
	msg_ttl(msg);
	return 1;
}

struct auth_zones* auth_zones_create(void)
{
	struct auth_zones* az = (struct auth_zones*)calloc(1, sizeof(*az));
	if(!az) {
		log_err("out of memory");
		return NULL;
	}
	rbtree_init(&az->ztree, &auth_zone_cmp);
	rbtree_init(&az->xtree, &auth_xfer_cmp);
	lock_rw_init(&az->lock);
	lock_protect(&az->lock, &az->ztree, sizeof(az->ztree));
	lock_protect(&az->lock, &az->xtree, sizeof(az->xtree));
	/* also lock protects the rbnode's in struct auth_zone, auth_xfer */
	return az;
}

int auth_zone_cmp(const void* z1, const void* z2)
{
	/* first sort on class, so that hierarchy can be maintained within
	 * a class */
	struct auth_zone* a = (struct auth_zone*)z1;
	struct auth_zone* b = (struct auth_zone*)z2;
	int m;
	if(a->dclass != b->dclass) {
		if(a->dclass < b->dclass)
			return -1;
		return 1;
	}
	/* sorted such that higher zones sort before lower zones (their
	 * contents) */
	return dname_lab_cmp(a->name, a->namelabs, b->name, b->namelabs, &m);
}

int auth_data_cmp(const void* z1, const void* z2)
{
	struct auth_data* a = (struct auth_data*)z1;
	struct auth_data* b = (struct auth_data*)z2;
	int m;
	/* canonical sort, because DNSSEC needs that */
	return dname_canon_lab_cmp(a->name, a->namelabs, b->name,
		b->namelabs, &m);
}

int auth_xfer_cmp(const void* z1, const void* z2)
{
	/* first sort on class, so that hierarchy can be maintained within
	 * a class */
	struct auth_xfer* a = (struct auth_xfer*)z1;
	struct auth_xfer* b = (struct auth_xfer*)z2;
	int m;
	if(a->dclass != b->dclass) {
		if(a->dclass < b->dclass)
			return -1;
		return 1;
	}
	/* sorted such that higher zones sort before lower zones (their
	 * contents) */
	return dname_lab_cmp(a->name, a->namelabs, b->name, b->namelabs, &m);
}

/** delete auth rrset node */
static void
auth_rrset_delete(struct auth_rrset* rrset)
{
	if(!rrset) return;
	free(rrset->data);
	free(rrset);
}

/** delete auth data domain node */
static void
auth_data_delete(struct auth_data* n)
{
	struct auth_rrset* p, *np;
	if(!n) return;
	p = n->rrsets;
	while(p) {
		np = p->next;
		auth_rrset_delete(p);
		p = np;
	}
	free(n->name);
	free(n);
}

/** helper traverse to delete zones */
static void
auth_data_del(rbnode_type* n, void* ATTR_UNUSED(arg))
{
	struct auth_data* z = (struct auth_data*)n->key;
	auth_data_delete(z);
}

/** delete an auth zone structure (tree remove must be done elsewhere) */
static void
auth_zone_delete(struct auth_zone* z)
{
	if(!z) return;
	lock_rw_destroy(&z->lock);
	traverse_postorder(&z->data, auth_data_del, NULL);
	free(z->name);
	free(z->zonefile);
	free(z);
}

struct auth_zone*
auth_zone_create(struct auth_zones* az, uint8_t* nm, size_t nmlen,
	uint16_t dclass)
{
	struct auth_zone* z = (struct auth_zone*)calloc(1, sizeof(*z));
	if(!z) {
		return NULL;
	}
	z->node.key = z;
	z->dclass = dclass;
	z->namelen = nmlen;
	z->namelabs = dname_count_labels(nm);
	z->name = memdup(nm, nmlen);
	if(!z->name) {
		free(z);
		return NULL;
	}
	rbtree_init(&z->data, &auth_data_cmp);
	lock_rw_init(&z->lock);
	lock_protect(&z->lock, &z->name, sizeof(*z)-sizeof(rbnode_type));
	lock_rw_wrlock(&z->lock);
	/* z lock protects all, except rbtree itself, which is az->lock */
	if(!rbtree_insert(&az->ztree, &z->node)) {
		lock_rw_unlock(&z->lock);
		auth_zone_delete(z);
		log_warn("duplicate auth zone");
		return NULL;
	}
	return z;
}
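
/*
 * Editorial example (not part of the original source): a minimal sketch of
 * how auth_zones_create and auth_zone_create combine, assuming the usual
 * locking order (az->lock before z->lock). The zone name string is a
 * hypothetical value; auth_zone_create returns the new zone write-locked.
 *
 *	struct auth_zones* az = auth_zones_create();
 *	uint8_t nm[LDNS_MAX_DOMAINLEN+1];
 *	size_t nmlen = sizeof(nm);
 *	if(az && sldns_str2wire_dname_buf("example.org.", nm, &nmlen) == 0) {
 *		struct auth_zone* z;
 *		lock_rw_wrlock(&az->lock);
 *		z = auth_zone_create(az, nm, nmlen, LDNS_RR_CLASS_IN);
 *		lock_rw_unlock(&az->lock);
 *		if(z) lock_rw_unlock(&z->lock);
 *	}
 */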

struct auth_zone*
auth_zone_find(struct auth_zones* az, uint8_t* nm, size_t nmlen,
	uint16_t dclass)
{
	struct auth_zone key;
	key.node.key = &key;
	key.dclass = dclass;
	key.name = nm;
	key.namelen = nmlen;
	key.namelabs = dname_count_labels(nm);
	return (struct auth_zone*)rbtree_search(&az->ztree, &key);
}

struct auth_xfer*
auth_xfer_find(struct auth_zones* az, uint8_t* nm, size_t nmlen,
	uint16_t dclass)
{
	struct auth_xfer key;
	key.node.key = &key;
	key.dclass = dclass;
	key.name = nm;
	key.namelen = nmlen;
	key.namelabs = dname_count_labels(nm);
	return (struct auth_xfer*)rbtree_search(&az->xtree, &key);
}

/** find an auth zone or sorted less-or-equal, return true if exact */
static int
auth_zone_find_less_equal(struct auth_zones* az, uint8_t* nm, size_t nmlen,
	uint16_t dclass, struct auth_zone** z)
{
	struct auth_zone key;
	key.node.key = &key;
	key.dclass = dclass;
	key.name = nm;
	key.namelen = nmlen;
	key.namelabs = dname_count_labels(nm);
	return rbtree_find_less_equal(&az->ztree, &key, (rbnode_type**)z);
}


/** find the auth zone that is above the given name */
struct auth_zone*
auth_zones_find_zone(struct auth_zones* az, uint8_t* name, size_t name_len,
	uint16_t dclass)
{
	uint8_t* nm = name;
	size_t nmlen = name_len;
	struct auth_zone* z;
	if(auth_zone_find_less_equal(az, nm, nmlen, dclass, &z)) {
		/* exact match */
		return z;
	} else {
		/* less-or-nothing */
		if(!z) return NULL; /* nothing smaller, nothing above it */
		/* we found smaller name; smaller may be above the name,
		 * but not below it. */
		nm = dname_get_shared_topdomain(z->name, name);
		dname_count_size_labels(nm, &nmlen);
		z = NULL;
	}

	/* search up */
	while(!z) {
		z = auth_zone_find(az, nm, nmlen, dclass);
		if(z) return z;
		if(dname_is_root(nm)) break;
		dname_remove_label(&nm, &nmlen);
	}
	return NULL;
}

/** find or create zone with name str. caller must have lock on az.
 * returns a wrlocked zone */
static struct auth_zone*
auth_zones_find_or_add_zone(struct auth_zones* az, char* name)
{
	uint8_t nm[LDNS_MAX_DOMAINLEN+1];
	size_t nmlen = sizeof(nm);
	struct auth_zone* z;

	if(sldns_str2wire_dname_buf(name, nm, &nmlen) != 0) {
		log_err("cannot parse auth zone name: %s", name);
		return 0;
	}
	z = auth_zone_find(az, nm, nmlen, LDNS_RR_CLASS_IN);
	if(!z) {
		/* not found, create the zone */
		z = auth_zone_create(az, nm, nmlen, LDNS_RR_CLASS_IN);
	} else {
		lock_rw_wrlock(&z->lock);
	}
	return z;
}

/** find or create xfer zone with name str. caller must have lock on az.
 * returns a locked xfer */
static struct auth_xfer*
auth_zones_find_or_add_xfer(struct auth_zones* az, struct auth_zone* z)
{
	struct auth_xfer* x;
	x = auth_xfer_find(az, z->name, z->namelen, z->dclass);
	if(!x) {
		/* not found, create the zone */
		x = auth_xfer_create(az, z);
	} else {
		lock_basic_lock(&x->lock);
	}
	return x;
}

int
auth_zone_set_zonefile(struct auth_zone* z, char* zonefile)
{
	if(z->zonefile) free(z->zonefile);
	if(zonefile == NULL) {
		z->zonefile = NULL;
	} else {
		z->zonefile = strdup(zonefile);
		if(!z->zonefile) {
			log_err("malloc failure");
			return 0;
		}
	}
	return 1;
}

/** set auth zone fallback. caller must have lock on zone */
int
auth_zone_set_fallback(struct auth_zone* z, char* fallbackstr)
{
	if(strcmp(fallbackstr, "yes") != 0 && strcmp(fallbackstr, "no") != 0){
		log_err("auth zone fallback, expected yes or no, got %s",
			fallbackstr);
		return 0;
	}
	z->fallback_enabled = (strcmp(fallbackstr, "yes")==0);
	return 1;
}

/** create domain with the given name */
static struct auth_data*
az_domain_create(struct auth_zone* z, uint8_t* nm, size_t nmlen)
{
	struct auth_data* n = (struct auth_data*)malloc(sizeof(*n));
	if(!n) return NULL;
	memset(n, 0, sizeof(*n));
	n->node.key = n;
	n->name = memdup(nm, nmlen);
	if(!n->name) {
		free(n);
		return NULL;
	}
	n->namelen = nmlen;
	n->namelabs = dname_count_labels(nm);
	if(!rbtree_insert(&z->data, &n->node)) {
		log_warn("duplicate auth domain name");
		free(n->name);
		free(n);
		return NULL;
	}
	return n;
}

/** find domain with exactly the given name */
static struct auth_data*
az_find_name(struct auth_zone* z, uint8_t* nm, size_t nmlen)
{
	struct auth_zone key;
	key.node.key = &key;
	key.name = nm;
	key.namelen = nmlen;
	key.namelabs = dname_count_labels(nm);
	return (struct auth_data*)rbtree_search(&z->data, &key);
}

/** Find domain name (or closest match) */
static void
az_find_domain(struct auth_zone* z, struct query_info* qinfo, int* node_exact,
	struct auth_data** node)
{
	struct auth_zone key;
	key.node.key = &key;
	key.name = qinfo->qname;
	key.namelen = qinfo->qname_len;
	key.namelabs = dname_count_labels(key.name);
	*node_exact = rbtree_find_less_equal(&z->data, &key,
		(rbnode_type**)node);
}

/** find or create domain with name in zone */
static struct auth_data*
az_domain_find_or_create(struct auth_zone* z, uint8_t* dname,
	size_t dname_len)
{
	struct auth_data* n = az_find_name(z, dname, dname_len);
	if(!n) {
		n = az_domain_create(z, dname, dname_len);
	}
	return n;
}

/** find rrset of given type in the domain */
static struct auth_rrset*
az_domain_rrset(struct auth_data* n, uint16_t t)
{
	struct auth_rrset* rrset;
	if(!n) return NULL;
	rrset = n->rrsets;
	while(rrset) {
		if(rrset->type == t)
			return rrset;
		rrset = rrset->next;
	}
	return NULL;
}

/** remove rrset of this type from domain */
static void
domain_remove_rrset(struct auth_data* node, uint16_t rr_type)
{
	struct auth_rrset* rrset, *prev;
	if(!node) return;
	prev = NULL;
	rrset = node->rrsets;
	while(rrset) {
		if(rrset->type == rr_type) {
			/* found it, now delete it */
			if(prev) prev->next = rrset->next;
			else node->rrsets = rrset->next;
			auth_rrset_delete(rrset);
			return;
		}
		prev = rrset;
		rrset = rrset->next;
	}
}

/** find an rr index in the rrset. returns true if found */
static int
az_rrset_find_rr(struct packed_rrset_data* d, uint8_t* rdata, size_t len,
	size_t* index)
{
	size_t i;
	for(i=0; i<d->count; i++) {
		if(d->rr_len[i] != len)
			continue;
		if(memcmp(d->rr_data[i], rdata, len) == 0) {
			*index = i;
			return 1;
		}
	}
	return 0;
}

/** find an rrsig index in the rrset. returns true if found */
static int
az_rrset_find_rrsig(struct packed_rrset_data* d, uint8_t* rdata, size_t len,
	size_t* index)
{
	size_t i;
	for(i=d->count; i<d->count + d->rrsig_count; i++) {
		if(d->rr_len[i] != len)
			continue;
		if(memcmp(d->rr_data[i], rdata, len) == 0) {
			*index = i;
			return 1;
		}
	}
	return 0;
}

/** see if rdata is duplicate */
static int
rdata_duplicate(struct packed_rrset_data* d, uint8_t* rdata, size_t len)
{
	size_t i;
	for(i=0; i<d->count + d->rrsig_count; i++) {
		if(d->rr_len[i] != len)
			continue;
		if(memcmp(d->rr_data[i], rdata, len) == 0)
			return 1;
	}
	return 0;
}

/** get rrsig type covered from rdata.
 * @param rdata: rdata in wireformat, starting with 16bit rdlength.
 * @param rdatalen: length of rdata buffer.
 * @return type covered (or 0).
 */
static uint16_t
rrsig_rdata_get_type_covered(uint8_t* rdata, size_t rdatalen)
{
	if(rdatalen < 4)
		return 0;
	return sldns_read_uint16(rdata+2);
}

/** remove RR from existing RRset. Also sig, if it is a signature.
 * reallocates the packed rrset for a new one, false on alloc failure */
static int
rrset_remove_rr(struct auth_rrset* rrset, size_t index)
{
	struct packed_rrset_data* d, *old = rrset->data;
	size_t i;
	if(index >= old->count + old->rrsig_count)
		return 0; /* index out of bounds */
	d = (struct packed_rrset_data*)calloc(1, packed_rrset_sizeof(old) - (
		sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t) +
		old->rr_len[index]));
	if(!d) {
		log_err("malloc failure");
		return 0;
	}
	d->ttl = old->ttl;
	d->count = old->count;
	d->rrsig_count = old->rrsig_count;
	if(index < d->count) d->count--;
	else d->rrsig_count--;
	d->trust = old->trust;
	d->security = old->security;

	/* set rr_len, needed for ptr_fixup */
	d->rr_len = (size_t*)((uint8_t*)d +
		sizeof(struct packed_rrset_data));
	if(index > 0)
		memmove(d->rr_len, old->rr_len, (index)*sizeof(size_t));
	if(index+1 < old->count+old->rrsig_count)
		memmove(&d->rr_len[index], &old->rr_len[index+1],
			(old->count+old->rrsig_count - (index+1))*sizeof(size_t));
	packed_rrset_ptr_fixup(d);

	/* move over ttls */
	if(index > 0)
		memmove(d->rr_ttl, old->rr_ttl, (index)*sizeof(time_t));
	if(index+1 < old->count+old->rrsig_count)
		memmove(&d->rr_ttl[index], &old->rr_ttl[index+1],
			(old->count+old->rrsig_count - (index+1))*sizeof(time_t));

	/* move over rr_data */
	for(i=0; i<d->count+d->rrsig_count; i++) {
		size_t oldi;
		if(i < index) oldi = i;
		else oldi = i+1;
		memmove(d->rr_data[i], old->rr_data[oldi], d->rr_len[i]);
	}

	/* recalc ttl (lowest of remaining RR ttls) */
	if(d->count + d->rrsig_count > 0)
		d->ttl = d->rr_ttl[0];
	for(i=0; i<d->count+d->rrsig_count; i++) {
		if(d->rr_ttl[i] < d->ttl)
			d->ttl = d->rr_ttl[i];
	}

	free(rrset->data);
	rrset->data = d;
	return 1;
}
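
/*
 * Editorial note on the layout assumed by rrset_remove_rr above and by
 * rrset_add_rr/rrset_create below: a packed rrset is a single allocation,
 *
 *	struct packed_rrset_data
 *	size_t   rr_len[count + rrsig_count]
 *	uint8_t* rr_data[count + rrsig_count]
 *	time_t   rr_ttl[count + rrsig_count]
 *	rdata octets of every RR, each prefixed with its 16-bit rdlength
 *
 * so growing or shrinking the set by one RR means allocating a block that
 * is one array slot plus one rdata blob larger or smaller, copying the
 * arrays, and calling packed_rrset_ptr_fixup() to repair the internal
 * pointers into the new block.
 */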

/** add RR to existing RRset. If insert_sig is true, add to rrsigs.
 * This reallocates the packed rrset for a new one */
static int
rrset_add_rr(struct auth_rrset* rrset, uint32_t rr_ttl, uint8_t* rdata,
	size_t rdatalen, int insert_sig)
{
	struct packed_rrset_data* d, *old = rrset->data;
	size_t total, old_total;

	d = (struct packed_rrset_data*)calloc(1, packed_rrset_sizeof(old)
		+ sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t)
		+ rdatalen);
	if(!d) {
		log_err("out of memory");
		return 0;
	}
	/* copy base values */
	memcpy(d, old, sizeof(struct packed_rrset_data));
	if(!insert_sig) {
		d->count++;
	} else {
		d->rrsig_count++;
	}
	old_total = old->count + old->rrsig_count;
	total = d->count + d->rrsig_count;
	/* set rr_len, needed for ptr_fixup */
	d->rr_len = (size_t*)((uint8_t*)d +
		sizeof(struct packed_rrset_data));
	if(old->count != 0)
		memmove(d->rr_len, old->rr_len, old->count*sizeof(size_t));
	if(old->rrsig_count != 0)
		memmove(d->rr_len+d->count, old->rr_len+old->count,
			old->rrsig_count*sizeof(size_t));
	if(!insert_sig)
		d->rr_len[d->count-1] = rdatalen;
	else d->rr_len[total-1] = rdatalen;
	packed_rrset_ptr_fixup(d);
	if((time_t)rr_ttl < d->ttl)
		d->ttl = rr_ttl;

	/* copy old values into new array */
	if(old->count != 0) {
		memmove(d->rr_ttl, old->rr_ttl, old->count*sizeof(time_t));
		/* all the old rr pieces are allocated sequential, so we
		 * can copy them in one go */
		memmove(d->rr_data[0], old->rr_data[0],
			(old->rr_data[old->count-1] - old->rr_data[0]) +
			old->rr_len[old->count-1]);
	}
	if(old->rrsig_count != 0) {
		memmove(d->rr_ttl+d->count, old->rr_ttl+old->count,
			old->rrsig_count*sizeof(time_t));
		memmove(d->rr_data[d->count], old->rr_data[old->count],
			(old->rr_data[old_total-1] - old->rr_data[old->count]) +
			old->rr_len[old_total-1]);
	}

	/* insert new value */
	if(!insert_sig) {
		d->rr_ttl[d->count-1] = rr_ttl;
		memmove(d->rr_data[d->count-1], rdata, rdatalen);
	} else {
		d->rr_ttl[total-1] = rr_ttl;
		memmove(d->rr_data[total-1], rdata, rdatalen);
	}

	rrset->data = d;
	free(old);
	return 1;
}

/** Create new rrset for node with packed rrset with one RR element */
static struct auth_rrset*
rrset_create(struct auth_data* node, uint16_t rr_type, uint32_t rr_ttl,
	uint8_t* rdata, size_t rdatalen)
{
	struct auth_rrset* rrset = (struct auth_rrset*)calloc(1,
		sizeof(*rrset));
	struct auth_rrset* p, *prev;
	struct packed_rrset_data* d;
	if(!rrset) {
		log_err("out of memory");
		return NULL;
	}
	rrset->type = rr_type;

	/* the rrset data structure, with one RR */
	d = (struct packed_rrset_data*)calloc(1,
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + rdatalen);
	if(!d) {
		free(rrset);
		log_err("out of memory");
		return NULL;
	}
	rrset->data = d;
	d->ttl = rr_ttl;
	d->trust = rrset_trust_prim_noglue;
	d->rr_len = (size_t*)((uint8_t*)d + sizeof(struct packed_rrset_data));
	d->rr_data = (uint8_t**)&(d->rr_len[1]);
	d->rr_ttl = (time_t*)&(d->rr_data[1]);
	d->rr_data[0] = (uint8_t*)&(d->rr_ttl[1]);

	/* insert the RR */
	d->rr_len[0] = rdatalen;
	d->rr_ttl[0] = rr_ttl;
	memmove(d->rr_data[0], rdata, rdatalen);
	d->count++;

	/* insert rrset into linked list for domain */
	/* find sorted place to link the rrset into the list */
	prev = NULL;
	p = node->rrsets;
	while(p && p->type<=rr_type) {
		prev = p;
		p = p->next;
	}
	/* so, prev is smaller, and p is larger than rr_type */
	rrset->next = p;
	if(prev) prev->next = rrset;
	else node->rrsets = rrset;
	return rrset;
}

/** count number (and size) of rrsigs that cover a type */
static size_t
rrsig_num_that_cover(struct auth_rrset* rrsig, uint16_t rr_type, size_t* sigsz)
{
	struct packed_rrset_data* d = rrsig->data;
	size_t i, num = 0;
	*sigsz = 0;
	log_assert(d && rrsig->type == LDNS_RR_TYPE_RRSIG);
	for(i=0; i<d->count+d->rrsig_count; i++) {
		if(rrsig_rdata_get_type_covered(d->rr_data[i],
			d->rr_len[i]) == rr_type) {
			num++;
			(*sigsz) += d->rr_len[i];
		}
	}
	return num;
}

/** See if rrsig set has covered sigs for rrset and move them over */
static int
rrset_moveover_rrsigs(struct auth_data* node, uint16_t rr_type,
	struct auth_rrset* rrset, struct auth_rrset* rrsig)
{
	size_t sigs, sigsz, i, j, total;
	struct packed_rrset_data* sigold = rrsig->data;
	struct packed_rrset_data* old = rrset->data;
	struct packed_rrset_data* d, *sigd;

	log_assert(rrset->type == rr_type);
	log_assert(rrsig->type == LDNS_RR_TYPE_RRSIG);
	sigs = rrsig_num_that_cover(rrsig, rr_type, &sigsz);
	if(sigs == 0) {
		/* 0 rrsigs to move over, done */
		return 1;
	}

	/* allocate rrset sigsz larger for extra sigs elements, and
	 * allocate rrsig sigsz smaller for less sigs elements. */
	d = (struct packed_rrset_data*)calloc(1, packed_rrset_sizeof(old)
		+ sigs*(sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t))
		+ sigsz);
	if(!d) {
		log_err("out of memory");
		return 0;
	}
	/* copy base values */
	total = old->count + old->rrsig_count;
	memcpy(d, old, sizeof(struct packed_rrset_data));
	d->rrsig_count += sigs;
	/* setup rr_len */
	d->rr_len = (size_t*)((uint8_t*)d +
		sizeof(struct packed_rrset_data));
	if(total != 0)
		memmove(d->rr_len, old->rr_len, total*sizeof(size_t));
	j = d->count+d->rrsig_count-sigs;
	for(i=0; i<sigold->count+sigold->rrsig_count; i++) {
		if(rrsig_rdata_get_type_covered(sigold->rr_data[i],
			sigold->rr_len[i]) == rr_type) {
			d->rr_len[j] = sigold->rr_len[i];
			j++;
		}
	}
	packed_rrset_ptr_fixup(d);

	/* copy old values into new array */
	if(total != 0) {
		memmove(d->rr_ttl, old->rr_ttl, total*sizeof(time_t));
		/* all the old rr pieces are allocated sequential, so we
		 * can copy them in one go */
		memmove(d->rr_data[0], old->rr_data[0],
			(old->rr_data[total-1] - old->rr_data[0]) +
			old->rr_len[total-1]);
	}

	/* move over the rrsigs to the larger rrset */
	j = d->count+d->rrsig_count-sigs;
	for(i=0; i<sigold->count+sigold->rrsig_count; i++) {
		if(rrsig_rdata_get_type_covered(sigold->rr_data[i],
			sigold->rr_len[i]) == rr_type) {
			/* move this one over to location j */
			d->rr_ttl[j] = sigold->rr_ttl[i];
			memmove(d->rr_data[j], sigold->rr_data[i],
				sigold->rr_len[i]);
			if(d->rr_ttl[j] < d->ttl)
				d->ttl = d->rr_ttl[j];
			j++;
		}
	}

	/* put it in and deallocate the old rrset */
	rrset->data = d;
	free(old);

	/* now make rrsig set smaller */
	if(sigold->count+sigold->rrsig_count == sigs) {
		/* remove all sigs from rrsig, remove it entirely */
		domain_remove_rrset(node, LDNS_RR_TYPE_RRSIG);
		return 1;
	}
	log_assert(packed_rrset_sizeof(sigold) > sigs*(sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t)) + sigsz);
	sigd = (struct packed_rrset_data*)calloc(1, packed_rrset_sizeof(sigold)
		- sigs*(sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t))
		- sigsz);
	if(!sigd) {
		/* no need to free up d, it has already been placed in the
		 * node->rrset structure */
		log_err("out of memory");
		return 0;
	}
	/* copy base values */
	memcpy(sigd, sigold, sizeof(struct packed_rrset_data));
	sigd->rrsig_count -= sigs;
	/* setup rr_len */
	sigd->rr_len = (size_t*)((uint8_t*)sigd +
		sizeof(struct packed_rrset_data));
	j = 0;
	for(i=0; i<sigold->count+sigold->rrsig_count; i++) {
		if(rrsig_rdata_get_type_covered(sigold->rr_data[i],
			sigold->rr_len[i]) != rr_type) {
			sigd->rr_len[j] = sigold->rr_len[i];
			j++;
		}
	}
	packed_rrset_ptr_fixup(sigd);

	/* copy old values into new rrsig array */
	j = 0;
	for(i=0; i<sigold->count+sigold->rrsig_count; i++) {
		if(rrsig_rdata_get_type_covered(sigold->rr_data[i],
			sigold->rr_len[i]) != rr_type) {
			/* move this one over to location j */
			sigd->rr_ttl[j] = sigold->rr_ttl[i];
			memmove(sigd->rr_data[j], sigold->rr_data[i],
				sigold->rr_len[i]);
			if(j==0) sigd->ttl = sigd->rr_ttl[j];
			else {
				if(sigd->rr_ttl[j] < sigd->ttl)
					sigd->ttl = sigd->rr_ttl[j];
			}
			j++;
		}
	}

	/* put it in and deallocate the old rrset */
	rrsig->data = sigd;
	free(sigold);

	return 1;
}

/** copy the rrsigs from the rrset to the rrsig rrset, because the rrset
 * is going to be deleted. reallocates the RRSIG rrset data. */
static int
rrsigs_copy_from_rrset_to_rrsigset(struct auth_rrset* rrset,
	struct auth_rrset* rrsigset)
{
	size_t i;
	if(rrset->data->rrsig_count == 0)
		return 1;

	/* move them over one by one, because there might be duplicates,
	 * duplicates are ignored */
	for(i=rrset->data->count;
		i<rrset->data->count+rrset->data->rrsig_count; i++) {
		uint8_t* rdata = rrset->data->rr_data[i];
		size_t rdatalen = rrset->data->rr_len[i];
		time_t rr_ttl = rrset->data->rr_ttl[i];

		if(rdata_duplicate(rrsigset->data, rdata, rdatalen)) {
			continue;
		}
		if(!rrset_add_rr(rrsigset, rr_ttl, rdata, rdatalen, 0))
			return 0;
	}
	return 1;
}

/** Add rr to node, ignores duplicate RRs,
 * rdata points to buffer with rdatalen octets, starts with the 2-byte rdlength. */
static int
az_domain_add_rr(struct auth_data* node, uint16_t rr_type, uint32_t rr_ttl,
	uint8_t* rdata, size_t rdatalen, int* duplicate)
{
	struct auth_rrset* rrset;
	/* packed rrsets have their rrsigs along with them, sort them out */
	if(rr_type == LDNS_RR_TYPE_RRSIG) {
		uint16_t ctype = rrsig_rdata_get_type_covered(rdata, rdatalen);
		if((rrset=az_domain_rrset(node, ctype))!= NULL) {
			/* a node of the correct type exists, add the RRSIG
			 * to the rrset of the covered data type */
			if(rdata_duplicate(rrset->data, rdata, rdatalen)) {
				if(duplicate) *duplicate = 1;
				return 1;
			}
			if(!rrset_add_rr(rrset, rr_ttl, rdata, rdatalen, 1))
				return 0;
		} else if((rrset=az_domain_rrset(node, rr_type))!= NULL) {
			/* add RRSIG to rrset of type RRSIG */
			if(rdata_duplicate(rrset->data, rdata, rdatalen)) {
				if(duplicate) *duplicate = 1;
				return 1;
			}
			if(!rrset_add_rr(rrset, rr_ttl, rdata, rdatalen, 0))
				return 0;
		} else {
			/* create rrset of type RRSIG */
			if(!rrset_create(node, rr_type, rr_ttl, rdata,
				rdatalen))
				return 0;
		}
	} else {
		/* normal RR type */
		if((rrset=az_domain_rrset(node, rr_type))!= NULL) {
			/* add data to existing node with data type */
			if(rdata_duplicate(rrset->data, rdata, rdatalen)) {
				if(duplicate) *duplicate = 1;
				return 1;
			}
			if(!rrset_add_rr(rrset, rr_ttl, rdata, rdatalen, 0))
				return 0;
		} else {
			struct auth_rrset* rrsig;
			/* create new node with data type */
			if(!(rrset=rrset_create(node, rr_type, rr_ttl, rdata,
				rdatalen)))
				return 0;

			/* see if node of type RRSIG has signatures that
			 * cover the data type, and move them over */
			/* and then make the RRSIG type smaller */
			if((rrsig=az_domain_rrset(node, LDNS_RR_TYPE_RRSIG))
				!= NULL) {
				if(!rrset_moveover_rrsigs(node, rr_type,
					rrset, rrsig))
					return 0;
			}
		}
	}
	return 1;
}

/** insert RR into zone, ignore duplicates */
static int
az_insert_rr(struct auth_zone* z, uint8_t* rr, size_t rr_len,
	size_t dname_len, int* duplicate)
{
	struct auth_data* node;
	uint8_t* dname = rr;
	uint16_t rr_type = sldns_wirerr_get_type(rr, rr_len, dname_len);
	uint16_t rr_class = sldns_wirerr_get_class(rr, rr_len, dname_len);
	uint32_t rr_ttl = sldns_wirerr_get_ttl(rr, rr_len, dname_len);
	size_t rdatalen = ((size_t)sldns_wirerr_get_rdatalen(rr, rr_len,
		dname_len))+2;
	/* rdata points to rdata prefixed with uint16 rdatalength */
	uint8_t* rdata = sldns_wirerr_get_rdatawl(rr, rr_len, dname_len);

	if(rr_class != z->dclass) {
		log_err("wrong class for RR");
		return 0;
	}
	if(!(node=az_domain_find_or_create(z, dname, dname_len))) {
		log_err("cannot create domain");
		return 0;
	}
	if(!az_domain_add_rr(node, rr_type, rr_ttl, rdata, rdatalen,
		duplicate)) {
		log_err("cannot add RR to domain");
		return 0;
	}
	return 1;
}

/** Remove rr from node, ignores nonexisting RRs,
 * rdata points to buffer with rdatalen octets, starts with the 2-byte rdlength. */
static int
az_domain_remove_rr(struct auth_data* node, uint16_t rr_type,
	uint8_t* rdata, size_t rdatalen, int* nonexist)
{
	struct auth_rrset* rrset;
	size_t index = 0;

	/* find the plain RR of the given type */
	if((rrset=az_domain_rrset(node, rr_type))!= NULL) {
		if(az_rrset_find_rr(rrset->data, rdata, rdatalen, &index)) {
			if(rrset->data->count == 1 &&
				rrset->data->rrsig_count == 0) {
				/* last RR, delete the rrset */
				domain_remove_rrset(node, rr_type);
			} else if(rrset->data->count == 1 &&
				rrset->data->rrsig_count != 0) {
				/* move RRSIGs to the RRSIG rrset, or
				 * this one becomes that RRset */
				struct auth_rrset* rrsigset = az_domain_rrset(
					node, LDNS_RR_TYPE_RRSIG);
				if(rrsigset) {
					/* move left over rrsigs to the
					 * existing rrset of type RRSIG */
					rrsigs_copy_from_rrset_to_rrsigset(
						rrset, rrsigset);
					/* and then delete the rrset */
					domain_remove_rrset(node, rr_type);
				} else {
					/* no rrset of type RRSIG, this
					 * set is now of that type,
					 * just remove the rr */
					if(!rrset_remove_rr(rrset, index))
						return 0;
					rrset->type = LDNS_RR_TYPE_RRSIG;
					rrset->data->count = rrset->data->rrsig_count;
					rrset->data->rrsig_count = 0;
				}
			} else {
				/* remove the RR from the rrset */
				if(!rrset_remove_rr(rrset, index))
					return 0;
			}
			return 1;
		}
		/* rr not found in rrset */
	}

	/* is it a type RRSIG, look under the covered type */
	if(rr_type == LDNS_RR_TYPE_RRSIG) {
		uint16_t ctype = rrsig_rdata_get_type_covered(rdata, rdatalen);
		if((rrset=az_domain_rrset(node, ctype))!= NULL) {
			if(az_rrset_find_rrsig(rrset->data, rdata, rdatalen,
				&index)) {
				/* rrsig should have d->count > 0, be
				 * over some rr of that type */
				/* remove the rrsig from the rrsigs list of the
				 * rrset */
				if(!rrset_remove_rr(rrset, index))
					return 0;
				return 1;
			}
		}
		/* also RRSIG not found */
	}

	/* nothing found to delete */
	if(nonexist) *nonexist = 1;
	return 1;
}

/** remove RR from zone, ignore if it does not exist, false on alloc failure */
static int
az_remove_rr(struct auth_zone* z, uint8_t* rr, size_t rr_len,
	size_t dname_len, int* nonexist)
{
	struct auth_data* node;
	uint8_t* dname = rr;
	uint16_t rr_type = sldns_wirerr_get_type(rr, rr_len, dname_len);
	uint16_t rr_class = sldns_wirerr_get_class(rr, rr_len, dname_len);
	size_t rdatalen = ((size_t)sldns_wirerr_get_rdatalen(rr, rr_len,
		dname_len))+2;
	/* rdata points to rdata prefixed with uint16 rdatalength */
	uint8_t* rdata = sldns_wirerr_get_rdatawl(rr, rr_len, dname_len);

	if(rr_class != z->dclass) {
		log_err("wrong class for RR");
		/* really also a nonexisting entry, because no records
		 * of that class in the zone, but return an error because
		 * getting records of the wrong class is a failure of the
		 * zone transfer */
		return 0;
	}
	node = az_find_name(z, dname, dname_len);
	if(!node) {
		/* node with that name does not exist */
		/* nonexisting entry, because no such name */
		*nonexist = 1;
		return 1;
	}
	if(!az_domain_remove_rr(node, rr_type, rdata, rdatalen, nonexist)) {
		/* alloc failure or so */
		return 0;
	}
	/* remove the node, if necessary */
	/* an rrsets==NULL entry is not kept around for empty nonterminals,
	 * and also parent nodes are not kept around, so we just delete it */
	if(node->rrsets == NULL) {
		(void)rbtree_delete(&z->data, node);
		auth_data_delete(node);
	}
	return 1;
}
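
/*
 * Illustrative sketch (not part of the original source): az_insert_rr and
 * az_remove_rr take an uncompressed wire-format RR (owner dname, type,
 * class, TTL, rdlength, rdata). One way to build such an RR from
 * presentation format is sldns_str2wire_rr_buf; here z is assumed to be a
 * write-locked zone of class IN and the record text is a hypothetical value.
 *
 *	uint8_t rr[LDNS_RR_BUF_SIZE];
 *	size_t rr_len = sizeof(rr), dname_len = 0;
 *	if(sldns_str2wire_rr_buf("www.example.org. 3600 IN A 192.0.2.1",
 *		rr, &rr_len, &dname_len, 3600, NULL, 0, NULL, 0) == 0) {
 *		if(!az_insert_rr(z, rr, rr_len, dname_len, NULL))
 *			log_err("could not insert example RR");
 *	}
 */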

/** decompress an RR into the buffer where it'll be an uncompressed RR
 * with uncompressed dname and uncompressed rdata (dnames) */
static int
decompress_rr_into_buffer(struct sldns_buffer* buf, uint8_t* pkt,
	size_t pktlen, uint8_t* dname, uint16_t rr_type, uint16_t rr_class,
	uint32_t rr_ttl, uint8_t* rr_data, uint16_t rr_rdlen)
{
	sldns_buffer pktbuf;
	size_t dname_len = 0;
	size_t rdlenpos;
	size_t rdlen;
	uint8_t* rd;
	const sldns_rr_descriptor* desc;
	sldns_buffer_init_frm_data(&pktbuf, pkt, pktlen);
	sldns_buffer_clear(buf);

	/* decompress dname */
	sldns_buffer_set_position(&pktbuf,
		(size_t)(dname - sldns_buffer_current(&pktbuf)));
	dname_len = pkt_dname_len(&pktbuf);
	if(dname_len == 0) return 0; /* parse fail on dname */
	if(!sldns_buffer_available(buf, dname_len)) return 0;
	dname_pkt_copy(&pktbuf, sldns_buffer_current(buf), dname);
	sldns_buffer_skip(buf, (ssize_t)dname_len);

	/* type, class, ttl and rdatalength fields */
	if(!sldns_buffer_available(buf, 10)) return 0;
	sldns_buffer_write_u16(buf, rr_type);
	sldns_buffer_write_u16(buf, rr_class);
	sldns_buffer_write_u32(buf, rr_ttl);
	rdlenpos = sldns_buffer_position(buf);
	sldns_buffer_write_u16(buf, 0); /* rd length position */

	/* decompress rdata */
	desc = sldns_rr_descript(rr_type);
	rd = rr_data;
	rdlen = rr_rdlen;
	if(rdlen > 0 && desc && desc->_dname_count > 0) {
		int count = (int)desc->_dname_count;
		int rdf = 0;
		size_t len; /* how much rdata to plain copy */
		size_t uncompressed_len, compressed_len;
		size_t oldpos;
		/* decompress dnames. */
		while(rdlen > 0 && count) {
			switch(desc->_wireformat[rdf]) {
			case LDNS_RDF_TYPE_DNAME:
				sldns_buffer_set_position(&pktbuf,
					(size_t)(rd -
					sldns_buffer_begin(&pktbuf)));
				oldpos = sldns_buffer_position(&pktbuf);
				/* moves pktbuf to right after the
				 * compressed dname, and returns uncompressed
				 * dname length */
				uncompressed_len = pkt_dname_len(&pktbuf);
				if(!uncompressed_len)
					return 0; /* parse error in dname */
				if(!sldns_buffer_available(buf,
					uncompressed_len))
					/* dname too long for buffer */
					return 0;
				dname_pkt_copy(&pktbuf,
					sldns_buffer_current(buf), rd);
				sldns_buffer_skip(buf, (ssize_t)uncompressed_len);
				compressed_len = sldns_buffer_position(
					&pktbuf) - oldpos;
				rd += compressed_len;
				rdlen -= compressed_len;
				count--;
				len = 0;
				break;
			case LDNS_RDF_TYPE_STR:
				len = rd[0] + 1;
				break;
			default:
				len = get_rdf_size(desc->_wireformat[rdf]);
				break;
			}
			if(len) {
				if(!sldns_buffer_available(buf, len))
					return 0; /* too long for buffer */
				sldns_buffer_write(buf, rd, len);
				rd += len;
				rdlen -= len;
			}
			rdf++;
		}
	}
	/* copy remaining data */
	if(rdlen > 0) {
		if(!sldns_buffer_available(buf, rdlen)) return 0;
		sldns_buffer_write(buf, rd, rdlen);
	}
	/* fixup rdlength */
	sldns_buffer_write_u16_at(buf, rdlenpos,
		sldns_buffer_position(buf)-rdlenpos-2);
	sldns_buffer_flip(buf);
	return 1;
}

/** insert RR into zone, from packet, decompress RR,
 * if duplicate is nonNULL set the flag but otherwise ignore duplicates */
static int
az_insert_rr_decompress(struct auth_zone* z, uint8_t* pkt, size_t pktlen,
	struct sldns_buffer* scratch_buffer, uint8_t* dname, uint16_t rr_type,
	uint16_t rr_class, uint32_t rr_ttl, uint8_t* rr_data,
	uint16_t rr_rdlen, int* duplicate)
{
	uint8_t* rr;
	size_t rr_len;
	size_t dname_len;
	if(!decompress_rr_into_buffer(scratch_buffer, pkt, pktlen, dname,
		rr_type, rr_class, rr_ttl, rr_data, rr_rdlen)) {
		log_err("could not decompress RR");
		return 0;
	}
	rr = sldns_buffer_begin(scratch_buffer);
	rr_len = sldns_buffer_limit(scratch_buffer);
	dname_len = dname_valid(rr, rr_len);
	return az_insert_rr(z, rr, rr_len, dname_len, duplicate);
}

/** remove RR from zone, from packet, decompress RR,
 * if nonexist is nonNULL set the flag but otherwise ignore nonexisting entries */
static int
az_remove_rr_decompress(struct auth_zone* z, uint8_t* pkt, size_t pktlen,
	struct sldns_buffer* scratch_buffer, uint8_t* dname, uint16_t rr_type,
	uint16_t rr_class, uint32_t rr_ttl, uint8_t* rr_data,
	uint16_t rr_rdlen, int* nonexist)
{
	uint8_t* rr;
	size_t rr_len;
	size_t dname_len;
	if(!decompress_rr_into_buffer(scratch_buffer, pkt, pktlen, dname,
		rr_type, rr_class, rr_ttl, rr_data, rr_rdlen)) {
		log_err("could not decompress RR");
		return 0;
	}
	rr = sldns_buffer_begin(scratch_buffer);
	rr_len = sldns_buffer_limit(scratch_buffer);
	dname_len = dname_valid(rr, rr_len);
	return az_remove_rr(z, rr, rr_len, dname_len, nonexist);
}

/**
 * Parse zonefile
 * @param z: zone to read in.
 * @param in: file to read from (just opened).
 * @param rr: buffer to use for RRs, 64k.
 *	passed so that recursive includes can use the same buffer and do
 *	not grow the stack too much.
 * @param rrbuflen: sizeof rr buffer.
 * @param state: parse state with $ORIGIN, $TTL and 'prev-dname' and so on,
 *	that is kept between includes.
 *	The lineno is set at 1 and then increased by the function.
 * @param fname: file name.
 * @param depth: recursion depth for includes
 * returns false on failure, has printed an error message
 */
static int
az_parse_file(struct auth_zone* z, FILE* in, uint8_t* rr, size_t rrbuflen,
	struct sldns_file_parse_state* state, char* fname, int depth)
{
	size_t rr_len, dname_len;
	int status;
	state->lineno = 1;

	while(!feof(in)) {
		rr_len = rrbuflen;
		dname_len = 0;
		status = sldns_fp2wire_rr_buf(in, rr, &rr_len, &dname_len,
			state);
		if(status == LDNS_WIREPARSE_ERR_INCLUDE && rr_len == 0) {
			/* we have $INCLUDE or $something */
			if(strncmp((char*)rr, "$INCLUDE ", 9) == 0 ||
				strncmp((char*)rr, "$INCLUDE\t", 9) == 0) {
				FILE* inc;
				int lineno_orig = state->lineno;
				char* incfile = (char*)rr + 8;
				if(depth > MAX_INCLUDE_DEPTH) {
					log_err("%s:%d max include depth "
						"exceeded", fname,
						state->lineno);
					return 0;
				}
				/* skip spaces */
				while(*incfile == ' ' || *incfile == '\t')
					incfile++;
				incfile = strdup(incfile);
				if(!incfile) {
					log_err("malloc failure");
					return 0;
				}
				verbose(VERB_ALGO, "opening $INCLUDE %s",
					incfile);
				inc = fopen(incfile, "r");
				if(!inc) {
					log_err("%s:%d cannot open include "
						"file %s: %s", z->zonefile,
						lineno_orig, incfile,
						strerror(errno));
					free(incfile);
					return 0;
				}
				/* recurse read that file now */
				if(!az_parse_file(z, inc, rr, rrbuflen,
					state, incfile, depth+1)) {
					log_err("%s:%d cannot parse include "
						"file %s", fname,
						lineno_orig, incfile);
					fclose(inc);
					free(incfile);
					return 0;
				}
				fclose(inc);
				verbose(VERB_ALGO, "done with $INCLUDE %s",
					incfile);
				free(incfile);
				state->lineno = lineno_orig;
			}
			continue;
		}
		if(status != 0) {
			log_err("parse error %s %d:%d: %s", fname,
				state->lineno, LDNS_WIREPARSE_OFFSET(status),
				sldns_get_errorstr_parse(status));
			return 0;
		}
		if(rr_len == 0) {
			/* EMPTY line, TTL or ORIGIN */
			continue;
		}
		/* insert wirerr in rrbuf */
		if(!az_insert_rr(z, rr, rr_len, dname_len, NULL)) {
			char buf[17];
			sldns_wire2str_type_buf(sldns_wirerr_get_type(rr,
				rr_len, dname_len), buf, sizeof(buf));
			log_err("%s:%d cannot insert RR of type %s",
				fname, state->lineno, buf);
			return 0;
		}
	}
	return 1;
}

int
auth_zone_read_zonefile(struct auth_zone* z)
{
	uint8_t rr[LDNS_RR_BUF_SIZE];
	struct sldns_file_parse_state state;
	FILE* in;
	if(!z || !z->zonefile || z->zonefile[0]==0)
		return 1; /* no file, or "", nothing to read */
	if(verbosity >= VERB_ALGO) {
		char nm[255+1];
		dname_str(z->name, nm);
		verbose(VERB_ALGO, "read zonefile %s for %s", z->zonefile, nm);
	}
	in = fopen(z->zonefile, "r");
	if(!in) {
		char* n = sldns_wire2str_dname(z->name, z->namelen);
		if(z->zone_is_slave && errno == ENOENT) {
			/* we fetch the zone contents later, no file yet */
			verbose(VERB_ALGO, "no zonefile %s for %s",
				z->zonefile, n?n:"error");
			free(n);
			return 1;
		}
		log_err("cannot open zonefile %s for %s: %s",
			z->zonefile, n?n:"error", strerror(errno));
		free(n);
		return 0;
	}

	/* clear the data tree */
	traverse_postorder(&z->data, auth_data_del, NULL);
	rbtree_init(&z->data, &auth_data_cmp);

	memset(&state, 0, sizeof(state));
	/* default TTL to 3600 */
	state.default_ttl = 3600;
	/* set $ORIGIN to the zone name */
	if(z->namelen <= sizeof(state.origin)) {
		memcpy(state.origin, z->name, z->namelen);
		state.origin_len = z->namelen;
	}
	/* parse the (toplevel) file */
	if(!az_parse_file(z, in, rr, sizeof(rr), &state, z->zonefile, 0)) {
		char* n = sldns_wire2str_dname(z->name, z->namelen);
		log_err("error parsing zonefile %s for %s",
			z->zonefile, n?n:"error");
		free(n);
		fclose(in);
		return 0;
	}
	fclose(in);
	return 1;
}

/** write buffer to file and check return codes */
static int
write_out(FILE* out, const char* str, size_t len)
{
	size_t r;
	if(len == 0)
		return 1;
	r = fwrite(str, 1, len, out);
	if(r == 0) {
		log_err("write failed: %s", strerror(errno));
		return 0;
	} else if(r < len) {
		log_err("write failed: too short (disk full?)");
		return 0;
	}
	return 1;
}

/** convert auth rr to string */
static int
auth_rr_to_string(uint8_t* nm, size_t nmlen, uint16_t tp, uint16_t cl,
	struct packed_rrset_data* data, size_t i, char* s, size_t buflen)
{
	int w = 0;
	size_t slen = buflen, datlen;
	uint8_t* dat;
	if(i >= data->count) tp = LDNS_RR_TYPE_RRSIG;
	dat = nm;
	datlen = nmlen;
	w += sldns_wire2str_dname_scan(&dat, &datlen, &s, &slen, NULL, 0);
	w += sldns_str_print(&s, &slen, "\t");
	w += sldns_str_print(&s, &slen, "%lu\t", (unsigned long)data->rr_ttl[i]);
	w += sldns_wire2str_class_print(&s, &slen, cl);
	w += sldns_str_print(&s, &slen, "\t");
	w += sldns_wire2str_type_print(&s, &slen, tp);
	w += sldns_str_print(&s, &slen, "\t");
	datlen = data->rr_len[i]-2;
	dat = data->rr_data[i]+2;
	w += sldns_wire2str_rdata_scan(&dat, &datlen, &s, &slen, tp, NULL, 0);

	if(tp == LDNS_RR_TYPE_DNSKEY) {
		w += sldns_str_print(&s, &slen, " ;{id = %u}",
			sldns_calc_keytag_raw(data->rr_data[i]+2,
				data->rr_len[i]-2));
	}
	w += sldns_str_print(&s, &slen, "\n");

	if(w > (int)buflen) {
		log_nametypeclass(0, "RR too long to print", nm, tp, cl);
		return 0;
	}
	return 1;
}

/** write rrset to file */
static int
auth_zone_write_rrset(struct auth_zone* z, struct auth_data* node,
	struct auth_rrset* r, FILE* out)
{
	size_t i, count = r->data->count + r->data->rrsig_count;
	char buf[LDNS_RR_BUF_SIZE];
	for(i=0; i<count; i++) {
		if(!auth_rr_to_string(node->name, node->namelen, r->type,
			z->dclass, r->data, i, buf, sizeof(buf))) {
			verbose(VERB_ALGO, "failed to rr2str rr %d", (int)i);
			continue;
		}
		if(!write_out(out, buf, strlen(buf)))
			return 0;
	}
	return 1;
}

/** write domain to file */
static int
auth_zone_write_domain(struct auth_zone* z, struct auth_data* n, FILE* out)
{
	struct auth_rrset* r;
	/* if this is zone apex, write SOA first */
	if(z->namelen == n->namelen) {
		struct auth_rrset* soa = az_domain_rrset(n, LDNS_RR_TYPE_SOA);
		if(soa) {
			if(!auth_zone_write_rrset(z, n, soa, out))
				return 0;
		}
	}
	/* write all the RRsets for this domain */
	for(r = n->rrsets; r; r = r->next) {
		if(z->namelen == n->namelen &&
			r->type == LDNS_RR_TYPE_SOA)
			continue; /* skip SOA here */
		if(!auth_zone_write_rrset(z, n, r, out))
			return 0;
	}
	return 1;
}

int auth_zone_write_file(struct auth_zone* z, const char* fname)
{
	FILE* out;
	struct auth_data* n;
	out = fopen(fname, "w");
	if(!out) {
		log_err("could not open %s: %s", fname, strerror(errno));
		return 0;
	}
	RBTREE_FOR(n, struct auth_data*, &z->data) {
		if(!auth_zone_write_domain(z, n, out)) {
			log_err("could not write domain to %s", fname);
			fclose(out);
			return 0;
		}
	}
	fclose(out);
	return 1;
}

/** read all auth zones from file (if they have a zonefile) */
static int
auth_zones_read_zones(struct auth_zones* az)
{
	struct auth_zone* z;
	lock_rw_wrlock(&az->lock);
	RBTREE_FOR(z, struct auth_zone*, &az->ztree) {
		lock_rw_wrlock(&z->lock);
		if(!auth_zone_read_zonefile(z)) {
			lock_rw_unlock(&z->lock);
			lock_rw_unlock(&az->lock);
			return 0;
		}
		lock_rw_unlock(&z->lock);
	}
	lock_rw_unlock(&az->lock);
	return 1;
}

/** find serial number of zone or false if none */
int
auth_zone_get_serial(struct auth_zone* z, uint32_t* serial)
{
	struct auth_data* apex;
	struct auth_rrset* soa;
	struct packed_rrset_data* d;
	apex = az_find_name(z, z->name, z->namelen);
	if(!apex) return 0;
	soa = az_domain_rrset(apex, LDNS_RR_TYPE_SOA);
	if(!soa || soa->data->count==0)
		return 0; /* no RRset or no RRs in rrset */
	if(soa->data->rr_len[0] < 2+4*5) return 0; /* SOA too short */
	d = soa->data;
	*serial = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-20));
	return 1;
}

/** Find auth_zone SOA and populate the values in xfr(soa values). */
static int
xfr_find_soa(struct auth_zone* z, struct auth_xfer* xfr)
{
	struct auth_data* apex;
	struct auth_rrset* soa;
	struct packed_rrset_data* d;
	apex = az_find_name(z, z->name, z->namelen);
	if(!apex) return 0;
	soa = az_domain_rrset(apex, LDNS_RR_TYPE_SOA);
	if(!soa || soa->data->count==0)
		return 0; /* no RRset or no RRs in rrset */
	if(soa->data->rr_len[0] < 2+4*5) return 0; /* SOA too short */
	/* SOA record ends with serial, refresh, retry, expiry, minimum,
	 * as 4 byte fields */
	d = soa->data;
	xfr->have_zone = 1;
	xfr->serial = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-20));
	xfr->refresh = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-16));
	xfr->retry = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-12));
	xfr->expiry = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-8));
	/* soa minimum at d->rr_len[0]-4 */
	return 1;
}

/**
 * Setup auth_xfer zone
 * This populates the have_zone flag, the SOA values, and related times.
 * Doesn't do network traffic yet, can set option flags.
 * @param z: locked by caller, and modified for setup
 * @param x: locked by caller, and modified.
 * @return false on failure.
 */
static int
auth_xfer_setup(struct auth_zone* z, struct auth_xfer* x)
{
	/* for a zone without zone transfers, x==NULL, so skip them,
	 * i.e. the zone config is fixed with no masters or urls */
	if(!z || !x) return 1;
	if(!xfr_find_soa(z, x)) {
		return 1;
	}
	/* nothing for probe, nextprobe and transfer tasks */
	return 1;
}

/**
 * Setup all zones
 * @param az: auth zones structure
 * @return false on failure.
 */
static int
auth_zones_setup_zones(struct auth_zones* az)
{
	struct auth_zone* z;
	struct auth_xfer* x;
	lock_rw_wrlock(&az->lock);
	RBTREE_FOR(z, struct auth_zone*, &az->ztree) {
		lock_rw_wrlock(&z->lock);
		x = auth_xfer_find(az, z->name, z->namelen, z->dclass);
		if(x) {
			lock_basic_lock(&x->lock);
		}
		if(!auth_xfer_setup(z, x)) {
			if(x) {
				lock_basic_unlock(&x->lock);
			}
			lock_rw_unlock(&z->lock);
			lock_rw_unlock(&az->lock);
			return 0;
		}
		if(x) {
			lock_basic_unlock(&x->lock);
		}
		lock_rw_unlock(&z->lock);
	}
	lock_rw_unlock(&az->lock);
	return 1;
}

/** set config items and create zones */
static int
auth_zones_cfg(struct auth_zones* az, struct config_auth* c)
{
	struct auth_zone* z;
	struct auth_xfer* x = NULL;

	/* create zone */
	lock_rw_wrlock(&az->lock);
	if(!(z=auth_zones_find_or_add_zone(az, c->name))) {
		lock_rw_unlock(&az->lock);
		return 0;
	}
	if(c->masters || c->urls) {
		if(!(x=auth_zones_find_or_add_xfer(az, z))) {
			lock_rw_unlock(&az->lock);
			lock_rw_unlock(&z->lock);
			return 0;
		}
	}
	if(c->for_downstream)
		az->have_downstream = 1;
	lock_rw_unlock(&az->lock);

	/* set options */
	z->zone_deleted = 0;
	if(!auth_zone_set_zonefile(z, c->zonefile)) {
		if(x) {
			lock_basic_unlock(&x->lock);
		}
		lock_rw_unlock(&z->lock);
		return 0;
	}
	z->for_downstream = c->for_downstream;
	z->for_upstream = c->for_upstream;
	z->fallback_enabled = c->fallback_enabled;

	/* xfer zone */
	if(x) {
		z->zone_is_slave = 1;
		/* set options on xfer zone */
		if(!xfer_set_masters(&x->task_probe->masters, c, 0)) {
			lock_basic_unlock(&x->lock);
			lock_rw_unlock(&z->lock);
			return 0;
		}
		if(!xfer_set_masters(&x->task_transfer->masters, c, 1)) {
			lock_basic_unlock(&x->lock);
			lock_rw_unlock(&z->lock);
			return 0;
		}
		lock_basic_unlock(&x->lock);
	}

	lock_rw_unlock(&z->lock);
	return 1;
}

/** set all auth zones deleted, then in auth_zones_cfg, it marks them
 * as nondeleted (if they are still in the config), and then later
 * we can find deleted zones */
static void
az_setall_deleted(struct auth_zones* az)
{
	struct auth_zone* z;
	lock_rw_wrlock(&az->lock);
	RBTREE_FOR(z, struct auth_zone*, &az->ztree) {
		lock_rw_wrlock(&z->lock);
		z->zone_deleted = 1;
		lock_rw_unlock(&z->lock);
	}
	lock_rw_unlock(&az->lock);
}

/** find zones that are marked deleted and delete them.
 * This is called from apply_cfg, and there are no threads and no
 * workers, so the xfr can just be deleted. */
*/ 1902 static void 1903 az_delete_deleted_zones(struct auth_zones* az) 1904 { 1905 struct auth_zone* z; 1906 struct auth_zone* delete_list = NULL, *next; 1907 struct auth_xfer* xfr; 1908 lock_rw_wrlock(&az->lock); 1909 RBTREE_FOR(z, struct auth_zone*, &az->ztree) { 1910 lock_rw_wrlock(&z->lock); 1911 if(z->zone_deleted) { 1912 /* we cannot alter the rbtree right now, but 1913 * we can put it on a linked list and then 1914 * delete it */ 1915 z->delete_next = delete_list; 1916 delete_list = z; 1917 } 1918 lock_rw_unlock(&z->lock); 1919 } 1920 /* now we are out of the tree loop and we can loop and delete 1921 * the zones */ 1922 z = delete_list; 1923 while(z) { 1924 next = z->delete_next; 1925 xfr = auth_xfer_find(az, z->name, z->namelen, z->dclass); 1926 if(xfr) { 1927 (void)rbtree_delete(&az->xtree, &xfr->node); 1928 auth_xfer_delete(xfr); 1929 } 1930 (void)rbtree_delete(&az->ztree, &z->node); 1931 auth_zone_delete(z); 1932 z = next; 1933 } 1934 lock_rw_unlock(&az->lock); 1935 } 1936 1937 int auth_zones_apply_cfg(struct auth_zones* az, struct config_file* cfg, 1938 int setup) 1939 { 1940 struct config_auth* p; 1941 az_setall_deleted(az); 1942 for(p = cfg->auths; p; p = p->next) { 1943 if(!p->name || p->name[0] == 0) { 1944 log_warn("auth-zone without a name, skipped"); 1945 continue; 1946 } 1947 if(!auth_zones_cfg(az, p)) { 1948 log_err("cannot config auth zone %s", p->name); 1949 return 0; 1950 } 1951 } 1952 az_delete_deleted_zones(az); 1953 if(!auth_zones_read_zones(az)) 1954 return 0; 1955 if(setup) { 1956 if(!auth_zones_setup_zones(az)) 1957 return 0; 1958 } 1959 return 1; 1960 } 1961 1962 /** delete chunks 1963 * @param at: transfer structure with chunks list. The chunks and their 1964 * data are freed. 1965 */ 1966 static void 1967 auth_chunks_delete(struct auth_transfer* at) 1968 { 1969 if(at->chunks_first) { 1970 struct auth_chunk* c, *cn; 1971 c = at->chunks_first; 1972 while(c) { 1973 cn = c->next; 1974 free(c->data); 1975 free(c); 1976 c = cn; 1977 } 1978 } 1979 at->chunks_first = NULL; 1980 at->chunks_last = NULL; 1981 } 1982 1983 /** free master addr list */ 1984 static void 1985 auth_free_master_addrs(struct auth_addr* list) 1986 { 1987 struct auth_addr *n; 1988 while(list) { 1989 n = list->next; 1990 free(list); 1991 list = n; 1992 } 1993 } 1994 1995 /** free the masters list */ 1996 static void 1997 auth_free_masters(struct auth_master* list) 1998 { 1999 struct auth_master* n; 2000 while(list) { 2001 n = list->next; 2002 auth_free_master_addrs(list->list); 2003 free(list->host); 2004 free(list->file); 2005 free(list); 2006 list = n; 2007 } 2008 } 2009 2010 /** delete auth xfer structure 2011 * @param xfr: delete this xfer and its tasks. 
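* The caller has already taken the xfr out of the xtree (or is deleting the whole tree); no other thread may hold its lock, because the lock is destroyed here and the nextprobe timer, probe and transfer comm points are freed.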
2012 */ 2013 static void 2014 auth_xfer_delete(struct auth_xfer* xfr) 2015 { 2016 if(!xfr) return; 2017 lock_basic_destroy(&xfr->lock); 2018 free(xfr->name); 2019 if(xfr->task_nextprobe) { 2020 comm_timer_delete(xfr->task_nextprobe->timer); 2021 free(xfr->task_nextprobe); 2022 } 2023 if(xfr->task_probe) { 2024 auth_free_masters(xfr->task_probe->masters); 2025 comm_point_delete(xfr->task_probe->cp); 2026 free(xfr->task_probe); 2027 } 2028 if(xfr->task_transfer) { 2029 auth_free_masters(xfr->task_transfer->masters); 2030 comm_point_delete(xfr->task_transfer->cp); 2031 if(xfr->task_transfer->chunks_first) { 2032 auth_chunks_delete(xfr->task_transfer); 2033 } 2034 free(xfr->task_transfer); 2035 } 2036 auth_free_masters(xfr->allow_notify_list); 2037 free(xfr); 2038 } 2039 2040 /** helper traverse to delete zones */ 2041 static void 2042 auth_zone_del(rbnode_type* n, void* ATTR_UNUSED(arg)) 2043 { 2044 struct auth_zone* z = (struct auth_zone*)n->key; 2045 auth_zone_delete(z); 2046 } 2047 2048 /** helper traverse to delete xfer zones */ 2049 static void 2050 auth_xfer_del(rbnode_type* n, void* ATTR_UNUSED(arg)) 2051 { 2052 struct auth_xfer* z = (struct auth_xfer*)n->key; 2053 auth_xfer_delete(z); 2054 } 2055 2056 void auth_zones_delete(struct auth_zones* az) 2057 { 2058 if(!az) return; 2059 lock_rw_destroy(&az->lock); 2060 traverse_postorder(&az->ztree, auth_zone_del, NULL); 2061 traverse_postorder(&az->xtree, auth_xfer_del, NULL); 2062 free(az); 2063 } 2064 2065 /** true if domain has only nsec3 */ 2066 static int 2067 domain_has_only_nsec3(struct auth_data* n) 2068 { 2069 struct auth_rrset* rrset = n->rrsets; 2070 int nsec3_seen = 0; 2071 while(rrset) { 2072 if(rrset->type == LDNS_RR_TYPE_NSEC3) { 2073 nsec3_seen = 1; 2074 } else if(rrset->type != LDNS_RR_TYPE_RRSIG) { 2075 return 0; 2076 } 2077 rrset = rrset->next; 2078 } 2079 return nsec3_seen; 2080 } 2081 2082 /** see if the domain has a wildcard child '*.domain' */ 2083 static struct auth_data* 2084 az_find_wildcard_domain(struct auth_zone* z, uint8_t* nm, size_t nmlen) 2085 { 2086 uint8_t wc[LDNS_MAX_DOMAINLEN]; 2087 if(nmlen+2 > sizeof(wc)) 2088 return NULL; /* result would be too long */ 2089 wc[0] = 1; /* length of wildcard label */ 2090 wc[1] = (uint8_t)'*'; /* wildcard label */ 2091 memmove(wc+2, nm, nmlen); 2092 return az_find_name(z, wc, nmlen+2); 2093 } 2094 2095 /** find wildcard between qname and cename */ 2096 static struct auth_data* 2097 az_find_wildcard(struct auth_zone* z, struct query_info* qinfo, 2098 struct auth_data* ce) 2099 { 2100 uint8_t* nm = qinfo->qname; 2101 size_t nmlen = qinfo->qname_len; 2102 struct auth_data* node; 2103 if(!dname_subdomain_c(nm, z->name)) 2104 return NULL; /* out of zone */ 2105 while((node=az_find_wildcard_domain(z, nm, nmlen))==NULL) { 2106 /* see if we can go up to find the wildcard */ 2107 if(nmlen == z->namelen) 2108 return NULL; /* top of zone reached */ 2109 if(ce && nmlen == ce->namelen) 2110 return NULL; /* ce reached */ 2111 if(dname_is_root(nm)) 2112 return NULL; /* cannot go up */ 2113 dname_remove_label(&nm, &nmlen); 2114 } 2115 return node; 2116 } 2117 2118 /** domain is not exact, find first candidate ce (name that matches 2119 * a part of qname) in tree */ 2120 static struct auth_data* 2121 az_find_candidate_ce(struct auth_zone* z, struct query_info* qinfo, 2122 struct auth_data* n) 2123 { 2124 uint8_t* nm; 2125 size_t nmlen; 2126 if(n) { 2127 nm = dname_get_shared_topdomain(qinfo->qname, n->name); 2128 } else { 2129 nm = qinfo->qname; 2130 } 2131 dname_count_size_labels(nm, 
&nmlen); 2132 n = az_find_name(z, nm, nmlen); 2133 /* delete labels and go up on name */ 2134 while(!n) { 2135 if(dname_is_root(nm)) 2136 return NULL; /* cannot go up */ 2137 dname_remove_label(&nm, &nmlen); 2138 n = az_find_name(z, nm, nmlen); 2139 } 2140 return n; 2141 } 2142 2143 /** go up the auth tree to next existing name. */ 2144 static struct auth_data* 2145 az_domain_go_up(struct auth_zone* z, struct auth_data* n) 2146 { 2147 uint8_t* nm = n->name; 2148 size_t nmlen = n->namelen; 2149 while(!dname_is_root(nm)) { 2150 dname_remove_label(&nm, &nmlen); 2151 if((n=az_find_name(z, nm, nmlen)) != NULL) 2152 return n; 2153 } 2154 return NULL; 2155 } 2156 2157 /** Find the closest encloser, a name that exists and is above the 2158 * qname. 2159 * return true if the node (param node) is existing, nonobscured and 2160 * can be used to generate answers from. It is then also node_exact. 2161 * returns false if the node is not good enough (or it wasn't node_exact); 2162 * in this case the ce can be filled. 2163 * if ce is NULL, no ce exists, and likely the zone is completely empty, 2164 * not even with a zone apex. 2165 * if ce is nonNULL it is the closest enclosing upper name (that exists 2166 * itself for answer purposes). That name may have a DNAME, NS or wildcard; 2167 * rrset is set to the closest DNAME or NS rrset that was found. 2168 */ 2169 static int 2170 az_find_ce(struct auth_zone* z, struct query_info* qinfo, 2171 struct auth_data* node, int node_exact, struct auth_data** ce, 2172 struct auth_rrset** rrset) 2173 { 2174 struct auth_data* n = node; 2175 *ce = NULL; 2176 *rrset = NULL; 2177 if(!node_exact) { 2178 /* if not exact, lookup closest exact match */ 2179 n = az_find_candidate_ce(z, qinfo, n); 2180 } else { 2181 /* if exact, the node itself is the first candidate ce */ 2182 *ce = n; 2183 } 2184 2185 /* no direct answer from nsec3-only domains */ 2186 if(n && domain_has_only_nsec3(n)) { 2187 node_exact = 0; 2188 *ce = NULL; 2189 } 2190 2191 /* with exact matches, walk up the labels until we find the 2192 * delegation, or DNAME or zone end */ 2193 while(n) { 2194 /* see if the current candidate has issues */ 2195 /* not zone apex and has type NS */ 2196 if(n->namelen != z->namelen && 2197 (*rrset=az_domain_rrset(n, LDNS_RR_TYPE_NS)) && 2198 /* delegate here, but DS at exact the dp has notype */ 2199 (qinfo->qtype != LDNS_RR_TYPE_DS || 2200 n->namelen != qinfo->qname_len)) { 2201 /* referral */ 2202 /* this is ce and the lowernode is nonexisting */ 2203 *ce = n; 2204 return 0; 2205 } 2206 /* not equal to qname and has type DNAME */ 2207 if(n->namelen != qinfo->qname_len && 2208 (*rrset=az_domain_rrset(n, LDNS_RR_TYPE_DNAME))) { 2209 /* this is ce and the lowernode is nonexisting */ 2210 *ce = n; 2211 return 0; 2212 } 2213 2214 if(*ce == NULL && !domain_has_only_nsec3(n)) { 2215 /* if not found yet, this exact name must be 2216 * our lowest match (but not nsec3onlydomain) */ 2217 *ce = n; 2218 } 2219 2220 /* walk up the tree by removing labels from name and lookup */ 2221 n = az_domain_go_up(z, n); 2222 } 2223 /* found no problems, if it was an exact node, it is fine to use */ 2224 return node_exact; 2225 } 2226 2227 /** add additional A/AAAA from domain names in rrset rdata (+offset) 2228 * offset is number of bytes in rdata where the dname is located.
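* For example, the MX exchange name follows the 2 byte preference (offset 2), the SRV target follows the priority, weight and port fields (offset 6), and the NS name starts the rdata (offset 0); see az_generate_positive_answer for these callers.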
*/ 2229 static int 2230 az_add_additionals_from(struct auth_zone* z, struct regional* region, 2231 struct dns_msg* msg, struct auth_rrset* rrset, size_t offset) 2232 { 2233 struct packed_rrset_data* d = rrset->data; 2234 size_t i; 2235 if(!d) return 0; 2236 for(i=0; i<d->count; i++) { 2237 size_t dlen; 2238 struct auth_data* domain; 2239 struct auth_rrset* ref; 2240 if(d->rr_len[i] < 2+offset) 2241 continue; /* too short */ 2242 if(!(dlen = dname_valid(d->rr_data[i]+2+offset, 2243 d->rr_len[i]-2-offset))) 2244 continue; /* malformed */ 2245 domain = az_find_name(z, d->rr_data[i]+2+offset, dlen); 2246 if(!domain) 2247 continue; 2248 if((ref=az_domain_rrset(domain, LDNS_RR_TYPE_A)) != NULL) { 2249 if(!msg_add_rrset_ar(z, region, msg, domain, ref)) 2250 return 0; 2251 } 2252 if((ref=az_domain_rrset(domain, LDNS_RR_TYPE_AAAA)) != NULL) { 2253 if(!msg_add_rrset_ar(z, region, msg, domain, ref)) 2254 return 0; 2255 } 2256 } 2257 return 1; 2258 } 2259 2260 /** add negative SOA record (with negative TTL) */ 2261 static int 2262 az_add_negative_soa(struct auth_zone* z, struct regional* region, 2263 struct dns_msg* msg) 2264 { 2265 uint32_t minimum; 2266 struct packed_rrset_data* d; 2267 struct auth_rrset* soa; 2268 struct auth_data* apex = az_find_name(z, z->name, z->namelen); 2269 if(!apex) return 0; 2270 soa = az_domain_rrset(apex, LDNS_RR_TYPE_SOA); 2271 if(!soa) return 0; 2272 /* must be first to put in message; we want to fix the TTL with 2273 * one RRset here, otherwise we'd need to loop over the RRs to get 2274 * the resulting lower TTL */ 2275 log_assert(msg->rep->rrset_count == 0); 2276 if(!msg_add_rrset_ns(z, region, msg, apex, soa)) return 0; 2277 /* fixup TTL */ 2278 d = (struct packed_rrset_data*)msg->rep->rrsets[msg->rep->rrset_count-1]->entry.data; 2279 /* last 4 bytes are minimum ttl in network format */ 2280 if(d->count == 0) return 0; 2281 if(d->rr_len[0] < 2+4) return 0; 2282 minimum = sldns_read_uint32(d->rr_data[0]+(d->rr_len[0]-4)); 2283 d->ttl = (time_t)minimum; 2284 d->rr_ttl[0] = (time_t)minimum; 2285 msg->rep->ttl = get_rrset_ttl(msg->rep->rrsets[0]); 2286 msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl); 2287 return 1; 2288 } 2289 2290 /** See if the query goes to an empty nonterminal (a name that has no 2291 * auth_data, but there are nodes underneath it). We already checked that 2292 * there is no NS or DNAME above, so we only need to check if some node 2293 * exists below (with a nonempty rr list); return true if it is an empty nonterminal */ 2294 static int 2295 az_empty_nonterminal(struct auth_zone* z, struct query_info* qinfo, 2296 struct auth_data* node) 2297 { 2298 struct auth_data* next; 2299 if(!node) { 2300 /* no smaller was found, use first (smallest) node as the 2301 * next one */ 2302 next = (struct auth_data*)rbtree_first(&z->data); 2303 } else { 2304 next = (struct auth_data*)rbtree_next(&node->node); 2305 } 2306 while(next && (rbnode_type*)next != RBTREE_NULL && next->rrsets == NULL) { 2307 /* the next name has empty rrsets, is an empty nonterminal 2308 * itself, see if there exists something below it */ 2309 next = (struct auth_data*)rbtree_next(&next->node); 2310 } 2311 if((rbnode_type*)next == RBTREE_NULL || !next) { 2312 /* there is no next node, so something below it cannot 2313 * exist */ 2314 return 0; 2315 } 2316 /* a next node exists, if there was something below the query, 2317 * this node has to be it.
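* For example, with apex example.com. and only the nodes a.example.com. and x.y.a.example.com. in the tree, a query for y.a.example.com. has a.example.com. as the closest smaller existing node and x.y.a.example.com. as the next node; that next node is a strict subdomain of the query name, so the query name is an empty nonterminal.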
See if it is below the query name */ 2318 if(dname_strict_subdomain_c(next->name, qinfo->qname)) 2319 return 1; 2320 return 0; 2321 } 2322 2323 /** create synth cname target name in buffer, or fail if too long */ 2324 static size_t 2325 synth_cname_buf(uint8_t* qname, size_t qname_len, size_t dname_len, 2326 uint8_t* dtarg, size_t dtarglen, uint8_t* buf, size_t buflen) 2327 { 2328 size_t newlen = qname_len + dtarglen - dname_len; 2329 if(newlen > buflen) { 2330 /* YXDOMAIN error */ 2331 return 0; 2332 } 2333 /* new name is concatenation of qname front (without DNAME owner) 2334 * and DNAME target name */ 2335 memcpy(buf, qname, qname_len-dname_len); 2336 memmove(buf+(qname_len-dname_len), dtarg, dtarglen); 2337 return newlen; 2338 } 2339 2340 /** create synthetic CNAME rrset for in a DNAME answer in region, 2341 * false on alloc failure, cname==NULL when name too long. */ 2342 static int 2343 create_synth_cname(uint8_t* qname, size_t qname_len, struct regional* region, 2344 struct auth_data* node, struct auth_rrset* dname, uint16_t dclass, 2345 struct ub_packed_rrset_key** cname) 2346 { 2347 uint8_t buf[LDNS_MAX_DOMAINLEN]; 2348 uint8_t* dtarg; 2349 size_t dtarglen, newlen; 2350 struct packed_rrset_data* d; 2351 2352 /* get DNAME target name */ 2353 if(dname->data->count < 1) return 0; 2354 if(dname->data->rr_len[0] < 3) return 0; /* at least rdatalen +1 */ 2355 dtarg = dname->data->rr_data[0]+2; 2356 dtarglen = dname->data->rr_len[0]-2; 2357 if(sldns_read_uint16(dname->data->rr_data[0]) != dtarglen) 2358 return 0; /* rdatalen in DNAME rdata is malformed */ 2359 if(dname_valid(dtarg, dtarglen) != dtarglen) 2360 return 0; /* DNAME RR has malformed rdata */ 2361 2362 /* synthesize a CNAME */ 2363 newlen = synth_cname_buf(qname, qname_len, node->namelen, 2364 dtarg, dtarglen, buf, sizeof(buf)); 2365 if(newlen == 0) { 2366 /* YXDOMAIN error */ 2367 *cname = NULL; 2368 return 1; 2369 } 2370 *cname = (struct ub_packed_rrset_key*)regional_alloc(region, 2371 sizeof(struct ub_packed_rrset_key)); 2372 if(!*cname) 2373 return 0; /* out of memory */ 2374 memset(&(*cname)->entry, 0, sizeof((*cname)->entry)); 2375 (*cname)->entry.key = (*cname); 2376 (*cname)->rk.type = htons(LDNS_RR_TYPE_CNAME); 2377 (*cname)->rk.rrset_class = htons(dclass); 2378 (*cname)->rk.flags = 0; 2379 (*cname)->rk.dname = regional_alloc_init(region, qname, qname_len); 2380 if(!(*cname)->rk.dname) 2381 return 0; /* out of memory */ 2382 (*cname)->rk.dname_len = qname_len; 2383 (*cname)->entry.hash = rrset_key_hash(&(*cname)->rk); 2384 d = (struct packed_rrset_data*)regional_alloc_zero(region, 2385 sizeof(struct packed_rrset_data) + sizeof(size_t) + 2386 sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t) 2387 + newlen); 2388 if(!d) 2389 return 0; /* out of memory */ 2390 (*cname)->entry.data = d; 2391 d->ttl = 0; /* 0 for synthesized CNAME TTL */ 2392 d->count = 1; 2393 d->rrsig_count = 0; 2394 d->trust = rrset_trust_ans_noAA; 2395 d->rr_len = (size_t*)((uint8_t*)d + 2396 sizeof(struct packed_rrset_data)); 2397 d->rr_len[0] = newlen + sizeof(uint16_t); 2398 packed_rrset_ptr_fixup(d); 2399 d->rr_ttl[0] = d->ttl; 2400 sldns_write_uint16(d->rr_data[0], newlen); 2401 memmove(d->rr_data[0] + sizeof(uint16_t), buf, newlen); 2402 return 1; 2403 } 2404 2405 /** add a synthesized CNAME to the answer section */ 2406 static int 2407 add_synth_cname(struct auth_zone* z, uint8_t* qname, size_t qname_len, 2408 struct regional* region, struct dns_msg* msg, struct auth_data* dname, 2409 struct auth_rrset* rrset) 2410 { 2411 struct 
ub_packed_rrset_key* cname; 2412 /* synthesize a CNAME */ 2413 if(!create_synth_cname(qname, qname_len, region, dname, rrset, 2414 z->dclass, &cname)) { 2415 /* out of memory */ 2416 return 0; 2417 } 2418 if(!cname) { 2419 /* cname cannot be create because of YXDOMAIN */ 2420 msg->rep->flags |= LDNS_RCODE_YXDOMAIN; 2421 return 1; 2422 } 2423 /* add cname to message */ 2424 if(!msg_grow_array(region, msg)) 2425 return 0; 2426 msg->rep->rrsets[msg->rep->rrset_count] = cname; 2427 msg->rep->rrset_count++; 2428 msg->rep->an_numrrsets++; 2429 msg_ttl(msg); 2430 return 1; 2431 } 2432 2433 /** Change a dname to a different one, for wildcard namechange */ 2434 static void 2435 az_change_dnames(struct dns_msg* msg, uint8_t* oldname, uint8_t* newname, 2436 size_t newlen, int an_only) 2437 { 2438 size_t i; 2439 size_t start = 0, end = msg->rep->rrset_count; 2440 if(!an_only) start = msg->rep->an_numrrsets; 2441 if(an_only) end = msg->rep->an_numrrsets; 2442 for(i=start; i<end; i++) { 2443 /* allocated in region so we can change the ptrs */ 2444 if(query_dname_compare(msg->rep->rrsets[i]->rk.dname, oldname) 2445 == 0) { 2446 msg->rep->rrsets[i]->rk.dname = newname; 2447 msg->rep->rrsets[i]->rk.dname_len = newlen; 2448 } 2449 } 2450 } 2451 2452 /** find NSEC record covering the query */ 2453 static struct auth_rrset* 2454 az_find_nsec_cover(struct auth_zone* z, struct auth_data** node) 2455 { 2456 uint8_t* nm = (*node)->name; 2457 size_t nmlen = (*node)->namelen; 2458 struct auth_rrset* rrset; 2459 /* find the NSEC for the smallest-or-equal node */ 2460 /* if node == NULL, we did not find a smaller name. But the zone 2461 * name is the smallest name and should have an NSEC. So there is 2462 * no NSEC to return (for a properly signed zone) */ 2463 /* for empty nonterminals, the auth-data node should not exist, 2464 * and thus we don't need to go rbtree_previous here to find 2465 * a domain with an NSEC record */ 2466 /* but there could be glue, and if this is node, then it has no NSEC. 
2467 * Go up to find nonglue (previous) NSEC-holding nodes */ 2468 while((rrset=az_domain_rrset(*node, LDNS_RR_TYPE_NSEC)) == NULL) { 2469 if(dname_is_root(nm)) return NULL; 2470 if(nmlen == z->namelen) return NULL; 2471 dname_remove_label(&nm, &nmlen); 2472 /* adjust *node for the nsec rrset to find in */ 2473 *node = az_find_name(z, nm, nmlen); 2474 } 2475 return rrset; 2476 } 2477 2478 /** Find NSEC and add for wildcard denial */ 2479 static int 2480 az_nsec_wildcard_denial(struct auth_zone* z, struct regional* region, 2481 struct dns_msg* msg, uint8_t* cenm, size_t cenmlen) 2482 { 2483 struct query_info qinfo; 2484 int node_exact; 2485 struct auth_data* node; 2486 struct auth_rrset* nsec; 2487 uint8_t wc[LDNS_MAX_DOMAINLEN]; 2488 if(cenmlen+2 > sizeof(wc)) 2489 return 0; /* result would be too long */ 2490 wc[0] = 1; /* length of wildcard label */ 2491 wc[1] = (uint8_t)'*'; /* wildcard label */ 2492 memmove(wc+2, cenm, cenmlen); 2493 2494 /* we have '*.ce' in wc wildcard name buffer */ 2495 /* get nsec cover for that */ 2496 qinfo.qname = wc; 2497 qinfo.qname_len = cenmlen+2; 2498 qinfo.qtype = 0; 2499 qinfo.qclass = 0; 2500 az_find_domain(z, &qinfo, &node_exact, &node); 2501 if((nsec=az_find_nsec_cover(z, &node)) != NULL) { 2502 if(!msg_add_rrset_ns(z, region, msg, node, nsec)) return 0; 2503 } 2504 return 1; 2505 } 2506 2507 /** Find the NSEC3PARAM rrset (if any) and if true you have the parameters */ 2508 static int 2509 az_nsec3_param(struct auth_zone* z, int* algo, size_t* iter, uint8_t** salt, 2510 size_t* saltlen) 2511 { 2512 struct auth_data* apex; 2513 struct auth_rrset* param; 2514 size_t i; 2515 apex = az_find_name(z, z->name, z->namelen); 2516 if(!apex) return 0; 2517 param = az_domain_rrset(apex, LDNS_RR_TYPE_NSEC3PARAM); 2518 if(!param || param->data->count==0) 2519 return 0; /* no RRset or no RRs in rrset */ 2520 /* find out which NSEC3PARAM RR has supported parameters */ 2521 /* skip unknown flags (dynamic signer is recalculating nsec3 chain) */ 2522 for(i=0; i<param->data->count; i++) { 2523 uint8_t* rdata = param->data->rr_data[i]+2; 2524 size_t rdatalen = param->data->rr_len[i]; 2525 if(rdatalen < 2+5) 2526 continue; /* too short */ 2527 if(!nsec3_hash_algo_size_supported((int)(rdata[0]))) 2528 continue; /* unsupported algo */ 2529 if(rdatalen < (size_t)(2+5+(size_t)rdata[4])) 2530 continue; /* salt missing */ 2531 if((rdata[1]&NSEC3_UNKNOWN_FLAGS)!=0) 2532 continue; /* unknown flags */ 2533 *algo = (int)(rdata[0]); 2534 *iter = sldns_read_uint16(rdata+2); 2535 *saltlen = rdata[4]; 2536 if(*saltlen == 0) 2537 *salt = NULL; 2538 else *salt = rdata+5; 2539 return 1; 2540 } 2541 /* no supported params */ 2542 return 0; 2543 } 2544 2545 /** Hash a name with nsec3param into buffer, it has zone name appended. 
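* This implements the RFC 5155 iterated hash IH(salt, x, k): the lowercased owner name is hashed with the salt appended, and the result is rehashed with the salt appended another iter times.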
2546 * return length of hash */ 2547 static size_t 2548 az_nsec3_hash(uint8_t* buf, size_t buflen, uint8_t* nm, size_t nmlen, 2549 int algo, size_t iter, uint8_t* salt, size_t saltlen) 2550 { 2551 size_t hlen = nsec3_hash_algo_size_supported(algo); 2552 /* buffer has domain name, nsec3hash, and 256 is for max saltlen 2553 * (salt has 0-255 length) */ 2554 unsigned char p[LDNS_MAX_DOMAINLEN+1+N3HASHBUFLEN+256]; 2555 size_t i; 2556 if(nmlen+saltlen > sizeof(p) || hlen+saltlen > sizeof(p)) 2557 return 0; 2558 if(hlen > buflen) 2559 return 0; /* somehow too large for destination buffer */ 2560 /* hashfunc(name, salt) */ 2561 memmove(p, nm, nmlen); 2562 query_dname_tolower(p); 2563 memmove(p+nmlen, salt, saltlen); 2564 (void)secalgo_nsec3_hash(algo, p, nmlen+saltlen, (unsigned char*)buf); 2565 for(i=0; i<iter; i++) { 2566 /* hashfunc(hash, salt) */ 2567 memmove(p, buf, hlen); 2568 memmove(p+hlen, salt, saltlen); 2569 (void)secalgo_nsec3_hash(algo, p, hlen+saltlen, 2570 (unsigned char*)buf); 2571 } 2572 return hlen; 2573 } 2574 2575 /** Hash name and return b32encoded hashname for lookup, zone name appended */ 2576 static int 2577 az_nsec3_hashname(struct auth_zone* z, uint8_t* hashname, size_t* hashnmlen, 2578 uint8_t* nm, size_t nmlen, int algo, size_t iter, uint8_t* salt, 2579 size_t saltlen) 2580 { 2581 uint8_t hash[N3HASHBUFLEN]; 2582 size_t hlen; 2583 int ret; 2584 hlen = az_nsec3_hash(hash, sizeof(hash), nm, nmlen, algo, iter, 2585 salt, saltlen); 2586 if(!hlen) return 0; 2587 /* b32 encode */ 2588 if(*hashnmlen < hlen*2+1+z->namelen) /* approx b32 as hexb16 */ 2589 return 0; 2590 ret = sldns_b32_ntop_extended_hex(hash, hlen, (char*)(hashname+1), 2591 (*hashnmlen)-1); 2592 if(ret<1) 2593 return 0; 2594 hashname[0] = (uint8_t)ret; 2595 ret++; 2596 if((*hashnmlen) - ret < z->namelen) 2597 return 0; 2598 memmove(hashname+ret, z->name, z->namelen); 2599 *hashnmlen = z->namelen+(size_t)ret; 2600 return 1; 2601 } 2602 2603 /** Find the datanode that covers the nsec3hash-name */ 2604 static struct auth_data* 2605 az_nsec3_findnode(struct auth_zone* z, uint8_t* hashnm, size_t hashnmlen) 2606 { 2607 struct query_info qinfo; 2608 struct auth_data* node; 2609 int node_exact; 2610 qinfo.qclass = 0; 2611 qinfo.qtype = 0; 2612 qinfo.qname = hashnm; 2613 qinfo.qname_len = hashnmlen; 2614 /* because canonical ordering and b32 nsec3 ordering are the same. 2615 * this is a good lookup to find the nsec3 name. 
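* The base32hex encoding preserves the order of the raw hash values, so the smaller-or-equal node from this lookup is the candidate NSEC3 whose interval covers the query hash.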
*/ 2616 az_find_domain(z, &qinfo, &node_exact, &node); 2617 /* but we may have to skip non-nsec3 nodes */ 2618 /* this may be a lot, the way to speed that up is to have a 2619 * separate nsec3 tree with nsec3 nodes */ 2620 while(node && (rbnode_type*)node != RBTREE_NULL && 2621 !az_domain_rrset(node, LDNS_RR_TYPE_NSEC3)) { 2622 node = (struct auth_data*)rbtree_previous(&node->node); 2623 } 2624 if((rbnode_type*)node == RBTREE_NULL) 2625 node = NULL; 2626 return node; 2627 } 2628 2629 /** Find cover for hashed(nm, nmlen) (or NULL) */ 2630 static struct auth_data* 2631 az_nsec3_find_cover(struct auth_zone* z, uint8_t* nm, size_t nmlen, 2632 int algo, size_t iter, uint8_t* salt, size_t saltlen) 2633 { 2634 struct auth_data* node; 2635 uint8_t hname[LDNS_MAX_DOMAINLEN]; 2636 size_t hlen = sizeof(hname); 2637 if(!az_nsec3_hashname(z, hname, &hlen, nm, nmlen, algo, iter, 2638 salt, saltlen)) 2639 return NULL; 2640 node = az_nsec3_findnode(z, hname, hlen); 2641 if(node) 2642 return node; 2643 /* we did not find any, perhaps because the NSEC3 hash is before 2644 * the first hash, we have to find the 'last hash' in the zone */ 2645 node = (struct auth_data*)rbtree_last(&z->data); 2646 while(node && (rbnode_type*)node != RBTREE_NULL && 2647 !az_domain_rrset(node, LDNS_RR_TYPE_NSEC3)) { 2648 node = (struct auth_data*)rbtree_previous(&node->node); 2649 } 2650 if((rbnode_type*)node == RBTREE_NULL) 2651 node = NULL; 2652 return node; 2653 } 2654 2655 /** Find exact match for hashed(nm, nmlen) NSEC3 record or NULL */ 2656 static struct auth_data* 2657 az_nsec3_find_exact(struct auth_zone* z, uint8_t* nm, size_t nmlen, 2658 int algo, size_t iter, uint8_t* salt, size_t saltlen) 2659 { 2660 struct auth_data* node; 2661 uint8_t hname[LDNS_MAX_DOMAINLEN]; 2662 size_t hlen = sizeof(hname); 2663 if(!az_nsec3_hashname(z, hname, &hlen, nm, nmlen, algo, iter, 2664 salt, saltlen)) 2665 return NULL; 2666 node = az_find_name(z, hname, hlen); 2667 if(az_domain_rrset(node, LDNS_RR_TYPE_NSEC3)) 2668 return node; 2669 return NULL; 2670 } 2671 2672 /** Return nextcloser name (as a ref into the qname). This is one label 2673 * more than the cenm (cename must be a suffix of qname) */ 2674 static void 2675 az_nsec3_get_nextcloser(uint8_t* cenm, uint8_t* qname, size_t qname_len, 2676 uint8_t** nx, size_t* nxlen) 2677 { 2678 int celabs = dname_count_labels(cenm); 2679 int qlabs = dname_count_labels(qname); 2680 int strip = qlabs - celabs -1; 2681 log_assert(dname_strict_subdomain(qname, qlabs, cenm, celabs)); 2682 *nx = qname; 2683 *nxlen = qname_len; 2684 if(strip>0) 2685 dname_remove_labels(nx, nxlen, strip); 2686 } 2687 2688 /** Find the closest encloser that has exact NSEC3. 2689 * updated cenm to the new name. If it went up no-exact-ce is true. */ 2690 static struct auth_data* 2691 az_nsec3_find_ce(struct auth_zone* z, uint8_t** cenm, size_t* cenmlen, 2692 int* no_exact_ce, int algo, size_t iter, uint8_t* salt, size_t saltlen) 2693 { 2694 struct auth_data* node; 2695 while((node = az_nsec3_find_exact(z, *cenm, *cenmlen, 2696 algo, iter, salt, saltlen)) == NULL) { 2697 if(*cenmlen == z->namelen) { 2698 /* next step up would take us out of the zone. 
fail */ 2699 return NULL; 2700 } 2701 *no_exact_ce = 1; 2702 dname_remove_label(cenm, cenmlen); 2703 } 2704 return node; 2705 } 2706 2707 /* Insert NSEC3 record in authority section, if NULL does nothing */ 2708 static int 2709 az_nsec3_insert(struct auth_zone* z, struct regional* region, 2710 struct dns_msg* msg, struct auth_data* node) 2711 { 2712 struct auth_rrset* nsec3; 2713 if(!node) return 1; /* no node, skip this */ 2714 nsec3 = az_domain_rrset(node, LDNS_RR_TYPE_NSEC3); 2715 if(!nsec3) return 1; /* if no nsec3 RR, skip it */ 2716 if(!msg_add_rrset_ns(z, region, msg, node, nsec3)) return 0; 2717 return 1; 2718 } 2719 2720 /** add NSEC3 records to the zone for the nsec3 proof. 2721 * Specify with the flags with parts of the proof are required. 2722 * the ce is the exact matching name (for notype) but also delegation points. 2723 * qname is the one where the nextcloser name can be derived from. 2724 * If NSEC3 is not properly there (in the zone) nothing is added. 2725 * always enabled: include nsec3 proving about the Closest Encloser. 2726 * that is an exact match that should exist for it. 2727 * If that does not exist, a higher exact match + nxproof is enabled 2728 * (for some sort of opt-out empty nonterminal cases). 2729 * nxproof: include denial of the qname. 2730 * wcproof: include denial of wildcard (wildcard.ce). 2731 */ 2732 static int 2733 az_add_nsec3_proof(struct auth_zone* z, struct regional* region, 2734 struct dns_msg* msg, uint8_t* cenm, size_t cenmlen, uint8_t* qname, 2735 size_t qname_len, int nxproof, int wcproof) 2736 { 2737 int algo; 2738 size_t iter, saltlen; 2739 uint8_t* salt; 2740 int no_exact_ce = 0; 2741 struct auth_data* node; 2742 2743 /* find parameters of nsec3 proof */ 2744 if(!az_nsec3_param(z, &algo, &iter, &salt, &saltlen)) 2745 return 1; /* no nsec3 */ 2746 /* find ce that has an NSEC3 */ 2747 node = az_nsec3_find_ce(z, &cenm, &cenmlen, &no_exact_ce, 2748 algo, iter, salt, saltlen); 2749 if(no_exact_ce) nxproof = 1; 2750 if(!az_nsec3_insert(z, region, msg, node)) 2751 return 0; 2752 2753 if(nxproof) { 2754 uint8_t* nx; 2755 size_t nxlen; 2756 /* create nextcloser domain name */ 2757 az_nsec3_get_nextcloser(cenm, qname, qname_len, &nx, &nxlen); 2758 /* find nsec3 that matches or covers it */ 2759 node = az_nsec3_find_cover(z, nx, nxlen, algo, iter, salt, 2760 saltlen); 2761 if(!az_nsec3_insert(z, region, msg, node)) 2762 return 0; 2763 } 2764 if(wcproof) { 2765 /* create wildcard name *.ce */ 2766 uint8_t wc[LDNS_MAX_DOMAINLEN]; 2767 size_t wclen; 2768 if(cenmlen+2 > sizeof(wc)) 2769 return 0; /* result would be too long */ 2770 wc[0] = 1; /* length of wildcard label */ 2771 wc[1] = (uint8_t)'*'; /* wildcard label */ 2772 memmove(wc+2, cenm, cenmlen); 2773 wclen = cenmlen+2; 2774 /* find nsec3 that matches or covers it */ 2775 node = az_nsec3_find_cover(z, wc, wclen, algo, iter, salt, 2776 saltlen); 2777 if(!az_nsec3_insert(z, region, msg, node)) 2778 return 0; 2779 } 2780 return 1; 2781 } 2782 2783 /** generate answer for positive answer */ 2784 static int 2785 az_generate_positive_answer(struct auth_zone* z, struct regional* region, 2786 struct dns_msg* msg, struct auth_data* node, struct auth_rrset* rrset) 2787 { 2788 if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; 2789 /* see if we want additional rrs */ 2790 if(rrset->type == LDNS_RR_TYPE_MX) { 2791 if(!az_add_additionals_from(z, region, msg, rrset, 2)) 2792 return 0; 2793 } else if(rrset->type == LDNS_RR_TYPE_SRV) { 2794 if(!az_add_additionals_from(z, region, msg, rrset, 6)) 2795 
return 0; 2796 } else if(rrset->type == LDNS_RR_TYPE_NS) { 2797 if(!az_add_additionals_from(z, region, msg, rrset, 0)) 2798 return 0; 2799 } 2800 return 1; 2801 } 2802 2803 /** generate answer for type ANY answer */ 2804 static int 2805 az_generate_any_answer(struct auth_zone* z, struct regional* region, 2806 struct dns_msg* msg, struct auth_data* node) 2807 { 2808 struct auth_rrset* rrset; 2809 int added = 0; 2810 /* add a couple (at least one) RRs */ 2811 if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_SOA)) != NULL) { 2812 if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; 2813 added++; 2814 } 2815 if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_MX)) != NULL) { 2816 if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; 2817 added++; 2818 } 2819 if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_A)) != NULL) { 2820 if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; 2821 added++; 2822 } 2823 if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_AAAA)) != NULL) { 2824 if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; 2825 added++; 2826 } 2827 if(added == 0 && node->rrsets) { 2828 if(!msg_add_rrset_an(z, region, msg, node, 2829 node->rrsets)) return 0; 2830 } 2831 return 1; 2832 } 2833 2834 /** follow cname chain and add more data to the answer section */ 2835 static int 2836 follow_cname_chain(struct auth_zone* z, uint16_t qtype, 2837 struct regional* region, struct dns_msg* msg, 2838 struct packed_rrset_data* d) 2839 { 2840 int maxchain = 0; 2841 /* see if we can add the target of the CNAME into the answer */ 2842 while(maxchain++ < MAX_CNAME_CHAIN) { 2843 struct auth_data* node; 2844 struct auth_rrset* rrset; 2845 size_t clen; 2846 /* d has cname rdata */ 2847 if(d->count == 0) break; /* no CNAME */ 2848 if(d->rr_len[0] < 2+1) break; /* too small */ 2849 if((clen=dname_valid(d->rr_data[0]+2, d->rr_len[0]-2))==0) 2850 break; /* malformed */ 2851 if(!dname_subdomain_c(d->rr_data[0]+2, z->name)) 2852 break; /* target out of zone */ 2853 if((node = az_find_name(z, d->rr_data[0]+2, clen))==NULL) 2854 break; /* no such target name */ 2855 if((rrset=az_domain_rrset(node, qtype))!=NULL) { 2856 /* done we found the target */ 2857 if(!msg_add_rrset_an(z, region, msg, node, rrset)) 2858 return 0; 2859 break; 2860 } 2861 if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_CNAME))==NULL) 2862 break; /* no further CNAME chain, notype */ 2863 if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; 2864 d = rrset->data; 2865 } 2866 return 1; 2867 } 2868 2869 /** generate answer for cname answer */ 2870 static int 2871 az_generate_cname_answer(struct auth_zone* z, struct query_info* qinfo, 2872 struct regional* region, struct dns_msg* msg, 2873 struct auth_data* node, struct auth_rrset* rrset) 2874 { 2875 if(!msg_add_rrset_an(z, region, msg, node, rrset)) return 0; 2876 if(!rrset) return 1; 2877 if(!follow_cname_chain(z, qinfo->qtype, region, msg, rrset->data)) 2878 return 0; 2879 return 1; 2880 } 2881 2882 /** generate answer for notype answer */ 2883 static int 2884 az_generate_notype_answer(struct auth_zone* z, struct regional* region, 2885 struct dns_msg* msg, struct auth_data* node) 2886 { 2887 struct auth_rrset* rrset; 2888 if(!az_add_negative_soa(z, region, msg)) return 0; 2889 /* DNSSEC denial NSEC */ 2890 if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_NSEC))!=NULL) { 2891 if(!msg_add_rrset_ns(z, region, msg, node, rrset)) return 0; 2892 } else if(node) { 2893 /* DNSSEC denial NSEC3 */ 2894 if(!az_add_nsec3_proof(z, region, msg, node->name, 2895 node->namelen, msg->qinfo.qname, 2896 
msg->qinfo.qname_len, 0, 0)) 2897 return 0; 2898 } 2899 return 1; 2900 } 2901 2902 /** generate answer for referral answer */ 2903 static int 2904 az_generate_referral_answer(struct auth_zone* z, struct regional* region, 2905 struct dns_msg* msg, struct auth_data* ce, struct auth_rrset* rrset) 2906 { 2907 struct auth_rrset* ds, *nsec; 2908 /* turn off AA flag, referral is nonAA because it leaves the zone */ 2909 log_assert(ce); 2910 msg->rep->flags &= ~BIT_AA; 2911 if(!msg_add_rrset_ns(z, region, msg, ce, rrset)) return 0; 2912 /* add DS or deny it */ 2913 if((ds=az_domain_rrset(ce, LDNS_RR_TYPE_DS))!=NULL) { 2914 if(!msg_add_rrset_ns(z, region, msg, ce, ds)) return 0; 2915 } else { 2916 /* deny the DS */ 2917 if((nsec=az_domain_rrset(ce, LDNS_RR_TYPE_NSEC))!=NULL) { 2918 if(!msg_add_rrset_ns(z, region, msg, ce, nsec)) 2919 return 0; 2920 } else { 2921 if(!az_add_nsec3_proof(z, region, msg, ce->name, 2922 ce->namelen, msg->qinfo.qname, 2923 msg->qinfo.qname_len, 0, 0)) 2924 return 0; 2925 } 2926 } 2927 /* add additional rrs for type NS */ 2928 if(!az_add_additionals_from(z, region, msg, rrset, 0)) return 0; 2929 return 1; 2930 } 2931 2932 /** generate answer for DNAME answer */ 2933 static int 2934 az_generate_dname_answer(struct auth_zone* z, struct query_info* qinfo, 2935 struct regional* region, struct dns_msg* msg, struct auth_data* ce, 2936 struct auth_rrset* rrset) 2937 { 2938 log_assert(ce); 2939 /* add the DNAME and then a CNAME */ 2940 if(!msg_add_rrset_an(z, region, msg, ce, rrset)) return 0; 2941 if(!add_synth_cname(z, qinfo->qname, qinfo->qname_len, region, 2942 msg, ce, rrset)) return 0; 2943 if(FLAGS_GET_RCODE(msg->rep->flags) == LDNS_RCODE_YXDOMAIN) 2944 return 1; 2945 if(msg->rep->rrset_count == 0 || 2946 !msg->rep->rrsets[msg->rep->rrset_count-1]) 2947 return 0; 2948 if(!follow_cname_chain(z, qinfo->qtype, region, msg, 2949 (struct packed_rrset_data*)msg->rep->rrsets[ 2950 msg->rep->rrset_count-1]->entry.data)) 2951 return 0; 2952 return 1; 2953 } 2954 2955 /** generate answer for wildcard answer */ 2956 static int 2957 az_generate_wildcard_answer(struct auth_zone* z, struct query_info* qinfo, 2958 struct regional* region, struct dns_msg* msg, struct auth_data* ce, 2959 struct auth_data* wildcard, struct auth_data* node) 2960 { 2961 struct auth_rrset* rrset, *nsec; 2962 if((rrset=az_domain_rrset(wildcard, qinfo->qtype)) != NULL) { 2963 /* wildcard has type, add it */ 2964 if(!msg_add_rrset_an(z, region, msg, wildcard, rrset)) 2965 return 0; 2966 az_change_dnames(msg, wildcard->name, msg->qinfo.qname, 2967 msg->qinfo.qname_len, 1); 2968 } else if((rrset=az_domain_rrset(wildcard, LDNS_RR_TYPE_CNAME))!=NULL) { 2969 /* wildcard has cname instead, do that */ 2970 if(!msg_add_rrset_an(z, region, msg, wildcard, rrset)) 2971 return 0; 2972 az_change_dnames(msg, wildcard->name, msg->qinfo.qname, 2973 msg->qinfo.qname_len, 1); 2974 if(!follow_cname_chain(z, qinfo->qtype, region, msg, 2975 rrset->data)) 2976 return 0; 2977 } else if(qinfo->qtype == LDNS_RR_TYPE_ANY && wildcard->rrsets) { 2978 /* add ANY rrsets from wildcard node */ 2979 if(!az_generate_any_answer(z, region, msg, wildcard)) 2980 return 0; 2981 az_change_dnames(msg, wildcard->name, msg->qinfo.qname, 2982 msg->qinfo.qname_len, 1); 2983 } else { 2984 /* wildcard has nodata, notype answer */ 2985 /* call other notype routine for dnssec notype denials */ 2986 if(!az_generate_notype_answer(z, region, msg, wildcard)) 2987 return 0; 2988 } 2989 2990 /* ce and node for dnssec denial of wildcard original name */ 2991 
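/* a validator also needs proof that the exact query name itself does not exist, so that the wildcard expansion is justified; that is the NSEC covering the query name, or the NSEC3 nextcloser proof added below */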
if((nsec=az_find_nsec_cover(z, &node)) != NULL) { 2992 if(!msg_add_rrset_ns(z, region, msg, node, nsec)) return 0; 2993 } else if(ce) { 2994 if(!az_add_nsec3_proof(z, region, msg, ce->name, 2995 ce->namelen, msg->qinfo.qname, 2996 msg->qinfo.qname_len, 1, 0)) 2997 return 0; 2998 } 2999 3000 /* fixup name of wildcard from *.zone to qname, use already allocated 3001 * pointer to msg qname */ 3002 az_change_dnames(msg, wildcard->name, msg->qinfo.qname, 3003 msg->qinfo.qname_len, 0); 3004 return 1; 3005 } 3006 3007 /** generate answer for nxdomain answer */ 3008 static int 3009 az_generate_nxdomain_answer(struct auth_zone* z, struct regional* region, 3010 struct dns_msg* msg, struct auth_data* ce, struct auth_data* node) 3011 { 3012 struct auth_rrset* nsec; 3013 msg->rep->flags |= LDNS_RCODE_NXDOMAIN; 3014 if(!az_add_negative_soa(z, region, msg)) return 0; 3015 if((nsec=az_find_nsec_cover(z, &node)) != NULL) { 3016 if(!msg_add_rrset_ns(z, region, msg, node, nsec)) return 0; 3017 if(ce && !az_nsec_wildcard_denial(z, region, msg, ce->name, 3018 ce->namelen)) return 0; 3019 } else if(ce) { 3020 if(!az_add_nsec3_proof(z, region, msg, ce->name, 3021 ce->namelen, msg->qinfo.qname, 3022 msg->qinfo.qname_len, 1, 1)) 3023 return 0; 3024 } 3025 return 1; 3026 } 3027 3028 /** Create answers when an exact match exists for the domain name */ 3029 static int 3030 az_generate_answer_with_node(struct auth_zone* z, struct query_info* qinfo, 3031 struct regional* region, struct dns_msg* msg, struct auth_data* node) 3032 { 3033 struct auth_rrset* rrset; 3034 /* positive answer, rrset we are looking for exists */ 3035 if((rrset=az_domain_rrset(node, qinfo->qtype)) != NULL) { 3036 return az_generate_positive_answer(z, region, msg, node, rrset); 3037 } 3038 /* CNAME? */ 3039 if((rrset=az_domain_rrset(node, LDNS_RR_TYPE_CNAME)) != NULL) { 3040 return az_generate_cname_answer(z, qinfo, region, msg, 3041 node, rrset); 3042 } 3043 /* type ANY ? */ 3044 if(qinfo->qtype == LDNS_RR_TYPE_ANY) { 3045 return az_generate_any_answer(z, region, msg, node); 3046 } 3047 /* NOERROR/NODATA (no such type at domain name) */ 3048 return az_generate_notype_answer(z, region, msg, node); 3049 } 3050 3051 /** Generate answer without an existing-node that we can use. 3052 * So it'll be a referral, DNAME or nxdomain */ 3053 static int 3054 az_generate_answer_nonexistnode(struct auth_zone* z, struct query_info* qinfo, 3055 struct regional* region, struct dns_msg* msg, struct auth_data* ce, 3056 struct auth_rrset* rrset, struct auth_data* node) 3057 { 3058 struct auth_data* wildcard; 3059 3060 /* we do not have an exact matching name (that exists) */ 3061 /* see if we have a NS or DNAME in the ce */ 3062 if(ce && rrset && rrset->type == LDNS_RR_TYPE_NS) { 3063 return az_generate_referral_answer(z, region, msg, ce, rrset); 3064 } 3065 if(ce && rrset && rrset->type == LDNS_RR_TYPE_DNAME) { 3066 return az_generate_dname_answer(z, qinfo, region, msg, ce, 3067 rrset); 3068 } 3069 /* if there is an empty nonterminal, wildcard and nxdomain don't 3070 * happen, it is a notype answer */ 3071 if(az_empty_nonterminal(z, qinfo, node)) { 3072 return az_generate_notype_answer(z, region, msg, node); 3073 } 3074 /* see if we have a wildcard under the ce */ 3075 if((wildcard=az_find_wildcard(z, qinfo, ce)) != NULL) { 3076 return az_generate_wildcard_answer(z, qinfo, region, msg, 3077 ce, wildcard, node); 3078 } 3079 /* generate nxdomain answer */ 3080 return az_generate_nxdomain_answer(z, region, msg, ce, node); 3081 } 3082 3083 /** Lookup answer in a zone. 
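* The zone is locked by the caller. The fallback output is set from the zone's fallback_enabled option, so that on failure the caller can decide to fall back to normal resolution.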
*/ 3084 static int 3085 auth_zone_generate_answer(struct auth_zone* z, struct query_info* qinfo, 3086 struct regional* region, struct dns_msg** msg, int* fallback) 3087 { 3088 struct auth_data* node, *ce; 3089 struct auth_rrset* rrset; 3090 int node_exact, node_exists; 3091 /* does the zone want fallback in case of failure? */ 3092 *fallback = z->fallback_enabled; 3093 if(!(*msg=msg_create(region, qinfo))) return 0; 3094 3095 /* lookup if there is a matching domain name for the query */ 3096 az_find_domain(z, qinfo, &node_exact, &node); 3097 3098 /* see if node exists for generating answers from (i.e. not glue and 3099 * obscured by NS or DNAME or NSEC3-only), and also return the 3100 * closest-encloser from that, closest node that should be used 3101 * to generate answers from that is above the query */ 3102 node_exists = az_find_ce(z, qinfo, node, node_exact, &ce, &rrset); 3103 3104 if(verbosity >= VERB_ALGO) { 3105 char zname[256], qname[256], nname[256], cename[256], 3106 tpstr[32], rrstr[32]; 3107 sldns_wire2str_dname_buf(qinfo->qname, qinfo->qname_len, qname, 3108 sizeof(qname)); 3109 sldns_wire2str_type_buf(qinfo->qtype, tpstr, sizeof(tpstr)); 3110 sldns_wire2str_dname_buf(z->name, z->namelen, zname, 3111 sizeof(zname)); 3112 if(node) 3113 sldns_wire2str_dname_buf(node->name, node->namelen, 3114 nname, sizeof(nname)); 3115 else snprintf(nname, sizeof(nname), "NULL"); 3116 if(ce) 3117 sldns_wire2str_dname_buf(ce->name, ce->namelen, 3118 cename, sizeof(cename)); 3119 else snprintf(cename, sizeof(cename), "NULL"); 3120 if(rrset) sldns_wire2str_type_buf(rrset->type, rrstr, 3121 sizeof(rrstr)); 3122 else snprintf(rrstr, sizeof(rrstr), "NULL"); 3123 log_info("auth_zone %s query %s %s, domain %s %s %s, " 3124 "ce %s, rrset %s", zname, qname, tpstr, nname, 3125 (node_exact?"exact":"notexact"), 3126 (node_exists?"exist":"notexist"), cename, rrstr); 3127 } 3128 3129 if(node_exists) { 3130 /* the node is fine, generate answer from node */ 3131 return az_generate_answer_with_node(z, qinfo, region, *msg, 3132 node); 3133 } 3134 return az_generate_answer_nonexistnode(z, qinfo, region, *msg, 3135 ce, rrset, node); 3136 } 3137 3138 int auth_zones_lookup(struct auth_zones* az, struct query_info* qinfo, 3139 struct regional* region, struct dns_msg** msg, int* fallback, 3140 uint8_t* dp_nm, size_t dp_nmlen) 3141 { 3142 int r; 3143 struct auth_zone* z; 3144 /* find the zone that should contain the answer. 
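* dp_nm is the delegation point name from the caller; it is looked up as an exact match, so it is expected to equal the name of the auth zone itself.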
*/ 3145 lock_rw_rdlock(&az->lock); 3146 z = auth_zone_find(az, dp_nm, dp_nmlen, qinfo->qclass); 3147 if(!z) { 3148 lock_rw_unlock(&az->lock); 3149 /* no auth zone, fallback to internet */ 3150 *fallback = 1; 3151 return 0; 3152 } 3153 lock_rw_rdlock(&z->lock); 3154 lock_rw_unlock(&az->lock); 3155 3156 /* if not for upstream queries, fallback */ 3157 if(!z->for_upstream) { 3158 lock_rw_unlock(&z->lock); 3159 *fallback = 1; 3160 return 0; 3161 } 3162 /* see what answer that zone would generate */ 3163 r = auth_zone_generate_answer(z, qinfo, region, msg, fallback); 3164 lock_rw_unlock(&z->lock); 3165 return r; 3166 } 3167 3168 /** encode auth answer */ 3169 static void 3170 auth_answer_encode(struct query_info* qinfo, struct module_env* env, 3171 struct edns_data* edns, sldns_buffer* buf, struct regional* temp, 3172 struct dns_msg* msg) 3173 { 3174 uint16_t udpsize; 3175 udpsize = edns->udp_size; 3176 edns->edns_version = EDNS_ADVERTISED_VERSION; 3177 edns->udp_size = EDNS_ADVERTISED_SIZE; 3178 edns->ext_rcode = 0; 3179 edns->bits &= EDNS_DO; 3180 3181 if(!inplace_cb_reply_local_call(env, qinfo, NULL, msg->rep, 3182 (int)FLAGS_GET_RCODE(msg->rep->flags), edns, temp) 3183 || !reply_info_answer_encode(qinfo, msg->rep, 3184 *(uint16_t*)sldns_buffer_begin(buf), 3185 sldns_buffer_read_u16_at(buf, 2), 3186 buf, 0, 0, temp, udpsize, edns, 3187 (int)(edns->bits&EDNS_DO), 0)) { 3188 error_encode(buf, (LDNS_RCODE_SERVFAIL|BIT_AA), qinfo, 3189 *(uint16_t*)sldns_buffer_begin(buf), 3190 sldns_buffer_read_u16_at(buf, 2), edns); 3191 } 3192 } 3193 3194 /** encode auth error answer */ 3195 static void 3196 auth_error_encode(struct query_info* qinfo, struct module_env* env, 3197 struct edns_data* edns, sldns_buffer* buf, struct regional* temp, 3198 int rcode) 3199 { 3200 edns->edns_version = EDNS_ADVERTISED_VERSION; 3201 edns->udp_size = EDNS_ADVERTISED_SIZE; 3202 edns->ext_rcode = 0; 3203 edns->bits &= EDNS_DO; 3204 3205 if(!inplace_cb_reply_local_call(env, qinfo, NULL, NULL, 3206 rcode, edns, temp)) 3207 edns->opt_list = NULL; 3208 error_encode(buf, rcode|BIT_AA, qinfo, 3209 *(uint16_t*)sldns_buffer_begin(buf), 3210 sldns_buffer_read_u16_at(buf, 2), edns); 3211 } 3212 3213 int auth_zones_answer(struct auth_zones* az, struct module_env* env, 3214 struct query_info* qinfo, struct edns_data* edns, struct sldns_buffer* buf, 3215 struct regional* temp) 3216 { 3217 struct dns_msg* msg = NULL; 3218 struct auth_zone* z; 3219 int r; 3220 int fallback = 0; 3221 3222 lock_rw_rdlock(&az->lock); 3223 if(!az->have_downstream) { 3224 /* no downstream auth zones */ 3225 lock_rw_unlock(&az->lock); 3226 return 0; 3227 } 3228 if(qinfo->qtype == LDNS_RR_TYPE_DS) { 3229 uint8_t* delname = qinfo->qname; 3230 size_t delnamelen = qinfo->qname_len; 3231 dname_remove_label(&delname, &delnamelen); 3232 z = auth_zones_find_zone(az, delname, delnamelen, 3233 qinfo->qclass); 3234 } else { 3235 z = auth_zones_find_zone(az, qinfo->qname, qinfo->qname_len, 3236 qinfo->qclass); 3237 } 3238 if(!z) { 3239 /* no zone above it */ 3240 lock_rw_unlock(&az->lock); 3241 return 0; 3242 } 3243 lock_rw_rdlock(&z->lock); 3244 lock_rw_unlock(&az->lock); 3245 if(!z->for_downstream) { 3246 lock_rw_unlock(&z->lock); 3247 return 0; 3248 } 3249 3250 /* answer it from zone z */ 3251 r = auth_zone_generate_answer(z, qinfo, temp, &msg, &fallback); 3252 lock_rw_unlock(&z->lock); 3253 if(!r && fallback) { 3254 /* fallback to regular answering (recursive) */ 3255 return 0; 3256 } 3257 lock_rw_wrlock(&az->lock); 3258 az->num_query_down++; 3259 
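/* statistics: this query is answered from a downstream auth zone; the counter is only changed while holding the az lock */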
lock_rw_unlock(&az->lock); 3260 3261 /* encode answer */ 3262 if(!r) 3263 auth_error_encode(qinfo, env, edns, buf, temp, 3264 LDNS_RCODE_SERVFAIL); 3265 else auth_answer_encode(qinfo, env, edns, buf, temp, msg); 3266 3267 return 1; 3268 } 3269 3270 int auth_zones_can_fallback(struct auth_zones* az, uint8_t* nm, size_t nmlen, 3271 uint16_t dclass) 3272 { 3273 int r; 3274 struct auth_zone* z; 3275 lock_rw_rdlock(&az->lock); 3276 z = auth_zone_find(az, nm, nmlen, dclass); 3277 if(!z) { 3278 lock_rw_unlock(&az->lock); 3279 /* no such auth zone, fallback */ 3280 return 1; 3281 } 3282 lock_rw_rdlock(&z->lock); 3283 lock_rw_unlock(&az->lock); 3284 r = z->fallback_enabled || (!z->for_upstream); 3285 lock_rw_unlock(&z->lock); 3286 return r; 3287 } 3288 3289 int 3290 auth_zone_parse_notify_serial(sldns_buffer* pkt, uint32_t *serial) 3291 { 3292 struct query_info q; 3293 uint16_t rdlen; 3294 memset(&q, 0, sizeof(q)); 3295 sldns_buffer_set_position(pkt, 0); 3296 if(!query_info_parse(&q, pkt)) return 0; 3297 if(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) == 0) return 0; 3298 /* skip name of RR in answer section */ 3299 if(sldns_buffer_remaining(pkt) < 1) return 0; 3300 if(pkt_dname_len(pkt) == 0) return 0; 3301 /* check type */ 3302 if(sldns_buffer_remaining(pkt) < 10 /* type,class,ttl,rdatalen*/) 3303 return 0; 3304 if(sldns_buffer_read_u16(pkt) != LDNS_RR_TYPE_SOA) return 0; 3305 sldns_buffer_skip(pkt, 2); /* class */ 3306 sldns_buffer_skip(pkt, 4); /* ttl */ 3307 rdlen = sldns_buffer_read_u16(pkt); /* rdatalen */ 3308 if(sldns_buffer_remaining(pkt) < rdlen) return 0; 3309 if(rdlen < 22) return 0; /* bad soa length */ 3310 sldns_buffer_skip(pkt, (ssize_t)(rdlen-20)); 3311 *serial = sldns_buffer_read_u32(pkt); 3312 /* return true when has serial in answer section */ 3313 return 1; 3314 } 3315 3316 /** see if addr appears in the list */ 3317 static int 3318 addr_in_list(struct auth_addr* list, struct sockaddr_storage* addr, 3319 socklen_t addrlen) 3320 { 3321 struct auth_addr* p; 3322 for(p=list; p; p=p->next) { 3323 if(sockaddr_cmp_addr(addr, addrlen, &p->addr, p->addrlen)==0) 3324 return 1; 3325 } 3326 return 0; 3327 } 3328 3329 /** check if an address matches a master specification (or one of its 3330 * addresses in the addr list) */ 3331 static int 3332 addr_matches_master(struct auth_master* master, struct sockaddr_storage* addr, 3333 socklen_t addrlen, struct auth_master** fromhost) 3334 { 3335 struct sockaddr_storage a; 3336 socklen_t alen = 0; 3337 int net = 0; 3338 if(addr_in_list(master->list, addr, addrlen)) { 3339 *fromhost = master; 3340 return 1; 3341 } 3342 /* compare address (but not port number, that is the destination 3343 * port of the master, the port number of the received notify is 3344 * allowed to by any port on that master) */ 3345 if(extstrtoaddr(master->host, &a, &alen) && 3346 sockaddr_cmp_addr(addr, addrlen, &a, alen)==0) { 3347 *fromhost = master; 3348 return 1; 3349 } 3350 /* prefixes, addr/len, like 10.0.0.0/8 */ 3351 /* not http and has a / and there is one / */ 3352 if(master->allow_notify && !master->http && 3353 strchr(master->host, '/') != NULL && 3354 strchr(master->host, '/') == strrchr(master->host, '/') && 3355 netblockstrtoaddr(master->host, UNBOUND_DNS_PORT, &a, &alen, 3356 &net) && alen == addrlen) { 3357 if(addr_in_common(addr, (addr_is_ip6(addr, addrlen)?128:32), 3358 &a, net, alen) >= net) { 3359 *fromhost = NULL; /* prefix does not have destination 3360 to send the probe or transfer with */ 3361 return 1; /* matches the netblock */ 3362 } 3363 } 3364 return 0; 
3365 } 3366 3367 /** check access list for notifies */ 3368 static int 3369 az_xfr_allowed_notify(struct auth_xfer* xfr, struct sockaddr_storage* addr, 3370 socklen_t addrlen, struct auth_master** fromhost) 3371 { 3372 struct auth_master* p; 3373 for(p=xfr->allow_notify_list; p; p=p->next) { 3374 if(addr_matches_master(p, addr, addrlen, fromhost)) { 3375 return 1; 3376 } 3377 } 3378 return 0; 3379 } 3380 3381 /** see if the serial means the zone has to be updated, i.e. the serial 3382 * is newer than the zone serial, or we have no zone */ 3383 static int 3384 xfr_serial_means_update(struct auth_xfer* xfr, uint32_t serial) 3385 { 3386 if(!xfr->have_zone) 3387 return 1; /* no zone, anything is better */ 3388 if(xfr->zone_expired) 3389 return 1; /* expired, the sent serial is better than expired 3390 data */ 3391 if(compare_serial(xfr->serial, serial) < 0) 3392 return 1; /* our serial is smaller than the sent serial, 3393 the data is newer, fetch it */ 3394 return 0; 3395 } 3396 3397 /** note notify serial, updates the notify information in the xfr struct */ 3398 static void 3399 xfr_note_notify_serial(struct auth_xfer* xfr, int has_serial, uint32_t serial) 3400 { 3401 if(xfr->notify_received && xfr->notify_has_serial && has_serial) { 3402 /* see if this serial is newer */ 3403 if(compare_serial(xfr->notify_serial, serial) < 0) 3404 xfr->notify_serial = serial; 3405 } else if(xfr->notify_received && xfr->notify_has_serial && 3406 !has_serial) { 3407 /* remove serial, we have notify without serial */ 3408 xfr->notify_has_serial = 0; 3409 xfr->notify_serial = 0; 3410 } else if(xfr->notify_received && !xfr->notify_has_serial) { 3411 /* we already have notify without serial, keep it 3412 * that way; no serial check when current operation 3413 * is done */ 3414 } else { 3415 xfr->notify_received = 1; 3416 xfr->notify_has_serial = has_serial; 3417 xfr->notify_serial = serial; 3418 } 3419 } 3420 3421 /** process a notify serial, start new probe or note serial. 
xfr is locked */ 3422 static void 3423 xfr_process_notify(struct auth_xfer* xfr, struct module_env* env, 3424 int has_serial, uint32_t serial, struct auth_master* fromhost) 3425 { 3426 /* if the serial of notify is older than we have, don't fetch 3427 * a zone, we already have it */ 3428 if(has_serial && !xfr_serial_means_update(xfr, serial)) 3429 return; 3430 /* start new probe with this addr src, or note serial */ 3431 if(!xfr_start_probe(xfr, env, fromhost)) { 3432 /* not started because already in progress, note the serial */ 3433 xfr_note_notify_serial(xfr, has_serial, serial); 3434 lock_basic_unlock(&xfr->lock); 3435 } 3436 } 3437 3438 int auth_zones_notify(struct auth_zones* az, struct module_env* env, 3439 uint8_t* nm, size_t nmlen, uint16_t dclass, 3440 struct sockaddr_storage* addr, socklen_t addrlen, int has_serial, 3441 uint32_t serial, int* refused) 3442 { 3443 struct auth_xfer* xfr; 3444 struct auth_master* fromhost = NULL; 3445 /* see which zone this is */ 3446 lock_rw_rdlock(&az->lock); 3447 xfr = auth_xfer_find(az, nm, nmlen, dclass); 3448 if(!xfr) { 3449 lock_rw_unlock(&az->lock); 3450 /* no such zone, refuse the notify */ 3451 *refused = 1; 3452 return 0; 3453 } 3454 lock_basic_lock(&xfr->lock); 3455 lock_rw_unlock(&az->lock); 3456 3457 /* check access list for notifies */ 3458 if(!az_xfr_allowed_notify(xfr, addr, addrlen, &fromhost)) { 3459 lock_basic_unlock(&xfr->lock); 3460 /* notify not allowed, refuse the notify */ 3461 *refused = 1; 3462 return 0; 3463 } 3464 3465 /* process the notify */ 3466 xfr_process_notify(xfr, env, has_serial, serial, fromhost); 3467 return 1; 3468 } 3469 3470 /** set a zone expired */ 3471 static void 3472 auth_xfer_set_expired(struct auth_xfer* xfr, struct module_env* env, 3473 int expired) 3474 { 3475 struct auth_zone* z; 3476 3477 /* expire xfr */ 3478 lock_basic_lock(&xfr->lock); 3479 xfr->zone_expired = expired; 3480 lock_basic_unlock(&xfr->lock); 3481 3482 /* find auth_zone */ 3483 lock_rw_rdlock(&env->auth_zones->lock); 3484 z = auth_zone_find(env->auth_zones, xfr->name, xfr->namelen, 3485 xfr->dclass); 3486 if(!z) { 3487 lock_rw_unlock(&env->auth_zones->lock); 3488 return; 3489 } 3490 lock_rw_wrlock(&z->lock); 3491 lock_rw_unlock(&env->auth_zones->lock); 3492 3493 /* expire auth_zone */ 3494 z->zone_expired = expired; 3495 lock_rw_unlock(&z->lock); 3496 } 3497 3498 /** find master (from notify or probe) in list of masters */ 3499 static struct auth_master* 3500 find_master_by_host(struct auth_master* list, char* host) 3501 { 3502 struct auth_master* p; 3503 for(p=list; p; p=p->next) { 3504 if(strcmp(p->host, host) == 0) 3505 return p; 3506 } 3507 return NULL; 3508 } 3509 3510 /** delete the looked up auth_addrs for all the masters in the list */ 3511 static void 3512 xfr_masterlist_free_addrs(struct auth_master* list) 3513 { 3514 struct auth_master* m; 3515 for(m=list; m; m=m->next) { 3516 if(m->list) { 3517 auth_free_master_addrs(m->list); 3518 m->list = NULL; 3519 } 3520 } 3521 } 3522 3523 /** copy a list of auth_addrs */ 3524 static struct auth_addr* 3525 auth_addr_list_copy(struct auth_addr* source) 3526 { 3527 struct auth_addr* list = NULL, *last = NULL; 3528 struct auth_addr* p; 3529 for(p=source; p; p=p->next) { 3530 struct auth_addr* a = (struct auth_addr*)memdup(p, sizeof(*p)); 3531 if(!a) { 3532 log_err("malloc failure"); 3533 auth_free_master_addrs(list); 3534 return NULL; 3535 } 3536 a->next = NULL; 3537 if(last) last->next = a; 3538 if(!list) list = a; 3539 last = a; 3540 } 3541 return list; 3542 } 3543 3544 /** copy a 
master to a new structure, NULL on alloc failure */ 3545 static struct auth_master* 3546 auth_master_copy(struct auth_master* o) 3547 { 3548 struct auth_master* m; 3549 if(!o) return NULL; 3550 m = (struct auth_master*)memdup(o, sizeof(*o)); 3551 if(!m) { 3552 log_err("malloc failure"); 3553 return NULL; 3554 } 3555 m->next = NULL; 3556 if(m->host) { 3557 m->host = strdup(m->host); 3558 if(!m->host) { 3559 free(m); 3560 log_err("malloc failure"); 3561 return NULL; 3562 } 3563 } 3564 if(m->file) { 3565 m->file = strdup(m->file); 3566 if(!m->file) { 3567 free(m->host); 3568 free(m); 3569 log_err("malloc failure"); 3570 return NULL; 3571 } 3572 } 3573 if(m->list) { 3574 m->list = auth_addr_list_copy(m->list); 3575 if(!m->list) { 3576 free(m->file); 3577 free(m->host); 3578 free(m); 3579 return NULL; 3580 } 3581 } 3582 return m; 3583 } 3584 3585 /** copy the master addresses from the task_probe lookups to the allow_notify 3586 * list of masters */ 3587 static void 3588 probe_copy_masters_for_allow_notify(struct auth_xfer* xfr) 3589 { 3590 struct auth_master* list = NULL, *last = NULL; 3591 struct auth_master* p; 3592 /* build up new list with copies */ 3593 for(p = xfr->task_probe->masters; p; p=p->next) { 3594 struct auth_master* m = auth_master_copy(p); 3595 if(!m) { 3596 auth_free_masters(list); 3597 /* failed because of malloc failure, use old list */ 3598 return; 3599 } 3600 m->next = NULL; 3601 if(last) last->next = m; 3602 if(!list) list = m; 3603 last = m; 3604 } 3605 /* success, replace list */ 3606 auth_free_masters(xfr->allow_notify_list); 3607 xfr->allow_notify_list = list; 3608 } 3609 3610 /** start the lookups for task_transfer */ 3611 static void 3612 xfr_transfer_start_lookups(struct auth_xfer* xfr) 3613 { 3614 /* delete all the looked up addresses in the list */ 3615 xfr_masterlist_free_addrs(xfr->task_transfer->masters); 3616 3617 /* start lookup at the first master */ 3618 xfr->task_transfer->lookup_target = xfr->task_transfer->masters; 3619 xfr->task_transfer->lookup_aaaa = 0; 3620 } 3621 3622 /** move to the next lookup of hostname for task_transfer */ 3623 static void 3624 xfr_transfer_move_to_next_lookup(struct auth_xfer* xfr, struct module_env* env) 3625 { 3626 if(!xfr->task_transfer->lookup_target) 3627 return; /* already at end of list */ 3628 if(!xfr->task_transfer->lookup_aaaa && env->cfg->do_ip6) { 3629 /* move to lookup AAAA */ 3630 xfr->task_transfer->lookup_aaaa = 1; 3631 return; 3632 } 3633 xfr->task_transfer->lookup_target = 3634 xfr->task_transfer->lookup_target->next; 3635 xfr->task_transfer->lookup_aaaa = 0; 3636 if(!env->cfg->do_ip4 && xfr->task_transfer->lookup_target!=NULL) 3637 xfr->task_transfer->lookup_aaaa = 1; 3638 } 3639 3640 /** start the lookups for task_probe */ 3641 static void 3642 xfr_probe_start_lookups(struct auth_xfer* xfr) 3643 { 3644 /* delete all the looked up addresses in the list */ 3645 xfr_masterlist_free_addrs(xfr->task_probe->masters); 3646 3647 /* start lookup at the first master */ 3648 xfr->task_probe->lookup_target = xfr->task_probe->masters; 3649 xfr->task_probe->lookup_aaaa = 0; 3650 } 3651 3652 /** move to the next lookup of hostname for task_probe */ 3653 static void 3654 xfr_probe_move_to_next_lookup(struct auth_xfer* xfr, struct module_env* env) 3655 { 3656 if(!xfr->task_probe->lookup_target) 3657 return; /* already at end of list */ 3658 if(!xfr->task_probe->lookup_aaaa && env->cfg->do_ip6) { 3659 /* move to lookup AAAA */ 3660 xfr->task_probe->lookup_aaaa = 1; 3661 return; 3662 } 3663 xfr->task_probe->lookup_target = 
xfr->task_probe->lookup_target->next; 3664 xfr->task_probe->lookup_aaaa = 0; 3665 if(!env->cfg->do_ip4 && xfr->task_probe->lookup_target!=NULL) 3666 xfr->task_probe->lookup_aaaa = 1; 3667 } 3668 3669 /** start the iteration of the task_transfer list of masters */ 3670 static void 3671 xfr_transfer_start_list(struct auth_xfer* xfr, struct auth_master* spec) 3672 { 3673 if(spec) { 3674 xfr->task_transfer->scan_specific = find_master_by_host( 3675 xfr->task_transfer->masters, spec->host); 3676 if(xfr->task_transfer->scan_specific) { 3677 xfr->task_transfer->scan_target = NULL; 3678 xfr->task_transfer->scan_addr = NULL; 3679 if(xfr->task_transfer->scan_specific->list) 3680 xfr->task_transfer->scan_addr = 3681 xfr->task_transfer->scan_specific->list; 3682 return; 3683 } 3684 } 3685 /* no specific (notified) host to scan */ 3686 xfr->task_transfer->scan_specific = NULL; 3687 xfr->task_transfer->scan_addr = NULL; 3688 /* pick up first scan target */ 3689 xfr->task_transfer->scan_target = xfr->task_transfer->masters; 3690 if(xfr->task_transfer->scan_target && xfr->task_transfer-> 3691 scan_target->list) 3692 xfr->task_transfer->scan_addr = 3693 xfr->task_transfer->scan_target->list; 3694 } 3695 3696 /** start the iteration of the task_probe list of masters */ 3697 static void 3698 xfr_probe_start_list(struct auth_xfer* xfr, struct auth_master* spec) 3699 { 3700 if(spec) { 3701 xfr->task_probe->scan_specific = find_master_by_host( 3702 xfr->task_probe->masters, spec->host); 3703 if(xfr->task_probe->scan_specific) { 3704 xfr->task_probe->scan_target = NULL; 3705 xfr->task_probe->scan_addr = NULL; 3706 if(xfr->task_probe->scan_specific->list) 3707 xfr->task_probe->scan_addr = 3708 xfr->task_probe->scan_specific->list; 3709 return; 3710 } 3711 } 3712 /* no specific (notified) host to scan */ 3713 xfr->task_probe->scan_specific = NULL; 3714 xfr->task_probe->scan_addr = NULL; 3715 /* pick up first scan target */ 3716 xfr->task_probe->scan_target = xfr->task_probe->masters; 3717 if(xfr->task_probe->scan_target && xfr->task_probe->scan_target->list) 3718 xfr->task_probe->scan_addr = 3719 xfr->task_probe->scan_target->list; 3720 } 3721 3722 /** pick up the master that is being scanned right now, task_transfer */ 3723 static struct auth_master* 3724 xfr_transfer_current_master(struct auth_xfer* xfr) 3725 { 3726 if(xfr->task_transfer->scan_specific) 3727 return xfr->task_transfer->scan_specific; 3728 return xfr->task_transfer->scan_target; 3729 } 3730 3731 /** pick up the master that is being scanned right now, task_probe */ 3732 static struct auth_master* 3733 xfr_probe_current_master(struct auth_xfer* xfr) 3734 { 3735 if(xfr->task_probe->scan_specific) 3736 return xfr->task_probe->scan_specific; 3737 return xfr->task_probe->scan_target; 3738 } 3739 3740 /** true if at end of list, task_transfer */ 3741 static int 3742 xfr_transfer_end_of_list(struct auth_xfer* xfr) 3743 { 3744 return !xfr->task_transfer->scan_specific && 3745 !xfr->task_transfer->scan_target; 3746 } 3747 3748 /** true if at end of list, task_probe */ 3749 static int 3750 xfr_probe_end_of_list(struct auth_xfer* xfr) 3751 { 3752 return !xfr->task_probe->scan_specific && !xfr->task_probe->scan_target; 3753 } 3754 3755 /** move to next master in list, task_transfer */ 3756 static void 3757 xfr_transfer_nextmaster(struct auth_xfer* xfr) 3758 { 3759 if(!xfr->task_transfer->scan_specific && 3760 !xfr->task_transfer->scan_target) 3761 return; 3762 if(xfr->task_transfer->scan_addr) { 3763 xfr->task_transfer->scan_addr = 3764 
xfr->task_transfer->scan_addr->next; 3765 if(xfr->task_transfer->scan_addr) 3766 return; 3767 } 3768 if(xfr->task_transfer->scan_specific) { 3769 xfr->task_transfer->scan_specific = NULL; 3770 xfr->task_transfer->scan_target = xfr->task_transfer->masters; 3771 if(xfr->task_transfer->scan_target && xfr->task_transfer-> 3772 scan_target->list) 3773 xfr->task_transfer->scan_addr = 3774 xfr->task_transfer->scan_target->list; 3775 return; 3776 } 3777 if(!xfr->task_transfer->scan_target) 3778 return; 3779 xfr->task_transfer->scan_target = xfr->task_transfer->scan_target->next; 3780 if(xfr->task_transfer->scan_target && xfr->task_transfer-> 3781 scan_target->list) 3782 xfr->task_transfer->scan_addr = 3783 xfr->task_transfer->scan_target->list; 3784 return; 3785 } 3786 3787 /** move to next master in list, task_probe */ 3788 static void 3789 xfr_probe_nextmaster(struct auth_xfer* xfr) 3790 { 3791 if(!xfr->task_probe->scan_specific && !xfr->task_probe->scan_target) 3792 return; 3793 if(xfr->task_probe->scan_addr) { 3794 xfr->task_probe->scan_addr = xfr->task_probe->scan_addr->next; 3795 if(xfr->task_probe->scan_addr) 3796 return; 3797 } 3798 if(xfr->task_probe->scan_specific) { 3799 xfr->task_probe->scan_specific = NULL; 3800 xfr->task_probe->scan_target = xfr->task_probe->masters; 3801 if(xfr->task_probe->scan_target && xfr->task_probe-> 3802 scan_target->list) 3803 xfr->task_probe->scan_addr = 3804 xfr->task_probe->scan_target->list; 3805 return; 3806 } 3807 if(!xfr->task_probe->scan_target) 3808 return; 3809 xfr->task_probe->scan_target = xfr->task_probe->scan_target->next; 3810 if(xfr->task_probe->scan_target && xfr->task_probe-> 3811 scan_target->list) 3812 xfr->task_probe->scan_addr = 3813 xfr->task_probe->scan_target->list; 3814 return; 3815 } 3816 3817 /** create SOA probe packet for xfr */ 3818 static void 3819 xfr_create_soa_probe_packet(struct auth_xfer* xfr, sldns_buffer* buf, 3820 uint16_t id) 3821 { 3822 struct query_info qinfo; 3823 3824 memset(&qinfo, 0, sizeof(qinfo)); 3825 qinfo.qname = xfr->name; 3826 qinfo.qname_len = xfr->namelen; 3827 qinfo.qtype = LDNS_RR_TYPE_SOA; 3828 qinfo.qclass = xfr->dclass; 3829 qinfo_query_encode(buf, &qinfo); 3830 sldns_buffer_write_u16_at(buf, 0, id); 3831 } 3832 3833 /** create IXFR/AXFR packet for xfr */ 3834 static void 3835 xfr_create_ixfr_packet(struct auth_xfer* xfr, sldns_buffer* buf, uint16_t id, 3836 struct auth_master* master) 3837 { 3838 struct query_info qinfo; 3839 uint32_t serial; 3840 int have_zone; 3841 have_zone = xfr->have_zone; 3842 serial = xfr->serial; 3843 3844 memset(&qinfo, 0, sizeof(qinfo)); 3845 qinfo.qname = xfr->name; 3846 qinfo.qname_len = xfr->namelen; 3847 xfr->task_transfer->got_xfr_serial = 0; 3848 xfr->task_transfer->rr_scan_num = 0; 3849 xfr->task_transfer->incoming_xfr_serial = 0; 3850 xfr->task_transfer->on_ixfr_is_axfr = 0; 3851 xfr->task_transfer->on_ixfr = 1; 3852 qinfo.qtype = LDNS_RR_TYPE_IXFR; 3853 if(!have_zone || xfr->task_transfer->ixfr_fail || !master->ixfr) { 3854 qinfo.qtype = LDNS_RR_TYPE_AXFR; 3855 xfr->task_transfer->ixfr_fail = 0; 3856 xfr->task_transfer->on_ixfr = 0; 3857 } 3858 3859 qinfo.qclass = xfr->dclass; 3860 qinfo_query_encode(buf, &qinfo); 3861 sldns_buffer_write_u16_at(buf, 0, id); 3862 3863 /* append serial for IXFR */ 3864 if(qinfo.qtype == LDNS_RR_TYPE_IXFR) { 3865 size_t end = sldns_buffer_limit(buf); 3866 sldns_buffer_clear(buf); 3867 sldns_buffer_set_position(buf, end); 3868 /* auth section count 1 */ 3869 sldns_buffer_write_u16_at(buf, LDNS_NSCOUNT_OFF, 1); 3870 /* write SOA */ 
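/* a sketch of the authority-section SOA appended below, derived from the
 * writes that follow: a compression pointer 0xC00C to the zone name at
 * wire offset 12, type SOA, the query class, TTL 0, and RDLENGTH 22 =
 * 1 (root MNAME) + 1 (root RNAME) + 5*4 (serial, refresh, retry, expire,
 * minimum); only the serial carries real information, the other SOA
 * fields are sent as zero */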
3871 sldns_buffer_write_u8(buf, 0xC0); /* compressed ptr to qname */ 3872 sldns_buffer_write_u8(buf, 0x0C); 3873 sldns_buffer_write_u16(buf, LDNS_RR_TYPE_SOA); 3874 sldns_buffer_write_u16(buf, qinfo.qclass); 3875 sldns_buffer_write_u32(buf, 0); /* ttl */ 3876 sldns_buffer_write_u16(buf, 22); /* rdata length */ 3877 sldns_buffer_write_u8(buf, 0); /* . */ 3878 sldns_buffer_write_u8(buf, 0); /* . */ 3879 sldns_buffer_write_u32(buf, serial); /* serial */ 3880 sldns_buffer_write_u32(buf, 0); /* refresh */ 3881 sldns_buffer_write_u32(buf, 0); /* retry */ 3882 sldns_buffer_write_u32(buf, 0); /* expire */ 3883 sldns_buffer_write_u32(buf, 0); /* minimum */ 3884 sldns_buffer_flip(buf); 3885 } 3886 } 3887 3888 /** check if returned packet is OK */ 3889 static int 3890 check_packet_ok(sldns_buffer* pkt, uint16_t qtype, struct auth_xfer* xfr, 3891 uint32_t* serial) 3892 { 3893 /* parse to see if packet worked, valid reply */ 3894 3895 /* check serial number of SOA */ 3896 if(sldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) 3897 return 0; 3898 3899 /* check ID */ 3900 if(LDNS_ID_WIRE(sldns_buffer_begin(pkt)) != xfr->task_probe->id) 3901 return 0; 3902 3903 /* check flag bits and rcode */ 3904 if(!LDNS_QR_WIRE(sldns_buffer_begin(pkt))) 3905 return 0; 3906 if(LDNS_OPCODE_WIRE(sldns_buffer_begin(pkt)) != LDNS_PACKET_QUERY) 3907 return 0; 3908 if(LDNS_RCODE_WIRE(sldns_buffer_begin(pkt)) != LDNS_RCODE_NOERROR) 3909 return 0; 3910 3911 /* check qname */ 3912 if(LDNS_QDCOUNT(sldns_buffer_begin(pkt)) != 1) 3913 return 0; 3914 sldns_buffer_skip(pkt, LDNS_HEADER_SIZE); 3915 if(sldns_buffer_remaining(pkt) < xfr->namelen) 3916 return 0; 3917 if(query_dname_compare(sldns_buffer_current(pkt), xfr->name) != 0) 3918 return 0; 3919 sldns_buffer_skip(pkt, (ssize_t)xfr->namelen); 3920 3921 /* check qtype, qclass */ 3922 if(sldns_buffer_remaining(pkt) < 4) 3923 return 0; 3924 if(sldns_buffer_read_u16(pkt) != qtype) 3925 return 0; 3926 if(sldns_buffer_read_u16(pkt) != xfr->dclass) 3927 return 0; 3928 3929 if(serial) { 3930 uint16_t rdlen; 3931 /* read serial number, from answer section SOA */ 3932 if(LDNS_ANCOUNT(sldns_buffer_begin(pkt)) == 0) 3933 return 0; 3934 /* read from first record SOA record */ 3935 if(sldns_buffer_remaining(pkt) < 1) 3936 return 0; 3937 if(dname_pkt_compare(pkt, sldns_buffer_current(pkt), 3938 xfr->name) != 0) 3939 return 0; 3940 if(!pkt_dname_len(pkt)) 3941 return 0; 3942 /* type, class, ttl, rdatalen */ 3943 if(sldns_buffer_remaining(pkt) < 4+4+2) 3944 return 0; 3945 if(sldns_buffer_read_u16(pkt) != qtype) 3946 return 0; 3947 if(sldns_buffer_read_u16(pkt) != xfr->dclass) 3948 return 0; 3949 sldns_buffer_skip(pkt, 4); /* ttl */ 3950 rdlen = sldns_buffer_read_u16(pkt); 3951 if(sldns_buffer_remaining(pkt) < rdlen) 3952 return 0; 3953 if(sldns_buffer_remaining(pkt) < 1) 3954 return 0; 3955 if(!pkt_dname_len(pkt)) /* soa name */ 3956 return 0; 3957 if(sldns_buffer_remaining(pkt) < 1) 3958 return 0; 3959 if(!pkt_dname_len(pkt)) /* soa name */ 3960 return 0; 3961 if(sldns_buffer_remaining(pkt) < 20) 3962 return 0; 3963 *serial = sldns_buffer_read_u32(pkt); 3964 } 3965 return 1; 3966 } 3967 3968 /** read one line from chunks into buffer at current position */ 3969 static int 3970 chunkline_get_line(struct auth_chunk** chunk, size_t* chunk_pos, 3971 sldns_buffer* buf) 3972 { 3973 int readsome = 0; 3974 while(*chunk) { 3975 /* more text in this chunk? 
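* If so, copy bytes into buf one at a time, up to and including the
* newline; give up with a "line too long" error if buf runs out of room.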
*/ 3976 if(*chunk_pos < (*chunk)->len) { 3977 readsome = 1; 3978 while(*chunk_pos < (*chunk)->len) { 3979 char c = (char)((*chunk)->data[*chunk_pos]); 3980 (*chunk_pos)++; 3981 if(sldns_buffer_remaining(buf) < 2) { 3982 /* buffer too short */ 3983 verbose(VERB_ALGO, "http chunkline, " 3984 "line too long"); 3985 return 0; 3986 } 3987 sldns_buffer_write_u8(buf, (uint8_t)c); 3988 if(c == '\n') { 3989 /* we are done */ 3990 return 1; 3991 } 3992 } 3993 } 3994 /* move to next chunk */ 3995 *chunk = (*chunk)->next; 3996 *chunk_pos = 0; 3997 } 3998 /* no more text */ 3999 if(readsome) return 1; 4000 return 0; 4001 } 4002 4003 /** count number of open and closed parenthesis in a chunkline */ 4004 static int 4005 chunkline_count_parens(sldns_buffer* buf, size_t start) 4006 { 4007 size_t end = sldns_buffer_position(buf); 4008 size_t i; 4009 int count = 0; 4010 int squote = 0, dquote = 0; 4011 for(i=start; i<end; i++) { 4012 char c = (char)sldns_buffer_read_u8_at(buf, i); 4013 if(squote && c != '\'') continue; 4014 if(dquote && c != '"') continue; 4015 if(c == '"') 4016 dquote = !dquote; /* skip quoted part */ 4017 else if(c == '\'') 4018 squote = !squote; /* skip quoted part */ 4019 else if(c == '(') 4020 count ++; 4021 else if(c == ')') 4022 count --; 4023 else if(c == ';') { 4024 /* rest is a comment */ 4025 return count; 4026 } 4027 } 4028 return count; 4029 } 4030 4031 /** remove trailing ;... comment from a line in the chunkline buffer */ 4032 static void 4033 chunkline_remove_trailcomment(sldns_buffer* buf, size_t start) 4034 { 4035 size_t end = sldns_buffer_position(buf); 4036 size_t i; 4037 int squote = 0, dquote = 0; 4038 for(i=start; i<end; i++) { 4039 char c = (char)sldns_buffer_read_u8_at(buf, i); 4040 if(squote && c != '\'') continue; 4041 if(dquote && c != '"') continue; 4042 if(c == '"') 4043 dquote = !dquote; /* skip quoted part */ 4044 else if(c == '\'') 4045 squote = !squote; /* skip quoted part */ 4046 else if(c == ';') { 4047 /* rest is a comment */ 4048 sldns_buffer_set_position(buf, i); 4049 return; 4050 } 4051 } 4052 /* nothing to remove */ 4053 } 4054 4055 /** see if a chunkline is a comment line (or empty line) */ 4056 static int 4057 chunkline_is_comment_line_or_empty(sldns_buffer* buf) 4058 { 4059 size_t i, end = sldns_buffer_limit(buf); 4060 for(i=0; i<end; i++) { 4061 char c = (char)sldns_buffer_read_u8_at(buf, i); 4062 if(c == ';') 4063 return 1; /* comment */ 4064 else if(c != ' ' && c != '\t' && c != '\r' && c != '\n') 4065 return 0; /* not a comment */ 4066 } 4067 return 1; /* empty */ 4068 } 4069 4070 /** find a line with ( ) collated */ 4071 static int 4072 chunkline_get_line_collated(struct auth_chunk** chunk, size_t* chunk_pos, 4073 sldns_buffer* buf) 4074 { 4075 size_t pos; 4076 int parens = 0; 4077 sldns_buffer_clear(buf); 4078 pos = sldns_buffer_position(buf); 4079 if(!chunkline_get_line(chunk, chunk_pos, buf)) { 4080 if(sldns_buffer_position(buf) < sldns_buffer_limit(buf)) 4081 sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf), 0); 4082 else sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf)-1, 0); 4083 sldns_buffer_flip(buf); 4084 return 0; 4085 } 4086 parens += chunkline_count_parens(buf, pos); 4087 while(parens > 0) { 4088 chunkline_remove_trailcomment(buf, pos); 4089 pos = sldns_buffer_position(buf); 4090 if(!chunkline_get_line(chunk, chunk_pos, buf)) { 4091 if(sldns_buffer_position(buf) < sldns_buffer_limit(buf)) 4092 sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf), 0); 4093 else sldns_buffer_write_u8_at(buf, 
sldns_buffer_position(buf)-1, 0); 4094 sldns_buffer_flip(buf); 4095 return 0; 4096 } 4097 parens += chunkline_count_parens(buf, pos); 4098 } 4099 4100 if(sldns_buffer_remaining(buf) < 1) { 4101 verbose(VERB_ALGO, "http chunkline: " 4102 "line too long"); 4103 return 0; 4104 } 4105 sldns_buffer_write_u8_at(buf, sldns_buffer_position(buf), 0); 4106 sldns_buffer_flip(buf); 4107 return 1; 4108 } 4109 4110 /** process $ORIGIN for http */ 4111 static int 4112 http_parse_origin(sldns_buffer* buf, struct sldns_file_parse_state* pstate) 4113 { 4114 char* line = (char*)sldns_buffer_begin(buf); 4115 if(strncmp(line, "$ORIGIN", 7) == 0 && 4116 isspace((unsigned char)line[7])) { 4117 int s; 4118 pstate->origin_len = sizeof(pstate->origin); 4119 s = sldns_str2wire_dname_buf(sldns_strip_ws(line+8), 4120 pstate->origin, &pstate->origin_len); 4121 if(s) pstate->origin_len = 0; 4122 return 1; 4123 } 4124 return 0; 4125 } 4126 4127 /** process $TTL for http */ 4128 static int 4129 http_parse_ttl(sldns_buffer* buf, struct sldns_file_parse_state* pstate) 4130 { 4131 char* line = (char*)sldns_buffer_begin(buf); 4132 if(strncmp(line, "$TTL", 4) == 0 && 4133 isspace((unsigned char)line[4])) { 4134 const char* end = NULL; 4135 pstate->default_ttl = sldns_str2period( 4136 sldns_strip_ws(line+5), &end); 4137 return 1; 4138 } 4139 return 0; 4140 } 4141 4142 /** find noncomment RR line in chunks, collates lines if ( ) format */ 4143 static int 4144 chunkline_non_comment_RR(struct auth_chunk** chunk, size_t* chunk_pos, 4145 sldns_buffer* buf, struct sldns_file_parse_state* pstate) 4146 { 4147 while(chunkline_get_line_collated(chunk, chunk_pos, buf)) { 4148 if(chunkline_is_comment_line_or_empty(buf)) { 4149 /* a comment, go to next line */ 4150 continue; 4151 } 4152 if(http_parse_origin(buf, pstate)) { 4153 continue; /* $ORIGIN has been handled */ 4154 } 4155 if(http_parse_ttl(buf, pstate)) { 4156 continue; /* $TTL has been handled */ 4157 } 4158 return 1; 4159 } 4160 /* no noncomments, fail */ 4161 return 0; 4162 } 4163 4164 /** check syntax of chunklist zonefile, parse SOA RR, return false on 4165 * failure and return a string in the scratch buffer (SOA RR string) 4166 * on failure. 
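* The caller uses that string in its error log message.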
*/ 4167 static int 4168 http_zonefile_syntax_check(struct auth_xfer* xfr, sldns_buffer* buf) 4169 { 4170 uint8_t rr[LDNS_RR_BUF_SIZE]; 4171 size_t rr_len, dname_len = 0; 4172 struct sldns_file_parse_state pstate; 4173 struct auth_chunk* chunk; 4174 size_t chunk_pos; 4175 int e; 4176 memset(&pstate, 0, sizeof(pstate)); 4177 pstate.default_ttl = 3600; 4178 if(xfr->namelen < sizeof(pstate.origin)) { 4179 pstate.origin_len = xfr->namelen; 4180 memmove(pstate.origin, xfr->name, xfr->namelen); 4181 } 4182 chunk = xfr->task_transfer->chunks_first; 4183 chunk_pos = 0; 4184 if(!chunkline_non_comment_RR(&chunk, &chunk_pos, buf, &pstate)) { 4185 return 0; 4186 } 4187 rr_len = sizeof(rr); 4188 e=sldns_str2wire_rr_buf((char*)sldns_buffer_begin(buf), rr, &rr_len, 4189 &dname_len, pstate.default_ttl, 4190 pstate.origin_len?pstate.origin:NULL, pstate.origin_len, 4191 pstate.prev_rr_len?pstate.prev_rr:NULL, pstate.prev_rr_len); 4192 if(e != 0) { 4193 log_err("parse failure on SOA RR[%d]: %s", 4194 LDNS_WIREPARSE_OFFSET(e), 4195 sldns_get_errorstr_parse(LDNS_WIREPARSE_ERROR(e))); 4196 return 0; 4197 } 4198 /* check that name is correct */ 4199 if(query_dname_compare(rr, xfr->name) != 0) { 4200 char nm[255+1], zname[255+1]; 4201 dname_str(rr, nm); 4202 dname_str(xfr->name, zname); 4203 log_err("parse failure for %s, SOA RR for %s found instead", 4204 zname, nm); 4205 return 0; 4206 } 4207 /* check that type is SOA */ 4208 if(sldns_wirerr_get_type(rr, rr_len, dname_len) != LDNS_RR_TYPE_SOA) { 4209 log_err("parse failure: first record in downloaded zonefile " 4210 "not of type SOA"); 4211 return 0; 4212 } 4213 /* check that class is correct */ 4214 if(sldns_wirerr_get_class(rr, rr_len, dname_len) != xfr->dclass) { 4215 log_err("parse failure: first record in downloaded zonefile " 4216 "from wrong RR class"); 4217 return 0; 4218 } 4219 return 1; 4220 } 4221 4222 /** sum sizes of chunklist */ 4223 static size_t 4224 chunklist_sum(struct auth_chunk* list) 4225 { 4226 struct auth_chunk* p; 4227 size_t s = 0; 4228 for(p=list; p; p=p->next) { 4229 s += p->len; 4230 } 4231 return s; 4232 } 4233 4234 /** remove newlines from collated line */ 4235 static void 4236 chunkline_newline_removal(sldns_buffer* buf) 4237 { 4238 size_t i, end=sldns_buffer_limit(buf); 4239 for(i=0; i<end; i++) { 4240 char c = (char)sldns_buffer_read_u8_at(buf, i); 4241 if(c == '\n' && i==end-1) { 4242 sldns_buffer_write_u8_at(buf, i, 0); 4243 sldns_buffer_set_limit(buf, end-1); 4244 return; 4245 } 4246 if(c == '\n') 4247 sldns_buffer_write_u8_at(buf, i, (uint8_t)' '); 4248 } 4249 } 4250 4251 /** for http download, parse and add RR to zone */ 4252 static int 4253 http_parse_add_rr(struct auth_xfer* xfr, struct auth_zone* z, 4254 sldns_buffer* buf, struct sldns_file_parse_state* pstate) 4255 { 4256 uint8_t rr[LDNS_RR_BUF_SIZE]; 4257 size_t rr_len, dname_len = 0; 4258 int e; 4259 char* line = (char*)sldns_buffer_begin(buf); 4260 rr_len = sizeof(rr); 4261 e = sldns_str2wire_rr_buf(line, rr, &rr_len, &dname_len, 4262 pstate->default_ttl, 4263 pstate->origin_len?pstate->origin:NULL, pstate->origin_len, 4264 pstate->prev_rr_len?pstate->prev_rr:NULL, pstate->prev_rr_len); 4265 if(e != 0) { 4266 log_err("%s/%s parse failure RR[%d]: %s in '%s'", 4267 xfr->task_transfer->master->host, 4268 xfr->task_transfer->master->file, 4269 LDNS_WIREPARSE_OFFSET(e), 4270 sldns_get_errorstr_parse(LDNS_WIREPARSE_ERROR(e)), 4271 line); 4272 return 0; 4273 } 4274 if(rr_len == 0) 4275 return 1; /* empty line or so */ 4276 4277 /* set prev */ 4278 if(dname_len < 
sizeof(pstate->prev_rr)) { 4279 memmove(pstate->prev_rr, rr, dname_len); 4280 pstate->prev_rr_len = dname_len; 4281 } 4282 4283 return az_insert_rr(z, rr, rr_len, dname_len, NULL); 4284 } 4285 4286 /** RR list iterator, returns RRs from answer section one by one from the 4287 * dns packets in the chunklist */ 4288 static void 4289 chunk_rrlist_start(struct auth_xfer* xfr, struct auth_chunk** rr_chunk, 4290 int* rr_num, size_t* rr_pos) 4291 { 4292 *rr_chunk = xfr->task_transfer->chunks_first; 4293 *rr_num = 0; 4294 *rr_pos = 0; 4295 } 4296 4297 /** RR list iterator, see if we are at the end of the list */ 4298 static int 4299 chunk_rrlist_end(struct auth_chunk* rr_chunk, int rr_num) 4300 { 4301 while(rr_chunk) { 4302 if(rr_chunk->len < LDNS_HEADER_SIZE) 4303 return 1; 4304 if(rr_num < (int)LDNS_ANCOUNT(rr_chunk->data)) 4305 return 0; 4306 /* no more RRs in this chunk */ 4307 /* continue with next chunk, see if it has RRs */ 4308 rr_chunk = rr_chunk->next; 4309 rr_num = 0; 4310 } 4311 return 1; 4312 } 4313 4314 /** RR list iterator, move to next RR */ 4315 static void 4316 chunk_rrlist_gonext(struct auth_chunk** rr_chunk, int* rr_num, 4317 size_t* rr_pos, size_t rr_nextpos) 4318 { 4319 /* already at end of chunks? */ 4320 if(!*rr_chunk) 4321 return; 4322 /* move within this chunk */ 4323 if((*rr_chunk)->len >= LDNS_HEADER_SIZE && 4324 (*rr_num)+1 < (int)LDNS_ANCOUNT((*rr_chunk)->data)) { 4325 (*rr_num) += 1; 4326 *rr_pos = rr_nextpos; 4327 return; 4328 } 4329 /* no more RRs in this chunk */ 4330 /* continue with next chunk, see if it has RRs */ 4331 if(*rr_chunk) 4332 *rr_chunk = (*rr_chunk)->next; 4333 while(*rr_chunk) { 4334 *rr_num = 0; 4335 *rr_pos = 0; 4336 if((*rr_chunk)->len >= LDNS_HEADER_SIZE && 4337 LDNS_ANCOUNT((*rr_chunk)->data) > 0) { 4338 return; 4339 } 4340 *rr_chunk = (*rr_chunk)->next; 4341 } 4342 } 4343 4344 /** RR iterator, get current RR information, false on parse error */ 4345 static int 4346 chunk_rrlist_get_current(struct auth_chunk* rr_chunk, int rr_num, 4347 size_t rr_pos, uint8_t** rr_dname, uint16_t* rr_type, 4348 uint16_t* rr_class, uint32_t* rr_ttl, uint16_t* rr_rdlen, 4349 uint8_t** rr_rdata, size_t* rr_nextpos) 4350 { 4351 sldns_buffer pkt; 4352 /* integrity checks on position */ 4353 if(!rr_chunk) return 0; 4354 if(rr_chunk->len < LDNS_HEADER_SIZE) return 0; 4355 if(rr_num >= (int)LDNS_ANCOUNT(rr_chunk->data)) return 0; 4356 if(rr_pos >= rr_chunk->len) return 0; 4357 4358 /* fetch rr information */ 4359 sldns_buffer_init_frm_data(&pkt, rr_chunk->data, rr_chunk->len); 4360 if(rr_pos == 0) { 4361 size_t i; 4362 /* skip question section */ 4363 sldns_buffer_set_position(&pkt, LDNS_HEADER_SIZE); 4364 for(i=0; i<LDNS_QDCOUNT(rr_chunk->data); i++) { 4365 if(pkt_dname_len(&pkt) == 0) return 0; 4366 if(sldns_buffer_remaining(&pkt) < 4) return 0; 4367 sldns_buffer_skip(&pkt, 4); /* type and class */ 4368 } 4369 } else { 4370 sldns_buffer_set_position(&pkt, rr_pos); 4371 } 4372 *rr_dname = sldns_buffer_current(&pkt); 4373 if(pkt_dname_len(&pkt) == 0) return 0; 4374 if(sldns_buffer_remaining(&pkt) < 10) return 0; 4375 *rr_type = sldns_buffer_read_u16(&pkt); 4376 *rr_class = sldns_buffer_read_u16(&pkt); 4377 *rr_ttl = sldns_buffer_read_u32(&pkt); 4378 *rr_rdlen = sldns_buffer_read_u16(&pkt); 4379 if(sldns_buffer_remaining(&pkt) < (*rr_rdlen)) return 0; 4380 *rr_rdata = sldns_buffer_current(&pkt); 4381 sldns_buffer_skip(&pkt, (ssize_t)(*rr_rdlen)); 4382 *rr_nextpos = sldns_buffer_position(&pkt); 4383 return 1; 4384 } 4385 4386 /** print log message where we are in parsing 
the zone transfer */ 4387 static void 4388 log_rrlist_position(const char* label, struct auth_chunk* rr_chunk, 4389 uint8_t* rr_dname, uint16_t rr_type, size_t rr_counter) 4390 { 4391 sldns_buffer pkt; 4392 size_t dlen; 4393 uint8_t buf[256]; 4394 char str[256]; 4395 char typestr[32]; 4396 sldns_buffer_init_frm_data(&pkt, rr_chunk->data, rr_chunk->len); 4397 sldns_buffer_set_position(&pkt, (size_t)(rr_dname - 4398 sldns_buffer_begin(&pkt))); 4399 if((dlen=pkt_dname_len(&pkt)) == 0) return; 4400 if(dlen >= sizeof(buf)) return; 4401 dname_pkt_copy(&pkt, buf, rr_dname); 4402 dname_str(buf, str); 4403 (void)sldns_wire2str_type_buf(rr_type, typestr, sizeof(typestr)); 4404 verbose(VERB_ALGO, "%s at[%d] %s %s", label, (int)rr_counter, 4405 str, typestr); 4406 } 4407 4408 /** check that start serial is OK for ixfr. we are at rr_counter == 0, 4409 * and we are going to check rr_counter == 1 (has to be type SOA) serial */ 4410 static int 4411 ixfr_start_serial(struct auth_chunk* rr_chunk, int rr_num, size_t rr_pos, 4412 uint8_t* rr_dname, uint16_t rr_type, uint16_t rr_class, 4413 uint32_t rr_ttl, uint16_t rr_rdlen, uint8_t* rr_rdata, 4414 size_t rr_nextpos, uint32_t transfer_serial, uint32_t xfr_serial) 4415 { 4416 uint32_t startserial; 4417 /* move forward on RR */ 4418 chunk_rrlist_gonext(&rr_chunk, &rr_num, &rr_pos, rr_nextpos); 4419 if(chunk_rrlist_end(rr_chunk, rr_num)) { 4420 /* no second SOA */ 4421 verbose(VERB_OPS, "IXFR has no second SOA record"); 4422 return 0; 4423 } 4424 if(!chunk_rrlist_get_current(rr_chunk, rr_num, rr_pos, 4425 &rr_dname, &rr_type, &rr_class, &rr_ttl, &rr_rdlen, 4426 &rr_rdata, &rr_nextpos)) { 4427 verbose(VERB_OPS, "IXFR cannot parse second SOA record"); 4428 /* failed to parse RR */ 4429 return 0; 4430 } 4431 if(rr_type != LDNS_RR_TYPE_SOA) { 4432 verbose(VERB_OPS, "IXFR second record is not type SOA"); 4433 return 0; 4434 } 4435 if(rr_rdlen < 22) { 4436 verbose(VERB_OPS, "IXFR, second SOA has short rdlength"); 4437 return 0; /* bad SOA rdlen */ 4438 } 4439 startserial = sldns_read_uint32(rr_rdata+rr_rdlen-20); 4440 if(startserial == transfer_serial) { 4441 /* empty AXFR, not an IXFR */ 4442 verbose(VERB_OPS, "IXFR second serial same as first"); 4443 return 0; 4444 } 4445 if(startserial != xfr_serial) { 4446 /* wrong start serial, it does not match the serial in 4447 * memory */ 4448 verbose(VERB_OPS, "IXFR is from serial %u to %u but %u " 4449 "in memory, rejecting the zone transfer", 4450 (unsigned)startserial, (unsigned)transfer_serial, 4451 (unsigned)xfr_serial); 4452 return 0; 4453 } 4454 /* everything OK in second SOA serial */ 4455 return 1; 4456 } 4457 4458 /** apply IXFR to zone in memory. z is locked. 
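* The chunk list is walked RR by RR; runs of deleted and added RRs
* alternate, each run introduced by one of the SOA records of the diff,
* and a duplicate addition or a removal of a nonexistent RR causes a
* soft failure so that a full zone fetch follows. Returns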
false on failure(mallocfail) */ 4459 static int 4460 apply_ixfr(struct auth_xfer* xfr, struct auth_zone* z, 4461 struct sldns_buffer* scratch_buffer) 4462 { 4463 struct auth_chunk* rr_chunk; 4464 int rr_num; 4465 size_t rr_pos; 4466 uint8_t* rr_dname, *rr_rdata; 4467 uint16_t rr_type, rr_class, rr_rdlen; 4468 uint32_t rr_ttl; 4469 size_t rr_nextpos; 4470 int have_transfer_serial = 0; 4471 uint32_t transfer_serial = 0; 4472 size_t rr_counter = 0; 4473 int delmode = 0; 4474 int softfail = 0; 4475 4476 /* start RR iterator over chunklist of packets */ 4477 chunk_rrlist_start(xfr, &rr_chunk, &rr_num, &rr_pos); 4478 while(!chunk_rrlist_end(rr_chunk, rr_num)) { 4479 if(!chunk_rrlist_get_current(rr_chunk, rr_num, rr_pos, 4480 &rr_dname, &rr_type, &rr_class, &rr_ttl, &rr_rdlen, 4481 &rr_rdata, &rr_nextpos)) { 4482 /* failed to parse RR */ 4483 return 0; 4484 } 4485 if(verbosity>=7) log_rrlist_position("apply ixfr", 4486 rr_chunk, rr_dname, rr_type, rr_counter); 4487 /* twiddle add/del mode and check for start and end */ 4488 if(rr_counter == 0 && rr_type != LDNS_RR_TYPE_SOA) 4489 return 0; 4490 if(rr_counter == 1 && rr_type != LDNS_RR_TYPE_SOA) { 4491 /* this is an AXFR returned from the IXFR master */ 4492 /* but that should already have been detected, by 4493 * on_ixfr_is_axfr */ 4494 return 0; 4495 } 4496 if(rr_type == LDNS_RR_TYPE_SOA) { 4497 uint32_t serial; 4498 if(rr_rdlen < 22) return 0; /* bad SOA rdlen */ 4499 serial = sldns_read_uint32(rr_rdata+rr_rdlen-20); 4500 if(have_transfer_serial == 0) { 4501 have_transfer_serial = 1; 4502 transfer_serial = serial; 4503 delmode = 1; /* gets negated below */ 4504 /* check second RR before going any further */ 4505 if(!ixfr_start_serial(rr_chunk, rr_num, rr_pos, 4506 rr_dname, rr_type, rr_class, rr_ttl, 4507 rr_rdlen, rr_rdata, rr_nextpos, 4508 transfer_serial, xfr->serial)) { 4509 return 0; 4510 } 4511 } else if(transfer_serial == serial) { 4512 have_transfer_serial++; 4513 if(rr_counter == 1) { 4514 /* empty AXFR, with SOA; SOA; */ 4515 /* should have been detected by 4516 * on_ixfr_is_axfr */ 4517 return 0; 4518 } 4519 if(have_transfer_serial == 3) { 4520 /* see serial three times for end */ 4521 /* eg. 
IXFR: 4522 * SOA 3 start 4523 * SOA 1 second RR, followed by del 4524 * SOA 2 followed by add 4525 * SOA 2 followed by del 4526 * SOA 3 followed by add 4527 * SOA 3 end */ 4528 /* ended by SOA record */ 4529 xfr->serial = transfer_serial; 4530 break; 4531 } 4532 } 4533 /* twiddle add/del mode */ 4534 /* switch from delete part to add part and back again 4535 * just before the soa, it gets deleted and added too 4536 * this means we switch to delete mode for the final 4537 * SOA(so skip that one) */ 4538 delmode = !delmode; 4539 } 4540 /* process this RR */ 4541 /* if the RR is deleted twice or added twice, then we 4542 * softfail, and continue with the rest of the IXFR, so 4543 * that we serve something fairly nice during the refetch */ 4544 if(verbosity>=7) log_rrlist_position((delmode?"del":"add"), 4545 rr_chunk, rr_dname, rr_type, rr_counter); 4546 if(delmode) { 4547 /* delete this RR */ 4548 int nonexist = 0; 4549 if(!az_remove_rr_decompress(z, rr_chunk->data, 4550 rr_chunk->len, scratch_buffer, rr_dname, 4551 rr_type, rr_class, rr_ttl, rr_rdata, rr_rdlen, 4552 &nonexist)) { 4553 /* failed, malloc error or so */ 4554 return 0; 4555 } 4556 if(nonexist) { 4557 /* it was removal of a nonexisting RR */ 4558 if(verbosity>=4) log_rrlist_position( 4559 "IXFR error nonexistent RR", 4560 rr_chunk, rr_dname, rr_type, rr_counter); 4561 softfail = 1; 4562 } 4563 } else if(rr_counter != 0) { 4564 /* skip first SOA RR for addition, it is added in 4565 * the addition part near the end of the ixfr, when 4566 * that serial is seen the second time. */ 4567 int duplicate = 0; 4568 /* add this RR */ 4569 if(!az_insert_rr_decompress(z, rr_chunk->data, 4570 rr_chunk->len, scratch_buffer, rr_dname, 4571 rr_type, rr_class, rr_ttl, rr_rdata, rr_rdlen, 4572 &duplicate)) { 4573 /* failed, malloc error or so */ 4574 return 0; 4575 } 4576 if(duplicate) { 4577 /* it was a duplicate */ 4578 if(verbosity>=4) log_rrlist_position( 4579 "IXFR error duplicate RR", 4580 rr_chunk, rr_dname, rr_type, rr_counter); 4581 softfail = 1; 4582 } 4583 } 4584 4585 rr_counter++; 4586 chunk_rrlist_gonext(&rr_chunk, &rr_num, &rr_pos, rr_nextpos); 4587 } 4588 if(softfail) { 4589 verbose(VERB_ALGO, "IXFR did not apply cleanly, fetching full zone"); 4590 return 0; 4591 } 4592 return 1; 4593 } 4594 4595 /** apply AXFR to zone in memory. z is locked. 
false on failure(mallocfail) */ 4596 static int 4597 apply_axfr(struct auth_xfer* xfr, struct auth_zone* z, 4598 struct sldns_buffer* scratch_buffer) 4599 { 4600 struct auth_chunk* rr_chunk; 4601 int rr_num; 4602 size_t rr_pos; 4603 uint8_t* rr_dname, *rr_rdata; 4604 uint16_t rr_type, rr_class, rr_rdlen; 4605 uint32_t rr_ttl; 4606 uint32_t serial = 0; 4607 size_t rr_nextpos; 4608 size_t rr_counter = 0; 4609 int have_end_soa = 0; 4610 4611 /* clear the data tree */ 4612 traverse_postorder(&z->data, auth_data_del, NULL); 4613 rbtree_init(&z->data, &auth_data_cmp); 4614 xfr->have_zone = 0; 4615 xfr->serial = 0; 4616 4617 /* insert all RRs in to the zone */ 4618 /* insert the SOA only once, skip the last one */ 4619 /* start RR iterator over chunklist of packets */ 4620 chunk_rrlist_start(xfr, &rr_chunk, &rr_num, &rr_pos); 4621 while(!chunk_rrlist_end(rr_chunk, rr_num)) { 4622 if(!chunk_rrlist_get_current(rr_chunk, rr_num, rr_pos, 4623 &rr_dname, &rr_type, &rr_class, &rr_ttl, &rr_rdlen, 4624 &rr_rdata, &rr_nextpos)) { 4625 /* failed to parse RR */ 4626 return 0; 4627 } 4628 if(verbosity>=7) log_rrlist_position("apply_axfr", 4629 rr_chunk, rr_dname, rr_type, rr_counter); 4630 if(rr_type == LDNS_RR_TYPE_SOA) { 4631 if(rr_counter != 0) { 4632 /* end of the axfr */ 4633 have_end_soa = 1; 4634 break; 4635 } 4636 if(rr_rdlen < 22) return 0; /* bad SOA rdlen */ 4637 serial = sldns_read_uint32(rr_rdata+rr_rdlen-20); 4638 } 4639 4640 /* add this RR */ 4641 if(!az_insert_rr_decompress(z, rr_chunk->data, rr_chunk->len, 4642 scratch_buffer, rr_dname, rr_type, rr_class, rr_ttl, 4643 rr_rdata, rr_rdlen, NULL)) { 4644 /* failed, malloc error or so */ 4645 return 0; 4646 } 4647 4648 rr_counter++; 4649 chunk_rrlist_gonext(&rr_chunk, &rr_num, &rr_pos, rr_nextpos); 4650 } 4651 if(!have_end_soa) { 4652 log_err("no end SOA record for AXFR"); 4653 return 0; 4654 } 4655 4656 xfr->serial = serial; 4657 xfr->have_zone = 1; 4658 return 1; 4659 } 4660 4661 /** apply HTTP to zone in memory. z is locked. false on failure(mallocfail) */ 4662 static int 4663 apply_http(struct auth_xfer* xfr, struct auth_zone* z, 4664 struct sldns_buffer* scratch_buffer) 4665 { 4666 /* parse data in chunks */ 4667 /* parse RR's and read into memory. ignore $INCLUDE from the 4668 * downloaded file*/ 4669 struct sldns_file_parse_state pstate; 4670 struct auth_chunk* chunk; 4671 size_t chunk_pos; 4672 memset(&pstate, 0, sizeof(pstate)); 4673 pstate.default_ttl = 3600; 4674 if(xfr->namelen < sizeof(pstate.origin)) { 4675 pstate.origin_len = xfr->namelen; 4676 memmove(pstate.origin, xfr->name, xfr->namelen); 4677 } 4678 4679 if(verbosity >= VERB_ALGO) 4680 verbose(VERB_ALGO, "http download %s of size %d", 4681 xfr->task_transfer->master->file, 4682 (int)chunklist_sum(xfr->task_transfer->chunks_first)); 4683 if(xfr->task_transfer->chunks_first && verbosity >= VERB_ALGO) { 4684 char preview[1024]; 4685 if(xfr->task_transfer->chunks_first->len+1 > sizeof(preview)) { 4686 memmove(preview, xfr->task_transfer->chunks_first->data, 4687 sizeof(preview)-1); 4688 preview[sizeof(preview)-1]=0; 4689 } else { 4690 memmove(preview, xfr->task_transfer->chunks_first->data, 4691 xfr->task_transfer->chunks_first->len); 4692 preview[xfr->task_transfer->chunks_first->len]=0; 4693 } 4694 log_info("auth zone http downloaded content preview: %s", 4695 preview); 4696 } 4697 4698 /* perhaps a little syntax check before we try to apply the data? 
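* The first non-comment RR of the download must be this zone's own SOA;
* that rejects non-zonefile content, such as an HTML error page, before
* the current zone contents are deleted below.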
*/ 4699 if(!http_zonefile_syntax_check(xfr, scratch_buffer)) { 4700 log_err("http download %s/%s does not contain a zonefile, " 4701 "but got '%s'", xfr->task_transfer->master->host, 4702 xfr->task_transfer->master->file, 4703 sldns_buffer_begin(scratch_buffer)); 4704 return 0; 4705 } 4706 4707 /* clear the data tree */ 4708 traverse_postorder(&z->data, auth_data_del, NULL); 4709 rbtree_init(&z->data, &auth_data_cmp); 4710 xfr->have_zone = 0; 4711 xfr->serial = 0; 4712 4713 chunk = xfr->task_transfer->chunks_first; 4714 chunk_pos = 0; 4715 pstate.lineno = 0; 4716 while(chunkline_get_line_collated(&chunk, &chunk_pos, scratch_buffer)) { 4717 /* process this line */ 4718 pstate.lineno++; 4719 chunkline_newline_removal(scratch_buffer); 4720 if(chunkline_is_comment_line_or_empty(scratch_buffer)) { 4721 continue; 4722 } 4723 /* parse line and add RR */ 4724 if(http_parse_origin(scratch_buffer, &pstate)) { 4725 continue; /* $ORIGIN has been handled */ 4726 } 4727 if(http_parse_ttl(scratch_buffer, &pstate)) { 4728 continue; /* $TTL has been handled */ 4729 } 4730 if(!http_parse_add_rr(xfr, z, scratch_buffer, &pstate)) { 4731 verbose(VERB_ALGO, "error parsing line [%s:%d] %s", 4732 xfr->task_transfer->master->file, 4733 pstate.lineno, 4734 sldns_buffer_begin(scratch_buffer)); 4735 return 0; 4736 } 4737 } 4738 return 1; 4739 } 4740 4741 /** write http chunks to zonefile to create downloaded file */ 4742 static int 4743 auth_zone_write_chunks(struct auth_xfer* xfr, const char* fname) 4744 { 4745 FILE* out; 4746 struct auth_chunk* p; 4747 out = fopen(fname, "w"); 4748 if(!out) { 4749 log_err("could not open %s: %s", fname, strerror(errno)); 4750 return 0; 4751 } 4752 for(p = xfr->task_transfer->chunks_first; p ; p = p->next) { 4753 if(!write_out(out, (char*)p->data, p->len)) { 4754 log_err("could not write http download to %s", fname); 4755 fclose(out); 4756 return 0; 4757 } 4758 } 4759 fclose(out); 4760 return 1; 4761 } 4762 4763 /** write to zonefile after zone has been updated */ 4764 static void 4765 xfr_write_after_update(struct auth_xfer* xfr, struct module_env* env) 4766 { 4767 struct auth_zone* z; 4768 char tmpfile[1024]; 4769 lock_basic_unlock(&xfr->lock); 4770 4771 /* get lock again, so it is a readlock and concurrently queries 4772 * can be answered */ 4773 lock_rw_rdlock(&env->auth_zones->lock); 4774 z = auth_zone_find(env->auth_zones, xfr->name, xfr->namelen, 4775 xfr->dclass); 4776 if(!z) { 4777 lock_rw_unlock(&env->auth_zones->lock); 4778 /* the zone is gone, ignore xfr results */ 4779 lock_basic_lock(&xfr->lock); 4780 return; 4781 } 4782 lock_rw_rdlock(&z->lock); 4783 lock_basic_lock(&xfr->lock); 4784 lock_rw_unlock(&env->auth_zones->lock); 4785 4786 if(z->zonefile == NULL) { 4787 lock_rw_unlock(&z->lock); 4788 /* no write needed, no zonefile set */ 4789 return; 4790 } 4791 4792 /* write to tempfile first */ 4793 if((size_t)strlen(z->zonefile) + 16 > sizeof(tmpfile)) { 4794 verbose(VERB_ALGO, "tmpfilename too long, cannot update " 4795 " zonefile %s", z->zonefile); 4796 lock_rw_unlock(&z->lock); 4797 return; 4798 } 4799 snprintf(tmpfile, sizeof(tmpfile), "%s.tmp%u", z->zonefile, 4800 (unsigned)getpid()); 4801 if(xfr->task_transfer->master->http) { 4802 /* use the stored chunk list to write them */ 4803 if(!auth_zone_write_chunks(xfr, tmpfile)) { 4804 unlink(tmpfile); 4805 lock_rw_unlock(&z->lock); 4806 } 4807 } else if(!auth_zone_write_file(z, tmpfile)) { 4808 unlink(tmpfile); 4809 lock_rw_unlock(&z->lock); 4810 return; 4811 } 4812 if(rename(tmpfile, z->zonefile) < 0) { 4813 
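/* rename failed: log it and remove the temporary file; the in-memory
 * zone stays updated and any previous zonefile on disk is left as it was */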
log_err("could not rename(%s, %s): %s", tmpfile, z->zonefile, 4814 strerror(errno)); 4815 unlink(tmpfile); 4816 lock_rw_unlock(&z->lock); 4817 return; 4818 } 4819 lock_rw_unlock(&z->lock); 4820 } 4821 4822 /** process chunk list and update zone in memory, 4823 * return false if it did not work */ 4824 static int 4825 xfr_process_chunk_list(struct auth_xfer* xfr, struct module_env* env, 4826 int* ixfr_fail) 4827 { 4828 struct auth_zone* z; 4829 4830 /* obtain locks and structures */ 4831 /* release xfr lock, then, while holding az->lock grab both 4832 * z->lock and xfr->lock */ 4833 lock_basic_unlock(&xfr->lock); 4834 lock_rw_rdlock(&env->auth_zones->lock); 4835 z = auth_zone_find(env->auth_zones, xfr->name, xfr->namelen, 4836 xfr->dclass); 4837 if(!z) { 4838 lock_rw_unlock(&env->auth_zones->lock); 4839 /* the zone is gone, ignore xfr results */ 4840 lock_basic_lock(&xfr->lock); 4841 return 0; 4842 } 4843 lock_rw_wrlock(&z->lock); 4844 lock_basic_lock(&xfr->lock); 4845 lock_rw_unlock(&env->auth_zones->lock); 4846 4847 /* apply data */ 4848 if(xfr->task_transfer->master->http) { 4849 if(!apply_http(xfr, z, env->scratch_buffer)) { 4850 lock_rw_unlock(&z->lock); 4851 verbose(VERB_ALGO, "http from %s: could not store data", 4852 xfr->task_transfer->master->host); 4853 return 0; 4854 } 4855 } else if(xfr->task_transfer->on_ixfr && 4856 !xfr->task_transfer->on_ixfr_is_axfr) { 4857 if(!apply_ixfr(xfr, z, env->scratch_buffer)) { 4858 lock_rw_unlock(&z->lock); 4859 verbose(VERB_ALGO, "xfr from %s: could not store IXFR" 4860 " data", xfr->task_transfer->master->host); 4861 *ixfr_fail = 1; 4862 return 0; 4863 } 4864 } else { 4865 if(!apply_axfr(xfr, z, env->scratch_buffer)) { 4866 lock_rw_unlock(&z->lock); 4867 verbose(VERB_ALGO, "xfr from %s: could not store AXFR" 4868 " data", xfr->task_transfer->master->host); 4869 return 0; 4870 } 4871 } 4872 xfr->zone_expired = 0; 4873 z->zone_expired = 0; 4874 if(!xfr_find_soa(z, xfr)) { 4875 lock_rw_unlock(&z->lock); 4876 verbose(VERB_ALGO, "xfr from %s: no SOA in zone after update" 4877 " (or malformed RR)", xfr->task_transfer->master->host); 4878 return 0; 4879 } 4880 if(xfr->have_zone) 4881 xfr->lease_time = *env->now; 4882 4883 /* unlock */ 4884 lock_rw_unlock(&z->lock); 4885 4886 if(verbosity >= VERB_QUERY && xfr->have_zone) { 4887 char zname[256]; 4888 dname_str(xfr->name, zname); 4889 verbose(VERB_QUERY, "auth zone %s updated to serial %u", zname, 4890 (unsigned)xfr->serial); 4891 } 4892 /* see if we need to write to a zonefile */ 4893 xfr_write_after_update(xfr, env); 4894 return 1; 4895 } 4896 4897 /** disown task_transfer. 
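* Deletes the commpoint and clears the worker and env pointers, so that
* this worker no longer owns the transfer task;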
caller must hold xfr.lock */ 4898 static void 4899 xfr_transfer_disown(struct auth_xfer* xfr) 4900 { 4901 /* remove the commpoint */ 4902 comm_point_delete(xfr->task_transfer->cp); 4903 xfr->task_transfer->cp = NULL; 4904 /* we don't own this item anymore */ 4905 xfr->task_transfer->worker = NULL; 4906 xfr->task_transfer->env = NULL; 4907 } 4908 4909 /** lookup a host name for its addresses, if needed */ 4910 static int 4911 xfr_transfer_lookup_host(struct auth_xfer* xfr, struct module_env* env) 4912 { 4913 struct sockaddr_storage addr; 4914 socklen_t addrlen = 0; 4915 struct auth_master* master = xfr->task_transfer->lookup_target; 4916 struct query_info qinfo; 4917 uint16_t qflags = BIT_RD; 4918 uint8_t dname[LDNS_MAX_DOMAINLEN+1]; 4919 struct edns_data edns; 4920 sldns_buffer* buf = env->scratch_buffer; 4921 if(!master) return 0; 4922 if(extstrtoaddr(master->host, &addr, &addrlen)) { 4923 /* not needed, host is in IP addr format */ 4924 return 0; 4925 } 4926 if(master->allow_notify) 4927 return 0; /* allow-notifies are not transferred from, no 4928 lookup is needed */ 4929 4930 /* use mesh_new_callback to probe for non-addr hosts, 4931 * and then wait for them to be looked up (in cache, or query) */ 4932 qinfo.qname_len = sizeof(dname); 4933 if(sldns_str2wire_dname_buf(master->host, dname, &qinfo.qname_len) 4934 != 0) { 4935 log_err("cannot parse host name of master %s", master->host); 4936 return 0; 4937 } 4938 qinfo.qname = dname; 4939 qinfo.qclass = xfr->dclass; 4940 qinfo.qtype = LDNS_RR_TYPE_A; 4941 if(xfr->task_transfer->lookup_aaaa) 4942 qinfo.qtype = LDNS_RR_TYPE_AAAA; 4943 qinfo.local_alias = NULL; 4944 if(verbosity >= VERB_ALGO) { 4945 char buf[512]; 4946 char buf2[LDNS_MAX_DOMAINLEN+1]; 4947 dname_str(xfr->name, buf2); 4948 snprintf(buf, sizeof(buf), "auth zone %s: master lookup" 4949 " for task_transfer", buf2); 4950 log_query_info(VERB_ALGO, buf, &qinfo); 4951 } 4952 edns.edns_present = 1; 4953 edns.ext_rcode = 0; 4954 edns.edns_version = 0; 4955 edns.bits = EDNS_DO; 4956 edns.opt_list = NULL; 4957 if(sldns_buffer_capacity(buf) < 65535) 4958 edns.udp_size = (uint16_t)sldns_buffer_capacity(buf); 4959 else edns.udp_size = 65535; 4960 4961 /* unlock xfr during mesh_new_callback() because the callback can be 4962 * called straight away */ 4963 lock_basic_unlock(&xfr->lock); 4964 if(!mesh_new_callback(env->mesh, &qinfo, qflags, &edns, buf, 0, 4965 &auth_xfer_transfer_lookup_callback, xfr)) { 4966 lock_basic_lock(&xfr->lock); 4967 log_err("out of memory lookup up master %s", master->host); 4968 return 0; 4969 } 4970 lock_basic_lock(&xfr->lock); 4971 return 1; 4972 } 4973 4974 /** initiate TCP to the target and fetch zone. 4975 * returns true if that was successfully started, and timeout setup. */ 4976 static int 4977 xfr_transfer_init_fetch(struct auth_xfer* xfr, struct module_env* env) 4978 { 4979 struct sockaddr_storage addr; 4980 socklen_t addrlen = 0; 4981 struct auth_master* master = xfr->task_transfer->master; 4982 if(!master) return 0; 4983 if(master->allow_notify) return 0; /* only for notify */ 4984 4985 /* get master addr */ 4986 if(xfr->task_transfer->scan_addr) { 4987 addrlen = xfr->task_transfer->scan_addr->addrlen; 4988 memmove(&addr, &xfr->task_transfer->scan_addr->addr, addrlen); 4989 } else { 4990 if(!extstrtoaddr(master->host, &addr, &addrlen)) { 4991 /* the ones that are not in addr format are supposed 4992 * to be looked up. 
The lookup has failed however, 4993 * so skip them */ 4994 char zname[255+1]; 4995 dname_str(xfr->name, zname); 4996 log_err("%s: failed lookup, cannot transfer from master %s", 4997 zname, master->host); 4998 return 0; 4999 } 5000 } 5001 5002 /* remove previous TCP connection (if any) */ 5003 if(xfr->task_transfer->cp) { 5004 comm_point_delete(xfr->task_transfer->cp); 5005 xfr->task_transfer->cp = NULL; 5006 } 5007 5008 if(master->http) { 5009 /* perform http fetch */ 5010 /* store http port number into sockaddr, 5011 * unless someone used unbound's host@port notation */ 5012 if(strchr(master->host, '@') == NULL) 5013 sockaddr_store_port(&addr, addrlen, master->port); 5014 xfr->task_transfer->cp = outnet_comm_point_for_http( 5015 env->outnet, auth_xfer_transfer_http_callback, xfr, 5016 &addr, addrlen, AUTH_TRANSFER_TIMEOUT, master->ssl, 5017 master->host, master->file); 5018 if(!xfr->task_transfer->cp) { 5019 char zname[255+1]; 5020 dname_str(xfr->name, zname); 5021 verbose(VERB_ALGO, "cannot create http cp " 5022 "connection for %s to %s", zname, 5023 master->host); 5024 return 0; 5025 } 5026 return 1; 5027 } 5028 5029 /* perform AXFR/IXFR */ 5030 /* set the packet to be written */ 5031 /* create new ID */ 5032 xfr->task_transfer->id = (uint16_t)(ub_random(env->rnd)&0xffff); 5033 xfr_create_ixfr_packet(xfr, env->scratch_buffer, 5034 xfr->task_transfer->id, master); 5035 5036 /* connect on fd */ 5037 xfr->task_transfer->cp = outnet_comm_point_for_tcp(env->outnet, 5038 auth_xfer_transfer_tcp_callback, xfr, &addr, addrlen, 5039 env->scratch_buffer, AUTH_TRANSFER_TIMEOUT); 5040 if(!xfr->task_transfer->cp) { 5041 char zname[255+1]; 5042 dname_str(xfr->name, zname); 5043 verbose(VERB_ALGO, "cannot create tcp cp connection for " 5044 "xfr %s to %s", zname, master->host); 5045 return 0; 5046 } 5047 return 1; 5048 } 5049 5050 /** perform next lookup, next transfer TCP, or end and resume wait time task */ 5051 static void 5052 xfr_transfer_nexttarget_or_end(struct auth_xfer* xfr, struct module_env* env) 5053 { 5054 log_assert(xfr->task_transfer->worker == env->worker); 5055 5056 /* are we performing lookups? 
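* That is, resolving master hostnames to A/AAAA addresses; each lookup
* goes through the mesh and this function is entered again from the
* lookup callback.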
*/ 5057 while(xfr->task_transfer->lookup_target) { 5058 if(xfr_transfer_lookup_host(xfr, env)) { 5059 /* wait for lookup to finish, 5060 * note that the hostname may be in unbound's cache 5061 * and we may then get an instant cache response, 5062 * and that calls the callback just like a full 5063 * lookup and lookup failures also call callback */ 5064 lock_basic_unlock(&xfr->lock); 5065 return; 5066 } 5067 xfr_transfer_move_to_next_lookup(xfr, env); 5068 } 5069 5070 /* initiate TCP and fetch the zone from the master */ 5071 /* and set timeout on it */ 5072 while(!xfr_transfer_end_of_list(xfr)) { 5073 xfr->task_transfer->master = xfr_transfer_current_master(xfr); 5074 if(xfr_transfer_init_fetch(xfr, env)) { 5075 /* successfully started, wait for callback */ 5076 lock_basic_unlock(&xfr->lock); 5077 return; 5078 } 5079 /* failed to fetch, next master */ 5080 xfr_transfer_nextmaster(xfr); 5081 } 5082 5083 /* we failed to fetch the zone, move to wait task 5084 * use the shorter retry timeout */ 5085 xfr_transfer_disown(xfr); 5086 5087 /* pick up the nextprobe task and wait */ 5088 xfr_set_timeout(xfr, env, 1, 0); 5089 lock_basic_unlock(&xfr->lock); 5090 } 5091 5092 /** add addrs from A or AAAA rrset to the master */ 5093 static void 5094 xfr_master_add_addrs(struct auth_master* m, struct ub_packed_rrset_key* rrset, 5095 uint16_t rrtype) 5096 { 5097 size_t i; 5098 struct packed_rrset_data* data; 5099 if(!m || !rrset) return; 5100 if(rrtype != LDNS_RR_TYPE_A && rrtype != LDNS_RR_TYPE_AAAA) 5101 return; 5102 data = (struct packed_rrset_data*)rrset->entry.data; 5103 for(i=0; i<data->count; i++) { 5104 struct auth_addr* a; 5105 size_t len = data->rr_len[i] - 2; 5106 uint8_t* rdata = data->rr_data[i]+2; 5107 if(rrtype == LDNS_RR_TYPE_A && len != INET_SIZE) 5108 continue; /* wrong length for A */ 5109 if(rrtype == LDNS_RR_TYPE_AAAA && len != INET6_SIZE) 5110 continue; /* wrong length for AAAA */ 5111 5112 /* add and alloc it */ 5113 a = (struct auth_addr*)calloc(1, sizeof(*a)); 5114 if(!a) { 5115 log_err("out of memory"); 5116 return; 5117 } 5118 if(rrtype == LDNS_RR_TYPE_A) { 5119 struct sockaddr_in* sa; 5120 a->addrlen = (socklen_t)sizeof(*sa); 5121 sa = (struct sockaddr_in*)&a->addr; 5122 sa->sin_family = AF_INET; 5123 sa->sin_port = (in_port_t)htons(UNBOUND_DNS_PORT); 5124 memmove(&sa->sin_addr, rdata, INET_SIZE); 5125 } else { 5126 struct sockaddr_in6* sa; 5127 a->addrlen = (socklen_t)sizeof(*sa); 5128 sa = (struct sockaddr_in6*)&a->addr; 5129 sa->sin6_family = AF_INET6; 5130 sa->sin6_port = (in_port_t)htons(UNBOUND_DNS_PORT); 5131 memmove(&sa->sin6_addr, rdata, INET6_SIZE); 5132 } 5133 if(verbosity >= VERB_ALGO) { 5134 char s[64]; 5135 addr_to_str(&a->addr, a->addrlen, s, sizeof(s)); 5136 verbose(VERB_ALGO, "auth host %s lookup %s", 5137 m->host, s); 5138 } 5139 /* append to list */ 5140 a->next = m->list; 5141 m->list = a; 5142 } 5143 } 5144 5145 /** callback for task_transfer lookup of host name, of A or AAAA */ 5146 void auth_xfer_transfer_lookup_callback(void* arg, int rcode, sldns_buffer* buf, 5147 enum sec_status ATTR_UNUSED(sec), char* ATTR_UNUSED(why_bogus)) 5148 { 5149 struct auth_xfer* xfr = (struct auth_xfer*)arg; 5150 struct module_env* env; 5151 log_assert(xfr->task_transfer); 5152 lock_basic_lock(&xfr->lock); 5153 env = xfr->task_transfer->env; 5154 if(env->outnet->want_to_quit) { 5155 lock_basic_unlock(&xfr->lock); 5156 return; /* stop on quit */ 5157 } 5158 5159 /* process result */ 5160 if(rcode == LDNS_RCODE_NOERROR) { 5161 uint16_t wanted_qtype = LDNS_RR_TYPE_A; 5162 struct 
regional* temp = env->scratch; 5163 struct query_info rq; 5164 struct reply_info* rep; 5165 if(xfr->task_transfer->lookup_aaaa) 5166 wanted_qtype = LDNS_RR_TYPE_AAAA; 5167 memset(&rq, 0, sizeof(rq)); 5168 rep = parse_reply_in_temp_region(buf, temp, &rq); 5169 if(rep && rq.qtype == wanted_qtype && 5170 FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR) { 5171 /* parsed successfully */ 5172 struct ub_packed_rrset_key* answer = 5173 reply_find_answer_rrset(&rq, rep); 5174 if(answer) { 5175 xfr_master_add_addrs(xfr->task_transfer-> 5176 lookup_target, answer, wanted_qtype); 5177 } 5178 } 5179 } 5180 if(xfr->task_transfer->lookup_target->list && 5181 xfr->task_transfer->lookup_target == xfr_transfer_current_master(xfr)) 5182 xfr->task_transfer->scan_addr = xfr->task_transfer->lookup_target->list; 5183 5184 /* move to lookup AAAA after A lookup, move to next hostname lookup, 5185 * or move to fetch the zone, or, if nothing to do, end task_transfer */ 5186 xfr_transfer_move_to_next_lookup(xfr, env); 5187 xfr_transfer_nexttarget_or_end(xfr, env); 5188 } 5189 5190 /** check if xfer (AXFR or IXFR) packet is OK. 5191 * return false if we lost connection (SERVFAIL, or unreadable). 5192 * return false if we need to move from IXFR to AXFR, with gonextonfail 5193 * set to false, so the same master is tried again, but with AXFR. 5194 * return true if fine to link into data. 5195 * return true with transferdone=true when the transfer has ended. 5196 */ 5197 static int 5198 check_xfer_packet(sldns_buffer* pkt, struct auth_xfer* xfr, 5199 int* gonextonfail, int* transferdone) 5200 { 5201 uint8_t* wire = sldns_buffer_begin(pkt); 5202 int i; 5203 if(sldns_buffer_limit(pkt) < LDNS_HEADER_SIZE) { 5204 verbose(VERB_ALGO, "xfr to %s failed, packet too small", 5205 xfr->task_transfer->master->host); 5206 return 0; 5207 } 5208 if(!LDNS_QR_WIRE(wire)) { 5209 verbose(VERB_ALGO, "xfr to %s failed, packet has no QR flag", 5210 xfr->task_transfer->master->host); 5211 return 0; 5212 } 5213 if(LDNS_TC_WIRE(wire)) { 5214 verbose(VERB_ALGO, "xfr to %s failed, packet has TC flag", 5215 xfr->task_transfer->master->host); 5216 return 0; 5217 } 5218 /* check ID */ 5219 if(LDNS_ID_WIRE(wire) != xfr->task_transfer->id) { 5220 verbose(VERB_ALGO, "xfr to %s failed, packet wrong ID", 5221 xfr->task_transfer->master->host); 5222 return 0; 5223 } 5224 if(LDNS_RCODE_WIRE(wire) != LDNS_RCODE_NOERROR) { 5225 char rcode[32]; 5226 sldns_wire2str_rcode_buf((int)LDNS_RCODE_WIRE(wire), rcode, 5227 sizeof(rcode)); 5228 /* if we are doing IXFR, check for fallback */ 5229 if(xfr->task_transfer->on_ixfr) { 5230 if(LDNS_RCODE_WIRE(wire) == LDNS_RCODE_NOTIMPL || 5231 LDNS_RCODE_WIRE(wire) == LDNS_RCODE_SERVFAIL || 5232 LDNS_RCODE_WIRE(wire) == LDNS_RCODE_REFUSED || 5233 LDNS_RCODE_WIRE(wire) == LDNS_RCODE_FORMERR) { 5234 verbose(VERB_ALGO, "xfr to %s, fallback " 5235 "from IXFR to AXFR (with rcode %s)", 5236 xfr->task_transfer->master->host, 5237 rcode); 5238 xfr->task_transfer->ixfr_fail = 1; 5239 *gonextonfail = 0; 5240 return 0; 5241 } 5242 } 5243 verbose(VERB_ALGO, "xfr to %s failed, packet with rcode %s", 5244 xfr->task_transfer->master->host, rcode); 5245 return 0; 5246 } 5247 if(LDNS_OPCODE_WIRE(wire) != LDNS_PACKET_QUERY) { 5248 verbose(VERB_ALGO, "xfr to %s failed, packet with bad opcode", 5249 xfr->task_transfer->master->host); 5250 return 0; 5251 } 5252 if(LDNS_QDCOUNT(wire) > 1) { 5253 verbose(VERB_ALGO, "xfr to %s failed, packet has qdcount %d", 5254 xfr->task_transfer->master->host, 5255 (int)LDNS_QDCOUNT(wire)); 5256 return 0; 5257 } 
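/* the header checks passed; the loops below verify that the question
 * section matches the request (qname, qclass, and IXFR/AXFR qtype) and
 * walk the answer, authority and additional sections, using the SOA
 * serials in the answer section to detect the end of the transfer */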
5258 5259 /* check qname */ 5260 sldns_buffer_set_position(pkt, LDNS_HEADER_SIZE); 5261 for(i=0; i<(int)LDNS_QDCOUNT(wire); i++) { 5262 size_t pos = sldns_buffer_position(pkt); 5263 uint16_t qtype, qclass; 5264 if(pkt_dname_len(pkt) == 0) { 5265 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5266 "malformed dname", 5267 xfr->task_transfer->master->host); 5268 return 0; 5269 } 5270 if(dname_pkt_compare(pkt, sldns_buffer_at(pkt, pos), 5271 xfr->name) != 0) { 5272 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5273 "wrong qname", 5274 xfr->task_transfer->master->host); 5275 return 0; 5276 } 5277 if(sldns_buffer_remaining(pkt) < 4) { 5278 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5279 "truncated query RR", 5280 xfr->task_transfer->master->host); 5281 return 0; 5282 } 5283 qtype = sldns_buffer_read_u16(pkt); 5284 qclass = sldns_buffer_read_u16(pkt); 5285 if(qclass != xfr->dclass) { 5286 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5287 "wrong qclass", 5288 xfr->task_transfer->master->host); 5289 return 0; 5290 } 5291 if(xfr->task_transfer->on_ixfr) { 5292 if(qtype != LDNS_RR_TYPE_IXFR) { 5293 verbose(VERB_ALGO, "xfr to %s failed, packet " 5294 "with wrong qtype, expected IXFR", 5295 xfr->task_transfer->master->host); 5296 return 0; 5297 } 5298 } else { 5299 if(qtype != LDNS_RR_TYPE_AXFR) { 5300 verbose(VERB_ALGO, "xfr to %s failed, packet " 5301 "with wrong qtype, expected AXFR", 5302 xfr->task_transfer->master->host); 5303 return 0; 5304 } 5305 } 5306 } 5307 5308 /* check parse of RRs in packet, store first SOA serial 5309 * to be able to detect last SOA (with that serial) to see if done */ 5310 /* also check for IXFR 'zone up to date' reply */ 5311 for(i=0; i<(int)LDNS_ANCOUNT(wire); i++) { 5312 size_t pos = sldns_buffer_position(pkt); 5313 uint16_t tp, rdlen; 5314 if(pkt_dname_len(pkt) == 0) { 5315 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5316 "malformed dname in answer section", 5317 xfr->task_transfer->master->host); 5318 return 0; 5319 } 5320 if(sldns_buffer_remaining(pkt) < 10) { 5321 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5322 "truncated RR", 5323 xfr->task_transfer->master->host); 5324 return 0; 5325 } 5326 tp = sldns_buffer_read_u16(pkt); 5327 (void)sldns_buffer_read_u16(pkt); /* class */ 5328 (void)sldns_buffer_read_u32(pkt); /* ttl */ 5329 rdlen = sldns_buffer_read_u16(pkt); 5330 if(sldns_buffer_remaining(pkt) < rdlen) { 5331 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5332 "truncated RR rdata", 5333 xfr->task_transfer->master->host); 5334 return 0; 5335 } 5336 5337 /* RR parses (haven't checked rdata itself), now look at 5338 * SOA records to see serial number */ 5339 if(xfr->task_transfer->rr_scan_num == 0 && 5340 tp != LDNS_RR_TYPE_SOA) { 5341 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5342 "malformed zone transfer, no start SOA", 5343 xfr->task_transfer->master->host); 5344 return 0; 5345 } 5346 if(xfr->task_transfer->rr_scan_num == 1 && 5347 tp != LDNS_RR_TYPE_SOA) { 5348 /* second RR is not a SOA record, this is not an IXFR 5349 * the master is replying with an AXFR */ 5350 xfr->task_transfer->on_ixfr_is_axfr = 1; 5351 } 5352 if(tp == LDNS_RR_TYPE_SOA) { 5353 uint32_t serial; 5354 if(rdlen < 22) { 5355 verbose(VERB_ALGO, "xfr to %s failed, packet " 5356 "with SOA with malformed rdata", 5357 xfr->task_transfer->master->host); 5358 return 0; 5359 } 5360 if(dname_pkt_compare(pkt, sldns_buffer_at(pkt, pos), 5361 xfr->name) != 0) { 5362 verbose(VERB_ALGO, "xfr to %s failed, packet " 5363 "with SOA with wrong 
dname", 5364 xfr->task_transfer->master->host); 5365 return 0; 5366 } 5367 5368 /* read serial number of SOA */ 5369 serial = sldns_buffer_read_u32_at(pkt, 5370 sldns_buffer_position(pkt)+rdlen-20); 5371 5372 /* check for IXFR 'zone has SOA x' reply */ 5373 if(xfr->task_transfer->on_ixfr && 5374 xfr->task_transfer->rr_scan_num == 0 && 5375 LDNS_ANCOUNT(wire)==1) { 5376 verbose(VERB_ALGO, "xfr to %s ended, " 5377 "IXFR reply that zone has serial %u", 5378 xfr->task_transfer->master->host, 5379 (unsigned)serial); 5380 return 0; 5381 } 5382 5383 /* if first SOA, store serial number */ 5384 if(xfr->task_transfer->got_xfr_serial == 0) { 5385 xfr->task_transfer->got_xfr_serial = 1; 5386 xfr->task_transfer->incoming_xfr_serial = 5387 serial; 5388 verbose(VERB_ALGO, "xfr %s: contains " 5389 "SOA serial %u", 5390 xfr->task_transfer->master->host, 5391 (unsigned)serial); 5392 /* see if end of AXFR */ 5393 } else if(!xfr->task_transfer->on_ixfr || 5394 xfr->task_transfer->on_ixfr_is_axfr) { 5395 /* second SOA with serial is the end 5396 * for AXFR */ 5397 *transferdone = 1; 5398 verbose(VERB_ALGO, "xfr %s: last AXFR packet", 5399 xfr->task_transfer->master->host); 5400 /* for IXFR, count SOA records with that serial */ 5401 } else if(xfr->task_transfer->incoming_xfr_serial == 5402 serial && xfr->task_transfer->got_xfr_serial 5403 == 1) { 5404 xfr->task_transfer->got_xfr_serial++; 5405 /* if not first soa, if serial==firstserial, the 5406 * third time we are at the end, for IXFR */ 5407 } else if(xfr->task_transfer->incoming_xfr_serial == 5408 serial && xfr->task_transfer->got_xfr_serial 5409 == 2) { 5410 verbose(VERB_ALGO, "xfr %s: last IXFR packet", 5411 xfr->task_transfer->master->host); 5412 *transferdone = 1; 5413 /* continue parse check, if that succeeds, 5414 * transfer is done */ 5415 } 5416 } 5417 xfr->task_transfer->rr_scan_num++; 5418 5419 /* skip over RR rdata to go to the next RR */ 5420 sldns_buffer_skip(pkt, (ssize_t)rdlen); 5421 } 5422 5423 /* check authority section */ 5424 /* we skip over the RRs checking packet format */ 5425 for(i=0; i<(int)LDNS_NSCOUNT(wire); i++) { 5426 uint16_t rdlen; 5427 if(pkt_dname_len(pkt) == 0) { 5428 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5429 "malformed dname in authority section", 5430 xfr->task_transfer->master->host); 5431 return 0; 5432 } 5433 if(sldns_buffer_remaining(pkt) < 10) { 5434 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5435 "truncated RR", 5436 xfr->task_transfer->master->host); 5437 return 0; 5438 } 5439 (void)sldns_buffer_read_u16(pkt); /* type */ 5440 (void)sldns_buffer_read_u16(pkt); /* class */ 5441 (void)sldns_buffer_read_u32(pkt); /* ttl */ 5442 rdlen = sldns_buffer_read_u16(pkt); 5443 if(sldns_buffer_remaining(pkt) < rdlen) { 5444 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5445 "truncated RR rdata", 5446 xfr->task_transfer->master->host); 5447 return 0; 5448 } 5449 /* skip over RR rdata to go to the next RR */ 5450 sldns_buffer_skip(pkt, (ssize_t)rdlen); 5451 } 5452 5453 /* check additional section */ 5454 for(i=0; i<(int)LDNS_ARCOUNT(wire); i++) { 5455 uint16_t rdlen; 5456 if(pkt_dname_len(pkt) == 0) { 5457 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5458 "malformed dname in additional section", 5459 xfr->task_transfer->master->host); 5460 return 0; 5461 } 5462 if(sldns_buffer_remaining(pkt) < 10) { 5463 verbose(VERB_ALGO, "xfr to %s failed, packet with " 5464 "truncated RR", 5465 xfr->task_transfer->master->host); 5466 return 0; 5467 } 5468 (void)sldns_buffer_read_u16(pkt); /* type */ 5469 
/** Link the data from this packet into the worklist of transferred data */
static int
xfer_link_data(sldns_buffer* pkt, struct auth_xfer* xfr)
{
	/* alloc it */
	struct auth_chunk* e;
	e = (struct auth_chunk*)calloc(1, sizeof(*e));
	if(!e) return 0;
	e->next = NULL;
	e->len = sldns_buffer_limit(pkt);
	e->data = memdup(sldns_buffer_begin(pkt), e->len);
	if(!e->data) {
		free(e);
		return 0;
	}

	/* alloc succeeded, link into list */
	if(!xfr->task_transfer->chunks_first)
		xfr->task_transfer->chunks_first = e;
	if(xfr->task_transfer->chunks_last)
		xfr->task_transfer->chunks_last->next = e;
	xfr->task_transfer->chunks_last = e;
	return 1;
}

/** task_transfer: the list of data is complete.  Process it; if that failed
 * move to the next master, if it succeeded, end the task_transfer */
static void
process_list_end_transfer(struct auth_xfer* xfr, struct module_env* env)
{
	int ixfr_fail = 0;
	if(xfr_process_chunk_list(xfr, env, &ixfr_fail)) {
		/* it worked! */
		auth_chunks_delete(xfr->task_transfer);

		/* we fetched the zone, move to wait task */
		xfr_transfer_disown(xfr);

		if(xfr->notify_received && (!xfr->notify_has_serial ||
			(xfr->notify_has_serial &&
			xfr_serial_means_update(xfr, xfr->notify_serial)))) {
			uint32_t sr = xfr->notify_serial;
			int has_sr = xfr->notify_has_serial;
			/* we received a notify while probe/transfer was
			 * in progress.  Start a new probe and transfer */
			xfr->notify_received = 0;
			xfr->notify_has_serial = 0;
			xfr->notify_serial = 0;
			if(!xfr_start_probe(xfr, env, NULL)) {
				/* if we couldn't start it, already in
				 * progress; restore notify serial,
				 * while xfr still locked */
				xfr->notify_received = 1;
				xfr->notify_has_serial = has_sr;
				xfr->notify_serial = sr;
				lock_basic_unlock(&xfr->lock);
			}
			return;
		} else {
			/* pick up the nextprobe task and wait (normal
			 * wait time) */
			xfr_set_timeout(xfr, env, 0, 0);
		}
		lock_basic_unlock(&xfr->lock);
		return;
	}
	/* processing failed */
	/* when done, delete data from list */
	auth_chunks_delete(xfr->task_transfer);
	if(ixfr_fail) {
		xfr->task_transfer->ixfr_fail = 1;
	} else {
		xfr_transfer_nextmaster(xfr);
	}
	xfr_transfer_nexttarget_or_end(xfr, env);
}

/** callback for task_transfer tcp connections */
int
auth_xfer_transfer_tcp_callback(struct comm_point* c, void* arg, int err,
	struct comm_reply* ATTR_UNUSED(repinfo))
{
	struct auth_xfer* xfr = (struct auth_xfer*)arg;
	struct module_env* env;
	int gonextonfail = 1;
	int transferdone = 0;
	log_assert(xfr->task_transfer);
	lock_basic_lock(&xfr->lock);
	env = xfr->task_transfer->env;
	if(env->outnet->want_to_quit) {
		lock_basic_unlock(&xfr->lock);
		return 0; /* stop on quit */
	}

	if(err != NETEVENT_NOERROR) {
		/* connection failed, closed, or timeout */
		/* stop this transfer, cleanup
		 * and continue task_transfer*/
		verbose(VERB_ALGO, "xfr stopped, connection lost to %s",
			xfr->task_transfer->master->host);
	failed:
		/* delete transferred data from list */
		auth_chunks_delete(xfr->task_transfer);
		comm_point_delete(xfr->task_transfer->cp);
		xfr->task_transfer->cp = NULL;
		xfr_transfer_nextmaster(xfr);
		xfr_transfer_nexttarget_or_end(xfr, env);
		return 0;
	}

	/* handle returned packet */
	/* if it fails, cleanup and end this transfer */
	/* if it needs to fallback from IXFR to AXFR, do that */
	if(!check_xfer_packet(c->buffer, xfr, &gonextonfail, &transferdone)) {
		goto failed;
	}
	/* if it is good, link it into the list of data */
	/* if the link into list of data fails (malloc fail) cleanup and end */
	if(!xfer_link_data(c->buffer, xfr)) {
		verbose(VERB_ALGO, "xfr stopped to %s, malloc failed",
			xfr->task_transfer->master->host);
		goto failed;
	}
	/* if the transfer is done now, disconnect and process the list */
	if(transferdone) {
		comm_point_delete(xfr->task_transfer->cp);
		xfr->task_transfer->cp = NULL;
		process_list_end_transfer(xfr, env);
		return 0;
	}

	/* if we want to read more messages, setup the commpoint to read
	 * a DNS packet, and the timeout */
	lock_basic_unlock(&xfr->lock);
	c->tcp_is_reading = 1;
	sldns_buffer_clear(c->buffer);
	comm_point_start_listening(c, -1, AUTH_TRANSFER_TIMEOUT);
	return 0;
}

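/* The HTTP callback below differs from the TCP callback above mainly in
 * how completion is signalled: NETEVENT_DONE marks the end of the
 * download, while NETEVENT_NOERROR only means that more data arrived and
 * is queued as another chunk. */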
/** callback for task_transfer http connections */
int
auth_xfer_transfer_http_callback(struct comm_point* c, void* arg, int err,
	struct comm_reply* repinfo)
{
	struct auth_xfer* xfr = (struct auth_xfer*)arg;
	struct module_env* env;
	log_assert(xfr->task_transfer);
	lock_basic_lock(&xfr->lock);
	env = xfr->task_transfer->env;
	if(env->outnet->want_to_quit) {
		lock_basic_unlock(&xfr->lock);
		return 0; /* stop on quit */
	}
	verbose(VERB_ALGO, "auth zone transfer http callback");

	if(err != NETEVENT_NOERROR && err != NETEVENT_DONE) {
		/* connection failed, closed, or timeout */
		/* stop this transfer, cleanup
		 * and continue task_transfer*/
		verbose(VERB_ALGO, "http stopped, connection lost to %s",
			xfr->task_transfer->master->host);
	failed:
		/* delete transferred data from list */
		auth_chunks_delete(xfr->task_transfer);
		if(repinfo) repinfo->c = NULL; /* signal cp deleted to
			the routine calling this callback */
		comm_point_delete(xfr->task_transfer->cp);
		xfr->task_transfer->cp = NULL;
		xfr_transfer_nextmaster(xfr);
		xfr_transfer_nexttarget_or_end(xfr, env);
		return 0;
	}

	/* if it is good, link it into the list of data */
	/* if the link into list of data fails (malloc fail) cleanup and end */
	if(sldns_buffer_limit(c->buffer) > 0) {
		verbose(VERB_ALGO, "auth zone http queued up %d bytes",
			(int)sldns_buffer_limit(c->buffer));
		if(!xfer_link_data(c->buffer, xfr)) {
			verbose(VERB_ALGO, "http stopped to %s, malloc failed",
				xfr->task_transfer->master->host);
			goto failed;
		}
	}
	/* if the transfer is done now, disconnect and process the list */
	if(err == NETEVENT_DONE) {
		if(repinfo) repinfo->c = NULL; /* signal cp deleted to
			the routine calling this callback */
		comm_point_delete(xfr->task_transfer->cp);
		xfr->task_transfer->cp = NULL;
		process_list_end_transfer(xfr, env);
		return 0;
	}

	/* if we want to read more messages, setup the commpoint to read
	 * a DNS packet, and the timeout */
	lock_basic_unlock(&xfr->lock);
	c->tcp_is_reading = 1;
	sldns_buffer_clear(c->buffer);
	comm_point_start_listening(c, -1, AUTH_TRANSFER_TIMEOUT);
	return 0;
}


/** start transfer task by this worker, xfr is locked. */
static void
xfr_start_transfer(struct auth_xfer* xfr, struct module_env* env,
	struct auth_master* master)
{
	log_assert(xfr->task_transfer != NULL);
	log_assert(xfr->task_transfer->worker == NULL);
	log_assert(xfr->task_transfer->chunks_first == NULL);
	log_assert(xfr->task_transfer->chunks_last == NULL);
	xfr->task_transfer->worker = env->worker;
	xfr->task_transfer->env = env;

	/* init transfer process */
	/* find that master in the transfer's list of masters */
	xfr_transfer_start_list(xfr, master);
	/* start lookup for hostnames in transfer master list */
	xfr_transfer_start_lookups(xfr);

	/* initiate TCP, and set timeout on it */
	xfr_transfer_nexttarget_or_end(xfr, env);
}

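/* The disown functions below release a task so that another worker can
 * pick it up later; the timer and comm point are tied to this worker's
 * event base and are therefore deleted here. */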
/** disown task_probe. caller must hold xfr.lock */
static void
xfr_probe_disown(struct auth_xfer* xfr)
{
	/* remove timer (from this worker's event base) */
	comm_timer_delete(xfr->task_probe->timer);
	xfr->task_probe->timer = NULL;
	/* remove the commpoint */
	comm_point_delete(xfr->task_probe->cp);
	xfr->task_probe->cp = NULL;
	/* we don't own this item anymore */
	xfr->task_probe->worker = NULL;
	xfr->task_probe->env = NULL;
}

/** send the UDP probe to the master, this is part of task_probe */
static int
xfr_probe_send_probe(struct auth_xfer* xfr, struct module_env* env,
	int timeout)
{
	struct sockaddr_storage addr;
	socklen_t addrlen = 0;
	struct timeval t;
	/* pick master */
	struct auth_master* master = xfr_probe_current_master(xfr);
	if(!master) return 0;
	if(master->allow_notify) return 0; /* only for notify */
	if(master->http) return 0; /* only masters get SOA UDP probe,
		not urls, if those are in this list */

	/* get master addr */
	if(xfr->task_probe->scan_addr) {
		addrlen = xfr->task_probe->scan_addr->addrlen;
		memmove(&addr, &xfr->task_probe->scan_addr->addr, addrlen);
	} else {
		if(!extstrtoaddr(master->host, &addr, &addrlen)) {
			/* the ones that are not in addr format are supposed
			 * to be looked up.  The lookup has failed however,
			 * so skip them */
			char zname[255+1];
			dname_str(xfr->name, zname);
			log_err("%s: failed lookup, cannot probe to master %s",
				zname, master->host);
			return 0;
		}
	}

	/* create packet */
	/* create new ID for new probes, but not on timeout retries,
	 * this means we'll accept replies to previous retries to same ip */
	if(timeout == AUTH_PROBE_TIMEOUT)
		xfr->task_probe->id = (uint16_t)(ub_random(env->rnd)&0xffff);
	xfr_create_soa_probe_packet(xfr, env->scratch_buffer,
		xfr->task_probe->id);
	if(!xfr->task_probe->cp) {
		xfr->task_probe->cp = outnet_comm_point_for_udp(env->outnet,
			auth_xfer_probe_udp_callback, xfr, &addr, addrlen);
		if(!xfr->task_probe->cp) {
			char zname[255+1];
			dname_str(xfr->name, zname);
			verbose(VERB_ALGO, "cannot create udp cp for "
				"probe %s to %s", zname, master->host);
			return 0;
		}
	}
	if(!xfr->task_probe->timer) {
		xfr->task_probe->timer = comm_timer_create(env->worker_base,
			auth_xfer_probe_timer_callback, xfr);
		if(!xfr->task_probe->timer) {
			log_err("malloc failure");
			return 0;
		}
	}

	/* send udp packet */
	if(!comm_point_send_udp_msg(xfr->task_probe->cp, env->scratch_buffer,
		(struct sockaddr*)&addr, addrlen)) {
		char zname[255+1];
		dname_str(xfr->name, zname);
		verbose(VERB_ALGO, "failed to send soa probe for %s to %s",
			zname, master->host);
		return 0;
	}
	xfr->task_probe->timeout = timeout;
#ifndef S_SPLINT_S
	t.tv_sec = timeout/1000;
	t.tv_usec = (timeout%1000)*1000;
#endif
	comm_timer_set(xfr->task_probe->timer, &t);

	return 1;
}

/** callback for task_probe timer */
void
auth_xfer_probe_timer_callback(void* arg)
{
	struct auth_xfer* xfr = (struct auth_xfer*)arg;
	struct module_env* env;
	log_assert(xfr->task_probe);
	lock_basic_lock(&xfr->lock);
	env = xfr->task_probe->env;
	if(env->outnet->want_to_quit) {
		lock_basic_unlock(&xfr->lock);
		return; /* stop on quit */
	}

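	/* No reply arrived within the timeout; retry with a doubled timeout
	 * until it exceeds AUTH_PROBE_TIMEOUT_STOP, then give up on this
	 * master and move on to the next one. */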
	if(xfr->task_probe->timeout <= AUTH_PROBE_TIMEOUT_STOP) {
		/* try again with bigger timeout */
		if(xfr_probe_send_probe(xfr, env, xfr->task_probe->timeout*2)) {
			lock_basic_unlock(&xfr->lock);
			return;
		}
	}
	/* delete commpoint so a new one is created, with a fresh port nr */
	comm_point_delete(xfr->task_probe->cp);
	xfr->task_probe->cp = NULL;

	/* too many timeouts (or fail to send), move to next or end */
	xfr_probe_nextmaster(xfr);
	xfr_probe_send_or_end(xfr, env);
}

/** callback for task_probe udp packets */
int
auth_xfer_probe_udp_callback(struct comm_point* c, void* arg, int err,
	struct comm_reply* repinfo)
{
	struct auth_xfer* xfr = (struct auth_xfer*)arg;
	struct module_env* env;
	log_assert(xfr->task_probe);
	lock_basic_lock(&xfr->lock);
	env = xfr->task_probe->env;
	if(env->outnet->want_to_quit) {
		lock_basic_unlock(&xfr->lock);
		return 0; /* stop on quit */
	}

	/* the comm_point_udp_callback is in a for loop for NUM_UDP_PER_SELECT
	 * and we set rep.c=NULL to stop it from looking inside the commpoint*/
	repinfo->c = NULL;
	/* stop the timer */
	comm_timer_disable(xfr->task_probe->timer);

	/* see if we got a packet and what that means */
	if(err == NETEVENT_NOERROR) {
		uint32_t serial = 0;
		if(check_packet_ok(c->buffer, LDNS_RR_TYPE_SOA, xfr,
			&serial)) {
			/* successful lookup */
			if(verbosity >= VERB_ALGO) {
				char buf[256];
				dname_str(xfr->name, buf);
				verbose(VERB_ALGO, "auth zone %s: soa probe "
					"serial is %u", buf, (unsigned)serial);
			}
			/* see if this serial indicates that the zone has
			 * to be updated */
			if(xfr_serial_means_update(xfr, serial)) {
				/* if updated, start the transfer task, if needed */
				verbose(VERB_ALGO, "auth_zone updated, start transfer");
				if(xfr->task_transfer->worker == NULL) {
					struct auth_master* master =
						xfr_probe_current_master(xfr);
					/* if we have download URLs use them
					 * in preference to this master we
					 * just probed the SOA from */
					if(xfr->task_transfer->masters &&
						xfr->task_transfer->masters->http)
						master = NULL;
					xfr_probe_disown(xfr);
					xfr_start_transfer(xfr, env, master);
					return 0;

				}
			} else {
				/* if zone not updated, start the wait timer again */
				verbose(VERB_ALGO, "auth_zone unchanged, new lease, wait");
				if(xfr->have_zone)
					xfr->lease_time = *env->now;
				if(xfr->task_nextprobe->worker == NULL)
					xfr_set_timeout(xfr, env, 0, 0);
			}
			/* other tasks are running, we don't do this anymore */
			xfr_probe_disown(xfr);
			lock_basic_unlock(&xfr->lock);
			/* return, we don't send a reply to this udp packet,
			 * and we set up the tasks to do next */
			return 0;
		}
	}
	if(verbosity >= VERB_ALGO) {
		char buf[256];
		dname_str(xfr->name, buf);
		verbose(VERB_ALGO, "auth zone %s: soa probe failed", buf);
	}

	/* failed lookup */
	/* delete commpoint so a new one is created, with a fresh port nr */
	comm_point_delete(xfr->task_probe->cp);
	xfr->task_probe->cp = NULL;

	/* if the result was not a successful probe, we need
	 * to send the next one */
	xfr_probe_nextmaster(xfr);
	xfr_probe_send_or_end(xfr, env);
	return 0;
}

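/* Masters configured by hostname rather than by IP address are resolved
 * below through unbound's own mesh (the A lookup first, then AAAA); the
 * probe waits for the lookup callback before it sends SOA queries to the
 * learned addresses. */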
/** lookup a host name for its addresses, if needed */
static int
xfr_probe_lookup_host(struct auth_xfer* xfr, struct module_env* env)
{
	struct sockaddr_storage addr;
	socklen_t addrlen = 0;
	struct auth_master* master = xfr->task_probe->lookup_target;
	struct query_info qinfo;
	uint16_t qflags = BIT_RD;
	uint8_t dname[LDNS_MAX_DOMAINLEN+1];
	struct edns_data edns;
	sldns_buffer* buf = env->scratch_buffer;
	if(!master) return 0;
	if(extstrtoaddr(master->host, &addr, &addrlen)) {
		/* not needed, host is in IP addr format */
		return 0;
	}
	if(master->allow_notify && !master->http &&
		strchr(master->host, '/') != NULL &&
		strchr(master->host, '/') == strrchr(master->host, '/')) {
		return 0; /* is IP/prefix format, not something to look up */
	}

	/* use mesh_new_callback to probe for non-addr hosts,
	 * and then wait for them to be looked up (in cache, or query) */
	qinfo.qname_len = sizeof(dname);
	if(sldns_str2wire_dname_buf(master->host, dname, &qinfo.qname_len)
		!= 0) {
		log_err("cannot parse host name of master %s", master->host);
		return 0;
	}
	qinfo.qname = dname;
	qinfo.qclass = xfr->dclass;
	qinfo.qtype = LDNS_RR_TYPE_A;
	if(xfr->task_probe->lookup_aaaa)
		qinfo.qtype = LDNS_RR_TYPE_AAAA;
	qinfo.local_alias = NULL;
	if(verbosity >= VERB_ALGO) {
		char buf[512];
		char buf2[LDNS_MAX_DOMAINLEN+1];
		dname_str(xfr->name, buf2);
		snprintf(buf, sizeof(buf), "auth zone %s: master lookup"
			" for task_probe", buf2);
		log_query_info(VERB_ALGO, buf, &qinfo);
	}
	edns.edns_present = 1;
	edns.ext_rcode = 0;
	edns.edns_version = 0;
	edns.bits = EDNS_DO;
	edns.opt_list = NULL;
	if(sldns_buffer_capacity(buf) < 65535)
		edns.udp_size = (uint16_t)sldns_buffer_capacity(buf);
	else edns.udp_size = 65535;

	/* unlock xfr during mesh_new_callback() because the callback can be
	 * called straight away */
	lock_basic_unlock(&xfr->lock);
	if(!mesh_new_callback(env->mesh, &qinfo, qflags, &edns, buf, 0,
		&auth_xfer_probe_lookup_callback, xfr)) {
		lock_basic_lock(&xfr->lock);
		log_err("out of memory looking up master %s", master->host);
		return 0;
	}
	lock_basic_lock(&xfr->lock);
	return 1;
}

/** move to sending the probe packets, next if fails. task_probe */
static void
xfr_probe_send_or_end(struct auth_xfer* xfr, struct module_env* env)
{
	/* are we doing hostname lookups? */
	while(xfr->task_probe->lookup_target) {
		if(xfr_probe_lookup_host(xfr, env)) {
			/* wait for lookup to finish,
			 * note that the hostname may be in unbound's cache
			 * and we may then get an instant cache response,
			 * and that calls the callback just like a full
			 * lookup and lookup failures also call callback */
			lock_basic_unlock(&xfr->lock);
			return;
		}
		xfr_probe_move_to_next_lookup(xfr, env);
	}
	/* probe of the list has ended.  Create or refresh the list of
	 * allow_notify addrs */
	probe_copy_masters_for_allow_notify(xfr);
	if(xfr->task_probe->only_lookup) {
		/* only wanted lookups for copy, stop probe and start wait */
		xfr->task_probe->only_lookup = 0;
		xfr_probe_disown(xfr);
		xfr_set_timeout(xfr, env, 0, 0);
		lock_basic_unlock(&xfr->lock);
		return;
	}

	/* send probe packets */
	while(!xfr_probe_end_of_list(xfr)) {
		if(xfr_probe_send_probe(xfr, env, AUTH_PROBE_TIMEOUT)) {
			/* successfully sent probe, wait for callback */
			lock_basic_unlock(&xfr->lock);
			return;
		}
		/* failed to send probe, next master */
		xfr_probe_nextmaster(xfr);
	}

	/* we failed to send this as well, move to the wait task,
	 * use the shorter retry timeout */
	xfr_probe_disown(xfr);

	/* pick up the nextprobe task and wait */
	xfr_set_timeout(xfr, env, 1, 0);
	lock_basic_unlock(&xfr->lock);
}

/** callback for task_probe lookup of host name, of A or AAAA */
void auth_xfer_probe_lookup_callback(void* arg, int rcode, sldns_buffer* buf,
	enum sec_status ATTR_UNUSED(sec), char* ATTR_UNUSED(why_bogus))
{
	struct auth_xfer* xfr = (struct auth_xfer*)arg;
	struct module_env* env;
	log_assert(xfr->task_probe);
	lock_basic_lock(&xfr->lock);
	env = xfr->task_probe->env;
	if(env->outnet->want_to_quit) {
		lock_basic_unlock(&xfr->lock);
		return; /* stop on quit */
	}

	/* process result */
	if(rcode == LDNS_RCODE_NOERROR) {
		uint16_t wanted_qtype = LDNS_RR_TYPE_A;
		struct regional* temp = env->scratch;
		struct query_info rq;
		struct reply_info* rep;
		if(xfr->task_probe->lookup_aaaa)
			wanted_qtype = LDNS_RR_TYPE_AAAA;
		memset(&rq, 0, sizeof(rq));
		rep = parse_reply_in_temp_region(buf, temp, &rq);
		if(rep && rq.qtype == wanted_qtype &&
			FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR) {
			/* parsed successfully */
			struct ub_packed_rrset_key* answer =
				reply_find_answer_rrset(&rq, rep);
			if(answer) {
				xfr_master_add_addrs(xfr->task_probe->
					lookup_target, answer, wanted_qtype);
			}
		}
	}
	if(xfr->task_probe->lookup_target->list &&
		xfr->task_probe->lookup_target == xfr_probe_current_master(xfr))
		xfr->task_probe->scan_addr = xfr->task_probe->lookup_target->list;

	/* move to lookup AAAA after A lookup, move to next hostname lookup,
	 * or move to send the probes, or, if nothing to do, end task_probe */
	xfr_probe_move_to_next_lookup(xfr, env);
	xfr_probe_send_or_end(xfr, env);
}

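/* The zone transfer work is split over three tasks that can be owned by
 * different workers: task_nextprobe waits for the refresh (or retry)
 * timer, task_probe looks up master addresses and sends SOA UDP probes,
 * and task_transfer fetches the zone with AXFR/IXFR over TCP or over
 * HTTP(S). */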
/** disown task_nextprobe. caller must hold xfr.lock */
static void
xfr_nextprobe_disown(struct auth_xfer* xfr)
{
	/* delete the timer, because the next worker to pick this up may
	 * not have the same event base */
	comm_timer_delete(xfr->task_nextprobe->timer);
	xfr->task_nextprobe->timer = NULL;
	xfr->task_nextprobe->next_probe = 0;
	/* we don't own this item anymore */
	xfr->task_nextprobe->worker = NULL;
	xfr->task_nextprobe->env = NULL;
}

/** xfer nextprobe timeout callback, this is part of task_nextprobe */
void
auth_xfer_timer(void* arg)
{
	struct auth_xfer* xfr = (struct auth_xfer*)arg;
	struct module_env* env;
	log_assert(xfr->task_nextprobe);
	lock_basic_lock(&xfr->lock);
	env = xfr->task_nextprobe->env;
	if(env->outnet->want_to_quit) {
		lock_basic_unlock(&xfr->lock);
		return; /* stop on quit */
	}

	/* see if zone has expired, and if so, also set auth_zone expired */
	if(xfr->have_zone && !xfr->zone_expired &&
		*env->now >= xfr->lease_time + xfr->expiry) {
		lock_basic_unlock(&xfr->lock);
		auth_xfer_set_expired(xfr, env, 1);
		lock_basic_lock(&xfr->lock);
	}

	xfr_nextprobe_disown(xfr);

	if(!xfr_start_probe(xfr, env, NULL)) {
		/* not started because already in progress */
		lock_basic_unlock(&xfr->lock);
	}
}

/** return true if there are probe (SOA UDP query) targets in the master list */
static int
have_probe_targets(struct auth_master* list)
{
	struct auth_master* p;
	for(p=list; p; p = p->next) {
		if(!p->allow_notify && p->host)
			return 1;
	}
	return 0;
}

/** start task_probe if possible, if no masters for probe start task_transfer
 * returns true if the task has been started, and false if the task is
 * already in progress. */
static int
xfr_start_probe(struct auth_xfer* xfr, struct module_env* env,
	struct auth_master* spec)
{
	/* see if we need to start a probe (or maybe it is already in
	 * progress (due to notify)) */
	if(xfr->task_probe->worker == NULL) {
		if(!have_probe_targets(xfr->task_probe->masters) &&
			!(xfr->task_probe->only_lookup &&
			xfr->task_probe->masters != NULL)) {
			/* useless to pick up task_probe, no masters to
			 * probe.  Instead attempt to pick up task transfer */
			if(xfr->task_transfer->worker == NULL) {
				xfr_start_transfer(xfr, env, spec);
				return 1;
			}
			/* task transfer already in progress */
			return 0;
		}

		/* pick up the probe task ourselves */
		xfr->task_probe->worker = env->worker;
		xfr->task_probe->env = env;
		xfr->task_probe->cp = NULL;

		/* start the task */
		/* if this was a timeout, there is no specific first master
		 * to scan */
		/* otherwise, spec is the (non-NULL) notified master; scan
		 * it first and also transfer first from it */
		xfr_probe_start_list(xfr, spec);
		/* setup to start the lookup of hostnames of masters afresh */
		xfr_probe_start_lookups(xfr);
		/* send the probe packet or next send, or end task */
		xfr_probe_send_or_end(xfr, env);
		return 1;
	}
	return 0;
}

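/* xfr_set_timeout below schedules the next probe at roughly the lease
 * start plus the SOA refresh interval; after a failure it uses the retry
 * interval plus an exponential backoff capped at AUTH_TRANSFER_MAX_BACKOFF,
 * and when the lease time is known the timer is pulled back to the zone
 * expiry time if that comes sooner. */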
/** for task_nextprobe.
 * determine next timeout for auth_xfer.  Also (re)sets timer.
 * @param xfr: task structure
 * @param env: module environment, with worker and time.
 * @param failure: set true if timer should be set for failure retry.
 * @param lookup_only: only perform lookups when timer done, 0 sec timeout
 */
static void
xfr_set_timeout(struct auth_xfer* xfr, struct module_env* env,
	int failure, int lookup_only)
{
	struct timeval tv;
	log_assert(xfr->task_nextprobe != NULL);
	log_assert(xfr->task_nextprobe->worker == NULL ||
		xfr->task_nextprobe->worker == env->worker);
	/* normally, nextprobe = startoflease + refresh,
	 * but if expiry is sooner, use that one.
	 * after a failure, use the retry timer instead. */
	xfr->task_nextprobe->next_probe = *env->now;
	if(xfr->lease_time && !failure)
		xfr->task_nextprobe->next_probe = xfr->lease_time;

	if(!failure) {
		xfr->task_nextprobe->backoff = 0;
	} else {
		if(xfr->task_nextprobe->backoff == 0)
			xfr->task_nextprobe->backoff = 3;
		else xfr->task_nextprobe->backoff *= 2;
		if(xfr->task_nextprobe->backoff > AUTH_TRANSFER_MAX_BACKOFF)
			xfr->task_nextprobe->backoff =
				AUTH_TRANSFER_MAX_BACKOFF;
	}

	if(xfr->have_zone) {
		time_t wait = xfr->refresh;
		if(failure) wait = xfr->retry;
		if(xfr->expiry < wait)
			xfr->task_nextprobe->next_probe += xfr->expiry;
		else xfr->task_nextprobe->next_probe += wait;
		if(failure)
			xfr->task_nextprobe->next_probe +=
				xfr->task_nextprobe->backoff;
		/* put the timer exactly on expiry, if possible */
		if(xfr->lease_time && xfr->lease_time+xfr->expiry <
			xfr->task_nextprobe->next_probe &&
			xfr->lease_time+xfr->expiry > *env->now)
			xfr->task_nextprobe->next_probe =
				xfr->lease_time+xfr->expiry;
	} else {
		xfr->task_nextprobe->next_probe +=
			xfr->task_nextprobe->backoff;
	}

	if(!xfr->task_nextprobe->timer) {
		xfr->task_nextprobe->timer = comm_timer_create(
			env->worker_base, auth_xfer_timer, xfr);
		if(!xfr->task_nextprobe->timer) {
			/* failed to malloc memory, likely zone transfer
			 * also fails for that.  Skip the timeout */
			char zname[255+1];
			dname_str(xfr->name, zname);
			log_err("cannot allocate timer, no refresh for %s",
				zname);
			return;
		}
	}
	xfr->task_nextprobe->worker = env->worker;
	xfr->task_nextprobe->env = env;
	if(*(xfr->task_nextprobe->env->now) <= xfr->task_nextprobe->next_probe)
		tv.tv_sec = xfr->task_nextprobe->next_probe -
			*(xfr->task_nextprobe->env->now);
	else tv.tv_sec = 0;
	if(tv.tv_sec != 0 && lookup_only && xfr->task_probe->masters) {
		/* don't lookup_only, if lookup timeout is 0 anyway,
		 * or if we don't have masters to lookup */
		tv.tv_sec = 0;
		if(xfr->task_probe && xfr->task_probe->worker == NULL)
			xfr->task_probe->only_lookup = 1;
	}
	if(verbosity >= VERB_ALGO) {
		char zname[255+1];
		dname_str(xfr->name, zname);
		verbose(VERB_ALGO, "auth zone %s timeout in %d seconds",
			zname, (int)tv.tv_sec);
	}
	tv.tv_usec = 0;
	comm_timer_set(xfr->task_nextprobe->timer, &tv);
}

/** initial pick up of worker timeouts, ties events to worker event loop */
void
auth_xfer_pickup_initial(struct auth_zones* az, struct module_env* env)
{
	struct auth_xfer* x;
	lock_rw_wrlock(&az->lock);
	RBTREE_FOR(x, struct auth_xfer*, &az->xtree) {
		lock_basic_lock(&x->lock);
		/* set lease_time, because we now have timestamp in env,
		 * (not earlier during startup and apply_cfg), and this
		 * notes the start time when the data was acquired */
		if(x->have_zone)
			x->lease_time = *env->now;
		if(x->task_nextprobe && x->task_nextprobe->worker == NULL) {
			xfr_set_timeout(x, env, 0, 1);
		}
		lock_basic_unlock(&x->lock);
	}
	lock_rw_unlock(&az->lock);
}

void auth_zones_cleanup(struct auth_zones* az)
{
	struct auth_xfer* x;
	lock_rw_wrlock(&az->lock);
	RBTREE_FOR(x, struct auth_xfer*, &az->xtree) {
		lock_basic_lock(&x->lock);
		if(x->task_nextprobe && x->task_nextprobe->worker != NULL) {
			xfr_nextprobe_disown(x);
		}
		if(x->task_probe && x->task_probe->worker != NULL) {
			xfr_probe_disown(x);
		}
		if(x->task_transfer && x->task_transfer->worker != NULL) {
			auth_chunks_delete(x->task_transfer);
			xfr_transfer_disown(x);
		}
		lock_basic_unlock(&x->lock);
	}
	lock_rw_unlock(&az->lock);
}

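/* Note: auth_xfer_new below returns with xfr->lock still held, presumably
 * so the caller can finish filling in the masters and tasks before other
 * threads see the xfr in the tree. */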
/**
 * malloc the xfer and tasks
 * @param z: auth_zone with name of zone.
 */
static struct auth_xfer*
auth_xfer_new(struct auth_zone* z)
{
	struct auth_xfer* xfr;
	xfr = (struct auth_xfer*)calloc(1, sizeof(*xfr));
	if(!xfr) return NULL;
	xfr->name = memdup(z->name, z->namelen);
	if(!xfr->name) {
		free(xfr);
		return NULL;
	}
	xfr->node.key = xfr;
	xfr->namelen = z->namelen;
	xfr->namelabs = z->namelabs;
	xfr->dclass = z->dclass;

	xfr->task_nextprobe = (struct auth_nextprobe*)calloc(1,
		sizeof(struct auth_nextprobe));
	if(!xfr->task_nextprobe) {
		free(xfr->name);
		free(xfr);
		return NULL;
	}
	xfr->task_probe = (struct auth_probe*)calloc(1,
		sizeof(struct auth_probe));
	if(!xfr->task_probe) {
		free(xfr->task_nextprobe);
		free(xfr->name);
		free(xfr);
		return NULL;
	}
	xfr->task_transfer = (struct auth_transfer*)calloc(1,
		sizeof(struct auth_transfer));
	if(!xfr->task_transfer) {
		free(xfr->task_probe);
		free(xfr->task_nextprobe);
		free(xfr->name);
		free(xfr);
		return NULL;
	}

	lock_basic_init(&xfr->lock);
	lock_protect(&xfr->lock, &xfr->name, sizeof(xfr->name));
	lock_protect(&xfr->lock, &xfr->namelen, sizeof(xfr->namelen));
	lock_protect(&xfr->lock, xfr->name, xfr->namelen);
	lock_protect(&xfr->lock, &xfr->namelabs, sizeof(xfr->namelabs));
	lock_protect(&xfr->lock, &xfr->dclass, sizeof(xfr->dclass));
	lock_protect(&xfr->lock, &xfr->notify_received, sizeof(xfr->notify_received));
	lock_protect(&xfr->lock, &xfr->notify_serial, sizeof(xfr->notify_serial));
	lock_protect(&xfr->lock, &xfr->zone_expired, sizeof(xfr->zone_expired));
	lock_protect(&xfr->lock, &xfr->have_zone, sizeof(xfr->have_zone));
	lock_protect(&xfr->lock, &xfr->serial, sizeof(xfr->serial));
	lock_protect(&xfr->lock, &xfr->retry, sizeof(xfr->retry));
	lock_protect(&xfr->lock, &xfr->refresh, sizeof(xfr->refresh));
	lock_protect(&xfr->lock, &xfr->expiry, sizeof(xfr->expiry));
	lock_protect(&xfr->lock, &xfr->lease_time, sizeof(xfr->lease_time));
	lock_protect(&xfr->lock, &xfr->task_nextprobe->worker,
		sizeof(xfr->task_nextprobe->worker));
	lock_protect(&xfr->lock, &xfr->task_probe->worker,
		sizeof(xfr->task_probe->worker));
	lock_protect(&xfr->lock, &xfr->task_transfer->worker,
		sizeof(xfr->task_transfer->worker));
	lock_basic_lock(&xfr->lock);
	return xfr;
}

/** Create auth_xfer structure.
 * This populates the have_zone, soa values, and so-on times,
 * and sets the timeout; if a zone transfer is needed a short timeout is set.
 * For that the auth_zone itself must exist (and have its zonefile read in).
 * Returns NULL on alloc failure. */
struct auth_xfer*
auth_xfer_create(struct auth_zones* az, struct auth_zone* z)
{
	struct auth_xfer* xfr;

	/* malloc it */
	xfr = auth_xfer_new(z);
	if(!xfr) {
		log_err("malloc failure");
		return NULL;
	}
	/* insert in tree */
	(void)rbtree_insert(&az->xtree, &xfr->node);
	return xfr;
}

/** create new auth_master structure */
static struct auth_master*
auth_master_new(struct auth_master*** list)
{
	struct auth_master *m;
	m = (struct auth_master*)calloc(1, sizeof(*m));
	if(!m) {
		log_err("malloc failure");
		return NULL;
	}
	/* set first pointer to m, or next pointer of previous element to m */
	(**list) = m;
	/* store m's next pointer as future point to store at */
	(*list) = &(m->next);
	return m;
}

/** dup_prefix: create string from initial part of other string, malloced */
static char*
dup_prefix(char* str, size_t num)
{
	char* result;
	size_t len = strlen(str);
	if(len < num) num = len; /* not more than strlen */
	result = (char*)malloc(num+1);
	if(!result) {
		log_err("malloc failure");
		return result;
	}
	memmove(result, str, num);
	result[num] = 0;
	return result;
}

/** dup string and print error on error */
static char*
dup_all(char* str)
{
	char* result = strdup(str);
	if(!result) {
		log_err("malloc failure");
		return NULL;
	}
	return result;
}

/** find first of two characters */
static char*
str_find_first_of_chars(char* s, char a, char b)
{
	char* ra = strchr(s, a);
	char* rb = strchr(s, b);
	if(!ra) return rb;
	if(!rb) return ra;
	if(ra < rb) return ra;
	return rb;
}

/** parse URL into host and file parts, false on malloc or parse error */
static int
parse_url(char* url, char** host, char** file, int* port, int* ssl)
{
	char* p = url;
	/* parse http://www.example.com/file.htm
	 * or http://127.0.0.1   (index.html)
	 * or https://[::1@1234]/a/b/c/d */
	*ssl = 1;
	*port = AUTH_HTTPS_PORT;

	/* parse http:// or https:// */
	if(strncmp(p, "http://", 7) == 0) {
		p += 7;
		*ssl = 0;
		*port = AUTH_HTTP_PORT;
	} else if(strncmp(p, "https://", 8) == 0) {
		p += 8;
	} else if(strstr(p, "://") && strchr(p, '/') > strstr(p, "://") &&
		strchr(p, ':') >= strstr(p, "://")) {
		char* uri = dup_prefix(p, (size_t)(strstr(p, "://")-p));
		log_err("protocol %s:// not supported (for url %s)",
			uri?uri:"", p);
		free(uri);
		return 0;
	}

	/* parse hostname part */
	if(p[0] == '[') {
		char* end = strchr(p, ']');
		p++; /* skip over [ */
		if(end) {
			*host = dup_prefix(p, (size_t)(end-p));
			if(!*host) return 0;
			p = end+1; /* skip over ] */
		} else {
			*host = dup_all(p);
			if(!*host) return 0;
			p = end;
		}
	} else {
		char* end = str_find_first_of_chars(p, ':', '/');
		if(end) {
			*host = dup_prefix(p, (size_t)(end-p));
			if(!*host) return 0;
		} else {
			*host = dup_all(p);
			if(!*host) return 0;
		}
		p = end; /* at next : or / or NULL */
	}

	/* parse port number */
	if(p && p[0] == ':') {
		char* end = NULL;
		*port = strtol(p+1, &end, 10);
		p = end;
	}

	/* parse filename part */
	while(p && *p == '/')
		p++;
	if(!p || p[0] == 0)
		*file = strdup("index.html");
	else *file = strdup(p);
	if(!*file) {
		log_err("malloc failure");
		return 0;
	}
	return 1;
}

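/* For illustration: parse_url splits "http://www.example.com/file.htm"
 * into host "www.example.com", port 80, ssl 0 and file "file.htm", and
 * "https://[::1]:8080" into host "::1", port 8080, ssl 1 and the default
 * file "index.html". */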
int
xfer_set_masters(struct auth_master** list, struct config_auth* c,
	int with_http)
{
	struct auth_master* m;
	struct config_strlist* p;
	/* list points to the first, or next pointer for the new element */
	while(*list) {
		list = &( (*list)->next );
	}
	if(with_http) {
		for(p = c->urls; p; p = p->next) {
			m = auth_master_new(&list);
			if(!m) return 0;
			m->http = 1;
			if(!parse_url(p->str, &m->host, &m->file, &m->port,
				&m->ssl))
				return 0;
		}
	}
	for(p = c->masters; p; p = p->next) {
		m = auth_master_new(&list);
		if(!m) return 0;
		m->ixfr = 1; /* this flag is not configurable */
		m->host = strdup(p->str);
		if(!m->host) {
			log_err("malloc failure");
			return 0;
		}
	}
	for(p = c->allow_notify; p; p = p->next) {
		m = auth_master_new(&list);
		if(!m) return 0;
		m->allow_notify = 1;
		m->host = strdup(p->str);
		if(!m->host) {
			log_err("malloc failure");
			return 0;
		}
	}
	return 1;
}

#define SERIAL_BITS 32
int
compare_serial(uint32_t a, uint32_t b)
{
	const uint32_t cutoff = ((uint32_t) 1 << (SERIAL_BITS - 1));

	if (a == b) {
		return 0;
	} else if ((a < b && b - a < cutoff) || (a > b && a - b > cutoff)) {
		return -1;
	} else {
		return 1;
	}
}
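/* Serial numbers compare in a circular space (RFC 1982 style); for
 * example compare_serial(1, 2) == -1, compare_serial(5, 3) == 1 and,
 * because of wrap-around, compare_serial(0xffffffff, 0) == -1, so
 * 0xffffffff counts as older than 0. */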