/*
 * services/cache/dns.c - Cache services for DNS using msg and rrset caches.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the DNS cache.
 */
#include "config.h"
#include "iterator/iter_delegpt.h"
#include "iterator/iter_utils.h"
#include "validator/val_nsec.h"
#include "validator/val_utils.h"
#include "services/cache/dns.h"
#include "services/cache/rrset.h"
#include "util/data/msgparse.h"
#include "util/data/msgreply.h"
#include "util/data/packed_rrset.h"
#include "util/data/dname.h"
#include "util/module.h"
#include "util/net_help.h"
#include "util/regional.h"
#include "util/config_file.h"
#include "sldns/sbuffer.h"

/** store rrsets in the rrset cache.
 * @param env: module environment with caches.
 * @param rep: contains list of rrsets to store.
 * @param now: current time.
 * @param leeway: during prefetch how much leeway to update TTLs.
 * 	This makes rrsets (other than type NS) timeout sooner so they get
 * 	updated with a new full TTL.
 * 	Type NS does not get this, because it must not be refreshed from the
 * 	child domain, but keep counting down properly.
 * @param pside: if from parentside discovered NS, so that its NS is okay
 * 	in a prefetch situation to be updated (without becoming sticky).
 * @param qrep: update rrsets here if cache is better
 * @param region: for qrep allocs.
 * @param qstarttime: time when delegations were looked up, this is perhaps
 * 	earlier than the time in now. The time is used to determine if RRsets
 * 	of type NS have expired, so that they can only be updated using
 * 	lookups of delegation points that did not use them, since they had
 * 	expired then.
 */
static void
store_rrsets(struct module_env* env, struct reply_info* rep, time_t now,
	time_t leeway, int pside, struct reply_info* qrep,
	struct regional* region, time_t qstarttime)
{
	size_t i;
	/* see if rrset already exists in cache, if not insert it. */
	for(i=0; i<rep->rrset_count; i++) {
		rep->ref[i].key = rep->rrsets[i];
		rep->ref[i].id = rep->rrsets[i]->id;
		/* update ref if it was in the cache */
		switch(rrset_cache_update(env->rrset_cache, &rep->ref[i],
			env->alloc, ((ntohs(rep->ref[i].key->rk.type)==
			LDNS_RR_TYPE_NS && !pside)?qstarttime:now + leeway))) {
		case 0: /* ref unchanged, item inserted */
			break;
		case 2: /* ref updated, cache is superior */
			if(region) {
				struct ub_packed_rrset_key* ck;
				lock_rw_rdlock(&rep->ref[i].key->entry.lock);
				/* if deleted rrset, do not copy it */
				if(rep->ref[i].key->id == 0)
					ck = NULL;
				else	ck = packed_rrset_copy_region(
					rep->ref[i].key, region, now);
				lock_rw_unlock(&rep->ref[i].key->entry.lock);
				if(ck) {
					/* use cached copy if memory allows */
					qrep->rrsets[i] = ck;
				}
			}
			/* no break: also copy key item */
			/* the line below is matched by gcc regex and silences
			 * the fallthrough warning */
			/* fallthrough */
		case 1: /* ref updated, item inserted */
			rep->rrsets[i] = rep->ref[i].key;
		}
	}
}

/** delete message from message cache */
void
msg_cache_remove(struct module_env* env, uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, uint16_t flags)
{
	struct query_info k;
	hashvalue_type h;

	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	k.local_alias = NULL;
	h = query_info_hash(&k, flags);
	slabhash_remove(env->msg_cache, h, &k);
}

/** remove servfail msg cache entry */
static void
msg_del_servfail(struct module_env* env, struct query_info* qinfo,
	uint32_t flags)
{
	struct msgreply_entry* e;
	/* see if the entry is servfail, and then remove it, so that
	 * lookups move from the cacheresponse stage to the recursionresponse
	 * stage */
	e = msg_cache_lookup(env, qinfo->qname, qinfo->qname_len,
		qinfo->qtype, qinfo->qclass, flags, 0, 0);
	if(!e) return;
	/* we don't check for the ttl here, also expired servfail entries
	 * are removed. If the user uses serve-expired, they would still be
	 * used to answer from cache */
	if(FLAGS_GET_RCODE(((struct reply_info*)e->entry.data)->flags)
		!= LDNS_RCODE_SERVFAIL) {
		lock_rw_unlock(&e->entry.lock);
		return;
	}
	lock_rw_unlock(&e->entry.lock);
	msg_cache_remove(env, qinfo->qname, qinfo->qname_len, qinfo->qtype,
		qinfo->qclass, flags);
}

void
dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
	hashvalue_type hash, struct reply_info* rep, time_t leeway, int pside,
	struct reply_info* qrep, uint32_t flags, struct regional* region,
	time_t qstarttime)
{
	struct msgreply_entry* e;
	time_t ttl = rep->ttl;
	size_t i;

	/* store RRsets */
	for(i=0; i<rep->rrset_count; i++) {
		rep->ref[i].key = rep->rrsets[i];
		rep->ref[i].id = rep->rrsets[i]->id;
	}

	/* there was a reply_info_sortref(rep) here but it seems to be
	 * unnecessary, because the cache gets locked per rrset.
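	 * store_rrsets below updates the rrsets one at a time, so the
	 * reference array does not need a sorted lock order at this point;
	 * reply_info_sortref is done later, before the message is inserted.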
	 */
	reply_info_set_ttls(rep, *env->now);
	store_rrsets(env, rep, *env->now, leeway, pside, qrep, region,
		qstarttime);
	if(ttl == 0 && !(flags & DNSCACHE_STORE_ZEROTTL)) {
		/* we do not store the message, but we did store the RRs,
		 * which could be useful for delegation information */
		verbose(VERB_ALGO, "TTL 0: dropped msg from cache");
		free(rep);
		/* if the message is SERVFAIL in cache, remove that SERVFAIL,
		 * so that the TTL 0 response can be returned for future
		 * responses (i.e. don't get answered by the servfail from
		 * cache, but instead go to recursion to get this TTL0
		 * response). */
		msg_del_servfail(env, qinfo, flags);
		return;
	}

	/* store msg in the cache */
	reply_info_sortref(rep);
	if(!(e = query_info_entrysetup(qinfo, rep, hash))) {
		log_err("store_msg: malloc failed");
		return;
	}
	slabhash_insert(env->msg_cache, hash, &e->entry, rep, env->alloc);
}

/** see if an rrset is expired above the qname, return upper qname. */
static int
rrset_expired_above(struct module_env* env, uint8_t** qname, size_t* qnamelen,
	uint16_t searchtype, uint16_t qclass, time_t now, uint8_t* expiretop,
	size_t expiretoplen)
{
	struct ub_packed_rrset_key *rrset;
	uint8_t lablen;

	while(*qnamelen > 0) {
		/* look one label higher */
		lablen = **qname;
		*qname += lablen + 1;
		*qnamelen -= lablen + 1;
		if(*qnamelen <= 0)
			break;

		/* looks up with a time of 0, to see expired entries */
		if((rrset = rrset_cache_lookup(env->rrset_cache, *qname,
			*qnamelen, searchtype, qclass, 0, 0, 0))) {
			struct packed_rrset_data* data =
				(struct packed_rrset_data*)rrset->entry.data;
			if(now > data->ttl) {
				/* it is expired, this is not wanted */
				lock_rw_unlock(&rrset->entry.lock);
				log_nametypeclass(VERB_ALGO, "this rrset is expired", *qname, searchtype, qclass);
				return 1;
			}
			/* it is not expired, continue looking */
			lock_rw_unlock(&rrset->entry.lock);
		}

		/* do not look above the expiretop. */
		if(expiretop && *qnamelen == expiretoplen &&
			query_dname_compare(*qname, expiretop)==0)
			break;
	}
	return 0;
}

/** find the closest NS or DNAME and return the rrset (locked) */
static struct ub_packed_rrset_key*
find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen,
	uint16_t qclass, time_t now, uint16_t searchtype, int stripfront,
	int noexpiredabove, uint8_t* expiretop, size_t expiretoplen)
{
	struct ub_packed_rrset_key *rrset;
	uint8_t lablen;

	if(stripfront) {
		/* strip off so that DNAMEs have strict subdomain match */
		lablen = *qname;
		qname += lablen + 1;
		qnamelen -= lablen + 1;
	}

	/* snip off front part of qname until the type is found */
	while(qnamelen > 0) {
		if((rrset = rrset_cache_lookup(env->rrset_cache, qname,
			qnamelen, searchtype, qclass, 0, now, 0))) {
			uint8_t* origqname = qname;
			size_t origqnamelen = qnamelen;
			if(!noexpiredabove)
				return rrset;
			/* if expiretop set, do not look above it, but
			 * qname is equal, so the just found result is also
			 * the nonexpired above part.
			 */
			if(expiretop && qnamelen == expiretoplen &&
				query_dname_compare(qname, expiretop)==0)
				return rrset;
			/* check for expiry, but we have to let go of the rrset
			 * for the lock ordering */
			lock_rw_unlock(&rrset->entry.lock);
			/* the expired_above function always takes off one
			 * label (if qnamelen>0) and returns the final qname
			 * where it searched, so we can continue from there
			 * turning the O(N*N) search into O(N). */
			if(!rrset_expired_above(env, &qname, &qnamelen,
				searchtype, qclass, now, expiretop,
				expiretoplen)) {
				/* we want to return rrset, but it may be
				 * gone from cache, if so, just loop like
				 * it was not in the cache in the first place.
				 */
				if((rrset = rrset_cache_lookup(env->
					rrset_cache, origqname, origqnamelen,
					searchtype, qclass, 0, now, 0))) {
					return rrset;
				}
			}
			log_nametypeclass(VERB_ALGO, "ignoring rrset because expired rrsets exist above it", origqname, searchtype, qclass);
			continue;
		}

		/* snip off front label */
		lablen = *qname;
		qname += lablen + 1;
		qnamelen -= lablen + 1;
	}
	return NULL;
}

/** add addr to additional section */
static void
addr_to_additional(struct ub_packed_rrset_key* rrset, struct regional* region,
	struct dns_msg* msg, time_t now)
{
	if((msg->rep->rrsets[msg->rep->rrset_count] =
		packed_rrset_copy_region(rrset, region, now))) {
		msg->rep->ar_numrrsets++;
		msg->rep->rrset_count++;
	}
}

/** lookup message in message cache */
struct msgreply_entry*
msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, uint16_t flags, time_t now, int wr)
{
	struct lruhash_entry* e;
	struct query_info k;
	hashvalue_type h;

	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	k.local_alias = NULL;
	h = query_info_hash(&k, flags);
	e = slabhash_lookup(env->msg_cache, h, &k, wr);

	if(!e) return NULL;
	if( now > ((struct reply_info*)e->data)->ttl ) {
		lock_rw_unlock(&e->lock);
		return NULL;
	}
	return (struct msgreply_entry*)e->key;
}

/** find and add A and AAAA records for nameservers in delegpt */
static int
find_add_addrs(struct module_env* env, uint16_t qclass,
	struct regional* region, struct delegpt* dp, time_t now,
	struct dns_msg** msg)
{
	struct delegpt_ns* ns;
	struct msgreply_entry* neg;
	struct ub_packed_rrset_key* akey;
	for(ns = dp->nslist; ns; ns = ns->next) {
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_A(dp, region, akey, 0, NULL)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			if(msg)
				addr_to_additional(akey, region, *msg, now);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_A, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_AAAA(dp, region, akey, 0, NULL)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			if(msg)
				addr_to_additional(akey, region, *msg, now);
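			/* done with the cached AAAA rrset, release its lock */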
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
	}
	return 1;
}

/** find and add A and AAAA records for missing nameservers in delegpt */
int
cache_fill_missing(struct module_env* env, uint16_t qclass,
	struct regional* region, struct delegpt* dp)
{
	struct delegpt_ns* ns;
	struct msgreply_entry* neg;
	struct ub_packed_rrset_key* akey;
	time_t now = *env->now;
	for(ns = dp->nslist; ns; ns = ns->next) {
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_A(dp, region, akey, ns->lame,
				NULL)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			log_nametypeclass(VERB_ALGO, "found in cache",
				ns->name, LDNS_RR_TYPE_A, qclass);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_A, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
		if(akey) {
			if(!delegpt_add_rrset_AAAA(dp, region, akey, ns->lame,
				NULL)) {
				lock_rw_unlock(&akey->entry.lock);
				return 0;
			}
			log_nametypeclass(VERB_ALGO, "found in cache",
				ns->name, LDNS_RR_TYPE_AAAA, qclass);
			lock_rw_unlock(&akey->entry.lock);
		} else {
			/* BIT_CD on false because delegpt lookup does
			 * not use dns64 translation */
			neg = msg_cache_lookup(env, ns->name, ns->namelen,
				LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
			if(neg) {
				delegpt_add_neg_msg(dp, neg);
				lock_rw_unlock(&neg->entry.lock);
			}
		}
	}
	return 1;
}

/** find and add DS or NSEC to delegation msg */
static void
find_add_ds(struct module_env* env, struct regional* region,
	struct dns_msg* msg, struct delegpt* dp, time_t now)
{
	/* Lookup the DS or NSEC at the delegation point. */
	struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
		env->rrset_cache, dp->name, dp->namelen, LDNS_RR_TYPE_DS,
		msg->qinfo.qclass, 0, now, 0);
	if(!rrset) {
		/* NOTE: this won't work for alternate NSEC schemes
		 * (opt-in, NSEC3) */
		rrset = rrset_cache_lookup(env->rrset_cache, dp->name,
			dp->namelen, LDNS_RR_TYPE_NSEC, msg->qinfo.qclass,
			0, now, 0);
		/* Note: the PACKED_RRSET_NSEC_AT_APEX flag is not used.
		 * since this is a referral, we need the NSEC at the parent
		 * side of the zone cut, not the NSEC at apex side. */
		if(rrset && nsec_has_type(rrset, LDNS_RR_TYPE_DS)) {
			lock_rw_unlock(&rrset->entry.lock);
			rrset = NULL; /* discard wrong NSEC */
		}
	}
	if(rrset) {
		/* add it to auth section. This is the second rrset.
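		 * (the first is the NS rrset added by the caller,
		 * dns_cache_find_delegation).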
		 */
		if((msg->rep->rrsets[msg->rep->rrset_count] =
			packed_rrset_copy_region(rrset, region, now))) {
			msg->rep->ns_numrrsets++;
			msg->rep->rrset_count++;
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
}

struct dns_msg*
dns_msg_create(uint8_t* qname, size_t qnamelen, uint16_t qtype,
	uint16_t qclass, struct regional* region, size_t capacity)
{
	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!msg)
		return NULL;
	msg->qinfo.qname = regional_alloc_init(region, qname, qnamelen);
	if(!msg->qinfo.qname)
		return NULL;
	msg->qinfo.qname_len = qnamelen;
	msg->qinfo.qtype = qtype;
	msg->qinfo.qclass = qclass;
	msg->qinfo.local_alias = NULL;
	/* non-packed reply_info, because it needs to grow the array */
	msg->rep = (struct reply_info*)regional_alloc_zero(region,
		sizeof(struct reply_info)-sizeof(struct rrset_ref));
	if(!msg->rep)
		return NULL;
	if(capacity > RR_COUNT_MAX)
		return NULL; /* integer overflow protection */
	msg->rep->flags = BIT_QR; /* with QR, no AA */
	msg->rep->qdcount = 1;
	msg->rep->reason_bogus = LDNS_EDE_NONE;
	msg->rep->rrsets = (struct ub_packed_rrset_key**)
		regional_alloc(region,
		capacity*sizeof(struct ub_packed_rrset_key*));
	if(!msg->rep->rrsets)
		return NULL;
	return msg;
}

int
dns_msg_authadd(struct dns_msg* msg, struct regional* region,
	struct ub_packed_rrset_key* rrset, time_t now)
{
	if(!(msg->rep->rrsets[msg->rep->rrset_count++] =
		packed_rrset_copy_region(rrset, region, now)))
		return 0;
	msg->rep->ns_numrrsets++;
	return 1;
}

int
dns_msg_ansadd(struct dns_msg* msg, struct regional* region,
	struct ub_packed_rrset_key* rrset, time_t now)
{
	if(!(msg->rep->rrsets[msg->rep->rrset_count++] =
		packed_rrset_copy_region(rrset, region, now)))
		return 0;
	msg->rep->an_numrrsets++;
	return 1;
}

struct delegpt*
dns_cache_find_delegation(struct module_env* env, uint8_t* qname,
	size_t qnamelen, uint16_t qtype, uint16_t qclass,
	struct regional* region, struct dns_msg** msg, time_t now,
	int noexpiredabove, uint8_t* expiretop, size_t expiretoplen)
{
	/* try to find closest NS rrset */
	struct ub_packed_rrset_key* nskey;
	struct packed_rrset_data* nsdata;
	struct delegpt* dp;

	nskey = find_closest_of_type(env, qname, qnamelen, qclass, now,
		LDNS_RR_TYPE_NS, 0, noexpiredabove, expiretop, expiretoplen);
	if(!nskey) /* hope the caller has hints to prime or something */
		return NULL;
	nsdata = (struct packed_rrset_data*)nskey->entry.data;
	/* got the NS key, create delegation point */
	dp = delegpt_create(region);
	if(!dp || !delegpt_set_name(dp, region, nskey->rk.dname)) {
		lock_rw_unlock(&nskey->entry.lock);
		log_err("find_delegation: out of memory");
		return NULL;
	}
	/* create referral message */
	if(msg) {
		/* allocate the array to as much as we could need:
		 *	NS rrset + DS/NSEC rrset +
		 *	A rrset for every NS RR
		 *	AAAA rrset for every NS RR
		 */
		*msg = dns_msg_create(qname, qnamelen, qtype, qclass, region,
			2 + nsdata->count*2);
		if(!*msg || !dns_msg_authadd(*msg, region, nskey, now)) {
			lock_rw_unlock(&nskey->entry.lock);
			log_err("find_delegation: out of memory");
			return NULL;
		}
	}
	if(!delegpt_rrset_add_ns(dp, region, nskey, 0))
		log_err("find_delegation: addns out of memory");
	lock_rw_unlock(&nskey->entry.lock); /* first unlock before next lookup*/
	/* find and add DS/NSEC (if any) */
	if(msg)
		find_add_ds(env, region, *msg, dp, now);
	/* find and add A entries */
	if(!find_add_addrs(env, qclass, region, dp, now, msg))
		log_err("find_delegation: addrs out of memory");
	return dp;
}

/** allocate dns_msg from query_info and reply_info */
static struct dns_msg*
gen_dns_msg(struct regional* region, struct query_info* q, size_t num)
{
	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!msg)
		return NULL;
	memcpy(&msg->qinfo, q, sizeof(struct query_info));
	msg->qinfo.qname = regional_alloc_init(region, q->qname, q->qname_len);
	if(!msg->qinfo.qname)
		return NULL;
	/* allocate replyinfo struct and rrset key array separately */
	msg->rep = (struct reply_info*)regional_alloc(region,
		sizeof(struct reply_info) - sizeof(struct rrset_ref));
	if(!msg->rep)
		return NULL;
	msg->rep->reason_bogus = LDNS_EDE_NONE;
	if(num > RR_COUNT_MAX)
		return NULL; /* integer overflow protection */
	msg->rep->rrsets = (struct ub_packed_rrset_key**)
		regional_alloc(region,
		num * sizeof(struct ub_packed_rrset_key*));
	if(!msg->rep->rrsets)
		return NULL;
	return msg;
}

struct dns_msg*
tomsg(struct module_env* env, struct query_info* q, struct reply_info* r,
	struct regional* region, time_t now, int allow_expired,
	struct regional* scratch)
{
	struct dns_msg* msg;
	size_t i;
	int is_expired = 0;
	time_t now_control = now;
	if(now > r->ttl) {
		/* Check if we are allowed to serve expired */
		if(allow_expired) {
			if(env->cfg->serve_expired_ttl &&
				r->serve_expired_ttl < now) {
				return NULL;
			}
		} else {
			return NULL;
		}
		/* Change the current time so we can pass the below TTL checks when
		 * serving expired data.
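		 * now_control is set to a time at or before the stored rrset
		 * TTLs, so the rrset_array_lock check below does not reject
		 * the expired rrsets.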
		 */
		now_control = r->ttl - env->cfg->serve_expired_reply_ttl;
		is_expired = 1;
	}

	msg = gen_dns_msg(region, q, r->rrset_count);
	if(!msg) return NULL;
	msg->rep->flags = r->flags;
	msg->rep->qdcount = r->qdcount;
	msg->rep->ttl = is_expired
		?SERVE_EXPIRED_REPLY_TTL
		:r->ttl - now;
	if(r->prefetch_ttl > now)
		msg->rep->prefetch_ttl = r->prefetch_ttl - now;
	else
		msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
	msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL;
	msg->rep->security = r->security;
	msg->rep->an_numrrsets = r->an_numrrsets;
	msg->rep->ns_numrrsets = r->ns_numrrsets;
	msg->rep->ar_numrrsets = r->ar_numrrsets;
	msg->rep->rrset_count = r->rrset_count;
	msg->rep->authoritative = r->authoritative;
	msg->rep->reason_bogus = r->reason_bogus;
	if(!rrset_array_lock(r->ref, r->rrset_count, now_control)) {
		return NULL;
	}
	if(r->an_numrrsets > 0 && (r->rrsets[0]->rk.type == htons(
		LDNS_RR_TYPE_CNAME) || r->rrsets[0]->rk.type == htons(
		LDNS_RR_TYPE_DNAME)) && !reply_check_cname_chain(q, r)) {
		/* cname chain is now invalid, reconstruct msg */
		rrset_array_unlock(r->ref, r->rrset_count);
		return NULL;
	}
	if(r->security == sec_status_secure && !reply_all_rrsets_secure(r)) {
		/* message rrsets have changed status, revalidate */
		rrset_array_unlock(r->ref, r->rrset_count);
		return NULL;
	}
	for(i=0; i<msg->rep->rrset_count; i++) {
		msg->rep->rrsets[i] = packed_rrset_copy_region(r->rrsets[i],
			region, now);
		if(!msg->rep->rrsets[i]) {
			rrset_array_unlock(r->ref, r->rrset_count);
			return NULL;
		}
	}
	if(env)
		rrset_array_unlock_touch(env->rrset_cache, scratch, r->ref,
			r->rrset_count);
	else
		rrset_array_unlock(r->ref, r->rrset_count);
	return msg;
}

/** synthesize RRset-only response from cached RRset item */
static struct dns_msg*
rrset_msg(struct ub_packed_rrset_key* rrset, struct regional* region,
	time_t now, struct query_info* q)
{
	struct dns_msg* msg;
	struct packed_rrset_data* d = (struct packed_rrset_data*)
		rrset->entry.data;
	if(now > d->ttl)
		return NULL;
	msg = gen_dns_msg(region, q, 1); /* only the CNAME (or other) RRset */
	if(!msg)
		return NULL;
	msg->rep->flags = BIT_QR; /* reply, no AA, no error */
	msg->rep->authoritative = 0; /* reply stored in cache can't be authoritative */
	msg->rep->qdcount = 1;
	msg->rep->ttl = d->ttl - now;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
	msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL;
	msg->rep->security = sec_status_unchecked;
	msg->rep->an_numrrsets = 1;
	msg->rep->ns_numrrsets = 0;
	msg->rep->ar_numrrsets = 0;
	msg->rep->rrset_count = 1;
	msg->rep->reason_bogus = LDNS_EDE_NONE;
	msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now);
	if(!msg->rep->rrsets[0]) /* copy CNAME */
		return NULL;
	return msg;
}

/** synthesize DNAME+CNAME response from cached DNAME item */
static struct dns_msg*
synth_dname_msg(struct ub_packed_rrset_key* rrset, struct regional* region,
	time_t now, struct query_info* q, enum sec_status* sec_status)
{
	struct dns_msg* msg;
	struct ub_packed_rrset_key* ck;
	struct packed_rrset_data* newd, *d = (struct packed_rrset_data*)
		rrset->entry.data;
	uint8_t* newname, *dtarg = NULL;
	size_t newlen, dtarglen;
	if(now > d->ttl)
		return NULL;
	/* only allow validated (with DNSSEC) DNAMEs used from cache;
	 * for insecure DNAMEs, query again. */
	*sec_status = d->security;
	/* return sec status, so the status of the CNAME can be checked
	 * by the calling routine. */
	msg = gen_dns_msg(region, q, 2); /* DNAME + CNAME RRset */
	if(!msg)
		return NULL;
	msg->rep->flags = BIT_QR; /* reply, no AA, no error */
	msg->rep->authoritative = 0; /* reply stored in cache can't be authoritative */
	msg->rep->qdcount = 1;
	msg->rep->ttl = d->ttl - now;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
	msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL;
	msg->rep->security = sec_status_unchecked;
	msg->rep->an_numrrsets = 1;
	msg->rep->ns_numrrsets = 0;
	msg->rep->ar_numrrsets = 0;
	msg->rep->rrset_count = 1;
	msg->rep->reason_bogus = LDNS_EDE_NONE;
	msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now);
	if(!msg->rep->rrsets[0]) /* copy DNAME */
		return NULL;
	/* synth CNAME rrset */
	get_cname_target(rrset, &dtarg, &dtarglen);
	if(!dtarg)
		return NULL;
	newlen = q->qname_len + dtarglen - rrset->rk.dname_len;
	if(newlen > LDNS_MAX_DOMAINLEN) {
		msg->rep->flags |= LDNS_RCODE_YXDOMAIN;
		return msg;
	}
	newname = (uint8_t*)regional_alloc(region, newlen);
	if(!newname)
		return NULL;
	/* new name is concatenation of qname front (without DNAME owner)
	 * and DNAME target name */
	memcpy(newname, q->qname, q->qname_len-rrset->rk.dname_len);
	memmove(newname+(q->qname_len-rrset->rk.dname_len), dtarg, dtarglen);
	/* create rest of CNAME rrset */
	ck = (struct ub_packed_rrset_key*)regional_alloc(region,
		sizeof(struct ub_packed_rrset_key));
	if(!ck)
		return NULL;
	memset(&ck->entry, 0, sizeof(ck->entry));
	msg->rep->rrsets[1] = ck;
	ck->entry.key = ck;
	ck->rk.type = htons(LDNS_RR_TYPE_CNAME);
	ck->rk.rrset_class = rrset->rk.rrset_class;
	ck->rk.flags = 0;
	ck->rk.dname = regional_alloc_init(region, q->qname, q->qname_len);
	if(!ck->rk.dname)
		return NULL;
	ck->rk.dname_len = q->qname_len;
	ck->entry.hash = rrset_key_hash(&ck->rk);
	newd = (struct packed_rrset_data*)regional_alloc_zero(region,
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t)
		+ newlen);
	if(!newd)
		return NULL;
	ck->entry.data = newd;
	newd->ttl = 0; /* 0 for synthesized CNAME TTL */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = newlen + sizeof(uint16_t);
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	msg->rep->ttl = newd->ttl;
	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(newd->ttl);
	msg->rep->serve_expired_ttl = newd->ttl + SERVE_EXPIRED_TTL;
	sldns_write_uint16(newd->rr_data[0], newlen);
	memmove(newd->rr_data[0] + sizeof(uint16_t), newname, newlen);
	msg->rep->an_numrrsets ++;
	msg->rep->rrset_count ++;
	return msg;
}

/** Fill TYPE_ANY response with some data from cache */
static struct dns_msg*
fill_any(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	struct regional* region)
{
	time_t now = *env->now;
	struct dns_msg* msg = NULL;
	uint16_t lookup[] = {LDNS_RR_TYPE_A, LDNS_RR_TYPE_AAAA,
		LDNS_RR_TYPE_MX, LDNS_RR_TYPE_SOA, LDNS_RR_TYPE_NS,
		LDNS_RR_TYPE_DNAME, 0};
	int i, num=6; /* number of RR types to look up */
	log_assert(lookup[num] == 0);

	if(env->cfg->deny_any) {
		/* return empty message */
		msg = dns_msg_create(qname, qnamelen, qtype, qclass,
			region, 0);
		if(!msg) {
			return NULL;
		}
		/* set NOTIMPL for RFC 8482 */
		msg->rep->flags |= LDNS_RCODE_NOTIMPL;
		msg->rep->security = sec_status_indeterminate;
		return msg;
	}

	for(i=0; i<num; i++) {
		/* look up this RR for inclusion in type ANY response */
		struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
			env->rrset_cache, qname, qnamelen, lookup[i],
			qclass, 0, now, 0);
		struct packed_rrset_data *d;
		if(!rrset)
			continue;

		/* only if rrset from answer section */
		d = (struct packed_rrset_data*)rrset->entry.data;
		if(d->trust == rrset_trust_add_noAA ||
			d->trust == rrset_trust_auth_noAA ||
			d->trust == rrset_trust_add_AA ||
			d->trust == rrset_trust_auth_AA) {
			lock_rw_unlock(&rrset->entry.lock);
			continue;
		}

		/* create msg if none */
		if(!msg) {
			msg = dns_msg_create(qname, qnamelen, qtype, qclass,
				region, (size_t)(num-i));
			if(!msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return NULL;
			}
		}

		/* add RRset to response */
		if(!dns_msg_ansadd(msg, region, rrset, now)) {
			lock_rw_unlock(&rrset->entry.lock);
			return NULL;
		}
		lock_rw_unlock(&rrset->entry.lock);
	}
	return msg;
}

struct dns_msg*
dns_cache_lookup(struct module_env* env,
	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
	uint16_t flags, struct regional* region, struct regional* scratch,
	int no_partial, uint8_t* dpname, size_t dpnamelen)
{
	struct lruhash_entry* e;
	struct query_info k;
	hashvalue_type h;
	time_t now = *env->now;
	struct ub_packed_rrset_key* rrset;

	/* lookup first, this has both NXdomains and ANSWER responses */
	k.qname = qname;
	k.qname_len = qnamelen;
	k.qtype = qtype;
	k.qclass = qclass;
	k.local_alias = NULL;
	h = query_info_hash(&k, flags);
	e = slabhash_lookup(env->msg_cache, h, &k, 0);
	if(e) {
		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
		struct reply_info* data = (struct reply_info*)e->data;
		struct dns_msg* msg = tomsg(env, &key->key, data, region, now, 0,
			scratch);
		if(msg) {
			lock_rw_unlock(&e->lock);
			return msg;
		}
		/* could be msg==NULL; due to TTL or not all rrsets available */
		lock_rw_unlock(&e->lock);
	}

	/* see if a DNAME exists.
	 * Checked for first, to enforce that DNAMEs
	 * are more important, the CNAME is resynthesized and thus
	 * consistent with the DNAME */
	if(!no_partial &&
		(rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
		LDNS_RR_TYPE_DNAME, 1, 0, NULL, 0))) {
		/* synthesize a DNAME+CNAME message based on this */
		enum sec_status sec_status = sec_status_unchecked;
		struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k,
			&sec_status);
		if(msg) {
			struct ub_packed_rrset_key* cname_rrset;
			lock_rw_unlock(&rrset->entry.lock);
			/* now, after unlocking the DNAME rrset lock,
			 * check the sec_status, and see if we need to look
			 * up the CNAME record associated before it can
			 * be used */
			/* normally, only secure DNAMEs allowed from cache*/
			if(sec_status == sec_status_secure)
				return msg;
			/* but if we have a CNAME cached with this name, then we
			 * have previously already allowed this name to pass.
			 * the next cache lookup is going to fetch that CNAME itself,
			 * but it is better to have the (unsigned)DNAME + CNAME in
			 * that case */
			cname_rrset = rrset_cache_lookup(
				env->rrset_cache, qname, qnamelen,
				LDNS_RR_TYPE_CNAME, qclass, 0, now, 0);
			if(cname_rrset) {
				/* CNAME already synthesized by
				 * synth_dname_msg routine, so we can
				 * straight up return the msg */
				lock_rw_unlock(&cname_rrset->entry.lock);
				return msg;
			}
		} else {
			lock_rw_unlock(&rrset->entry.lock);
		}
	}

	/* see if we have CNAME for this domain,
	 * but not for DS records (which are part of the parent) */
	if(!no_partial && qtype != LDNS_RR_TYPE_DS &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
		LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
		uint8_t* wc = NULL;
		size_t wl;
		/* if the rrset is not a wildcard expansion, with wcname */
		/* because, if we return that CNAME rrset on its own, it is
		 * missing the NSEC or NSEC3 proof */
		if(!(val_rrset_wildcard(rrset, &wc, &wl) && wc != NULL)) {
			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* construct DS, DNSKEY messages from rrset cache. */
	if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY) &&
		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
		qtype, qclass, 0, now, 0))) {
		/* if the rrset is from the additional section, and the
		 * signatures have fallen off, then do not synthesize a msg;
		 * instead, allow a full query for signed results to happen.
		 * Forego all rrset data from additional section, because
		 * some signatures may not be present and cause validation
		 * failure.
		 */
		struct packed_rrset_data *d = (struct packed_rrset_data*)
			rrset->entry.data;
		if(d->trust != rrset_trust_add_noAA &&
			d->trust != rrset_trust_add_AA &&
			(qtype == LDNS_RR_TYPE_DS ||
				(d->trust != rrset_trust_auth_noAA
				&& d->trust != rrset_trust_auth_AA) )) {
			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
			if(msg) {
				lock_rw_unlock(&rrset->entry.lock);
				return msg;
			}
		}
		lock_rw_unlock(&rrset->entry.lock);
	}

	/* stop downwards cache search on NXDOMAIN.
	 * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
	 * means bla.foo also does not exist. The DNSSEC proofs are
	 * the same. We search upwards for NXDOMAINs.
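	 * This upward search is only done when harden-below-nxdomain is
	 * enabled in the configuration.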
	 */
	if(env->cfg->harden_below_nxdomain) {
		while(!dname_is_root(k.qname)) {
			if(dpname && dpnamelen
				&& !dname_subdomain_c(k.qname, dpname))
				break; /* no synth nxdomain above the stub */
			dname_remove_label(&k.qname, &k.qname_len);
			h = query_info_hash(&k, flags);
			e = slabhash_lookup(env->msg_cache, h, &k, 0);
			if(!e && k.qtype != LDNS_RR_TYPE_A &&
				env->cfg->qname_minimisation) {
				k.qtype = LDNS_RR_TYPE_A;
				h = query_info_hash(&k, flags);
				e = slabhash_lookup(env->msg_cache, h, &k, 0);
			}
			if(e) {
				struct reply_info* data = (struct reply_info*)e->data;
				struct dns_msg* msg;
				if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN
					&& data->security == sec_status_secure
					&& (data->an_numrrsets == 0 ||
						ntohs(data->rrsets[0]->rk.type) != LDNS_RR_TYPE_CNAME)
					&& (msg=tomsg(env, &k, data, region, now, 0, scratch))) {
					lock_rw_unlock(&e->lock);
					msg->qinfo.qname=qname;
					msg->qinfo.qname_len=qnamelen;
					/* check that DNSSEC really works out */
					msg->rep->security = sec_status_unchecked;
					iter_scrub_nxdomain(msg);
					return msg;
				}
				lock_rw_unlock(&e->lock);
			}
			k.qtype = qtype;
		}
	}

	/* fill common RR types for ANY response to avoid requery */
	if(qtype == LDNS_RR_TYPE_ANY) {
		return fill_any(env, qname, qnamelen, qtype, qclass, region);
	}

	return NULL;
}

int
dns_cache_store(struct module_env* env, struct query_info* msgqinf,
	struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
	struct regional* region, uint32_t flags, time_t qstarttime)
{
	struct reply_info* rep = NULL;
	/* alloc, malloc properly (not in region, like msg is) */
	rep = reply_info_copy(msgrep, env->alloc, NULL);
	if(!rep)
		return 0;
	/* ttl must be relative; i.e. 0..86400, not time(0)+86400.
	 * the env->now is added to message and RRsets in this routine. */
	/* the leeway is used to invalidate other rrsets earlier */

	if(is_referral) {
		/* store rrsets */
		struct rrset_ref ref;
		size_t i;
		for(i=0; i<rep->rrset_count; i++) {
			packed_rrset_ttl_add((struct packed_rrset_data*)
				rep->rrsets[i]->entry.data, *env->now);
			ref.key = rep->rrsets[i];
			ref.id = rep->rrsets[i]->id;
			/* ignore ret: it was in the cache, ref updated */
			/* no leeway for type NS */
			(void)rrset_cache_update(env->rrset_cache, &ref,
				env->alloc,
				((ntohs(ref.key->rk.type)==LDNS_RR_TYPE_NS
					&& !pside) ? qstarttime:*env->now + leeway));
		}
		free(rep);
		return 1;
	} else {
		/* store msg, and rrsets */
		struct query_info qinf;
		hashvalue_type h;

		qinf = *msgqinf;
		qinf.qname = memdup(msgqinf->qname, msgqinf->qname_len);
		if(!qinf.qname) {
			reply_info_parsedelete(rep, env->alloc);
			return 0;
		}
		/* fixup flags to be sensible for a reply based on the cache */
		/* this module means that RA is available. It is an answer QR.
		 * Not AA from cache. Not CD in cache (depends on client bit). */
		rep->flags |= (BIT_RA | BIT_QR);
		rep->flags &= ~(BIT_AA | BIT_CD);
		h = query_info_hash(&qinf, (uint16_t)flags);
		dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep,
			flags, region, qstarttime);
		/* qname is used inside query_info_entrysetup, and set to
		 * NULL. If it has not been used, free it. free(0) is safe.
		 */
		free(qinf.qname);
	}
	return 1;
}

int
dns_cache_prefetch_adjust(struct module_env* env, struct query_info* qinfo,
	time_t adjust, uint16_t flags)
{
	struct msgreply_entry* msg;
	msg = msg_cache_lookup(env, qinfo->qname, qinfo->qname_len,
		qinfo->qtype, qinfo->qclass, flags, *env->now, 1);
	if(msg) {
		struct reply_info* rep = (struct reply_info*)msg->entry.data;
		if(rep) {
			rep->prefetch_ttl += adjust;
			lock_rw_unlock(&msg->entry.lock);
			return 1;
		}
		lock_rw_unlock(&msg->entry.lock);
	}
	return 0;
}