xref: /freebsd/contrib/unbound/services/cache/dns.c (revision a25896ca1270e25b657ceaa8d47d5699515f5c25)
1 /*
2  * services/cache/dns.c - Cache services for DNS using msg and rrset caches.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file contains the DNS cache.
40  */
41 #include "config.h"
42 #include "iterator/iter_delegpt.h"
43 #include "validator/val_nsec.h"
44 #include "validator/val_utils.h"
45 #include "services/cache/dns.h"
46 #include "services/cache/rrset.h"
47 #include "util/data/msgreply.h"
48 #include "util/data/packed_rrset.h"
49 #include "util/data/dname.h"
50 #include "util/module.h"
51 #include "util/net_help.h"
52 #include "util/regional.h"
53 #include "util/config_file.h"
54 #include "sldns/sbuffer.h"
55 
56 /** store rrsets in the rrset cache.
57  * @param env: module environment with caches.
58  * @param rep: contains list of rrsets to store.
59  * @param now: current time.
60  * @param leeway: during prefetch, how much leeway to apply when updating TTLs.
61  * 	This makes rrsets (other than type NS) time out sooner, so they get
62  * 	updated with a new full TTL.
63  * 	Type NS does not get this, because it must not be refreshed from the
64  * 	child domain, but must keep counting down properly.
65  * @param pside: set if the NS was discovered at the parent side, so that its
66  * 	NS rrset may be updated in a prefetch situation (without becoming sticky).
67  * @param qrep: update rrsets here if the cached versions are better.
68  * @param region: for qrep allocs.
69  */
70 static void
71 store_rrsets(struct module_env* env, struct reply_info* rep, time_t now,
72 	time_t leeway, int pside, struct reply_info* qrep,
73 	struct regional* region)
74 {
75         size_t i;
76         /* see if rrset already exists in cache, if not insert it. */
77         for(i=0; i<rep->rrset_count; i++) {
78                 rep->ref[i].key = rep->rrsets[i];
79                 rep->ref[i].id = rep->rrsets[i]->id;
80 		/* update ref if it was in the cache */
81 		switch(rrset_cache_update(env->rrset_cache, &rep->ref[i],
82                         env->alloc, now + ((ntohs(rep->ref[i].key->rk.type)==
83 			LDNS_RR_TYPE_NS && !pside)?0:leeway))) {
84 		case 0: /* ref unchanged, item inserted */
85 			break;
86 		case 2: /* ref updated, cache is superior */
87 			if(region) {
88 				struct ub_packed_rrset_key* ck;
89 				lock_rw_rdlock(&rep->ref[i].key->entry.lock);
90 				/* if deleted rrset, do not copy it */
91 				if(rep->ref[i].key->id == 0)
92 					ck = NULL;
93 				else 	ck = packed_rrset_copy_region(
94 					rep->ref[i].key, region, now);
95 				lock_rw_unlock(&rep->ref[i].key->entry.lock);
96 				if(ck) {
97 					/* use cached copy if memory allows */
98 					qrep->rrsets[i] = ck;
99 				}
100 			}
101 			/* no break: also copy key item */
102 			/* the line below is matched by gcc regex and silences
103 			 * the fallthrough warning */
104 			/* fallthrough */
105 		case 1: /* ref updated, item inserted */
106                         rep->rrsets[i] = rep->ref[i].key;
107 		}
108         }
109 }
110 
111 /** delete message from message cache */
112 void
113 msg_cache_remove(struct module_env* env, uint8_t* qname, size_t qnamelen,
114 	uint16_t qtype, uint16_t qclass, uint16_t flags)
115 {
116 	struct query_info k;
117 	hashvalue_type h;
118 
119 	k.qname = qname;
120 	k.qname_len = qnamelen;
121 	k.qtype = qtype;
122 	k.qclass = qclass;
123 	k.local_alias = NULL;
124 	h = query_info_hash(&k, flags);
125 	slabhash_remove(env->msg_cache, h, &k);
126 }
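
/* Usage sketch (editor's illustration, not part of the original source):
 * to drop a cached answer, pass the wireformat qname and the same flags
 * word that was used when the message was stored, as msg_del_servfail()
 * below does:
 *
 *	msg_cache_remove(env, qinfo->qname, qinfo->qname_len,
 *		qinfo->qtype, qinfo->qclass, flags);
 */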
127 
128 /** remove servfail msg cache entry */
129 static void
130 msg_del_servfail(struct module_env* env, struct query_info* qinfo,
131 	uint32_t flags)
132 {
133 	struct msgreply_entry* e;
134 	/* see if the entry is a servfail, and then remove it, so that
135 	 * lookups move from the cache-response stage to the recursion-response
136 	 * stage */
137 	e = msg_cache_lookup(env, qinfo->qname, qinfo->qname_len,
138 		qinfo->qtype, qinfo->qclass, flags, 0, 0);
139 	if(!e) return;
140 	/* we don't check the ttl here; expired servfail entries are also
141 	 * removed.  If the user uses serve-expired, they would otherwise still
142 	 * be used to answer from cache */
143 	if(FLAGS_GET_RCODE(((struct reply_info*)e->entry.data)->flags)
144 		!= LDNS_RCODE_SERVFAIL) {
145 		lock_rw_unlock(&e->entry.lock);
146 		return;
147 	}
148 	lock_rw_unlock(&e->entry.lock);
149 	msg_cache_remove(env, qinfo->qname, qinfo->qname_len, qinfo->qtype,
150 		qinfo->qclass, flags);
151 }
152 
153 void
154 dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
155 	hashvalue_type hash, struct reply_info* rep, time_t leeway, int pside,
156 	struct reply_info* qrep, uint32_t flags, struct regional* region)
157 {
158 	struct msgreply_entry* e;
159 	time_t ttl = rep->ttl;
160 	size_t i;
161 
162 	/* store RRsets */
163         for(i=0; i<rep->rrset_count; i++) {
164 		rep->ref[i].key = rep->rrsets[i];
165 		rep->ref[i].id = rep->rrsets[i]->id;
166 	}
167 
168 	/* there was a reply_info_sortref(rep) here but it seems to be
169 	 * unnecessary, because the cache gets locked per rrset. */
170 	reply_info_set_ttls(rep, *env->now);
171 	store_rrsets(env, rep, *env->now, leeway, pside, qrep, region);
172 	if(ttl == 0 && !(flags & DNSCACHE_STORE_ZEROTTL)) {
173 		/* we do not store the message, but we did store the RRs,
174 		 * which could be useful for delegation information */
175 		verbose(VERB_ALGO, "TTL 0: dropped msg from cache");
176 		free(rep);
177 		/* if the message is SERVFAIL in cache, remove that SERVFAIL,
178 		 * so that the TTL 0 response can be returned for future
179 		 * responses (i.e. don't get answered by the servfail from
180 		 * cache, but instead go to recursion to get this TTL0
181 		 * response). */
182 		msg_del_servfail(env, qinfo, flags);
183 		return;
184 	}
185 
186 	/* store msg in the cache */
187 	reply_info_sortref(rep);
188 	if(!(e = query_info_entrysetup(qinfo, rep, hash))) {
189 		log_err("store_msg: malloc failed");
190 		return;
191 	}
192 	slabhash_insert(env->msg_cache, hash, &e->entry, rep, env->alloc);
193 }
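
/* Usage sketch (editor's illustration, not part of the original source):
 * a caller hashes the query with the same flags and then stores a
 * malloc-allocated reply_info, as dns_cache_store() below does:
 *
 *	hashvalue_type h = query_info_hash(&qinf, (uint16_t)flags);
 *	dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep,
 *		flags, region);
 *
 * rep must not live in a region; in dns_cache_store() it comes from
 * reply_info_copy().
 */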
194 
195 /** find the closest NS or DNAME rrset and return it (locked) */
196 static struct ub_packed_rrset_key*
197 find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen,
198 	uint16_t qclass, time_t now, uint16_t searchtype, int stripfront)
199 {
200 	struct ub_packed_rrset_key *rrset;
201 	uint8_t lablen;
202 
203 	if(stripfront) {
204 		/* strip off so that DNAMEs have strict subdomain match */
205 		lablen = *qname;
206 		qname += lablen + 1;
207 		qnamelen -= lablen + 1;
208 	}
209 
210 	/* snip off front part of qname until the type is found */
211 	while(qnamelen > 0) {
212 		if((rrset = rrset_cache_lookup(env->rrset_cache, qname,
213 			qnamelen, searchtype, qclass, 0, now, 0)))
214 			return rrset;
215 
216 		/* snip off front label */
217 		lablen = *qname;
218 		qname += lablen + 1;
219 		qnamelen -= lablen + 1;
220 	}
221 	return NULL;
222 }
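
/* Illustration (editor's note): for qname "a.b.example.com." and
 * searchtype LDNS_RR_TYPE_NS, the loop above probes the rrset cache for
 * "a.b.example.com.", "b.example.com.", "example.com.", "com." and
 * finally the root, returning the first match (locked).  With stripfront
 * set, the first probe is skipped, so a DNAME match is always a strict
 * subdomain match. */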
223 
224 /** add addr to additional section */
225 static void
226 addr_to_additional(struct ub_packed_rrset_key* rrset, struct regional* region,
227 	struct dns_msg* msg, time_t now)
228 {
229 	if((msg->rep->rrsets[msg->rep->rrset_count] =
230 		packed_rrset_copy_region(rrset, region, now))) {
231 		msg->rep->ar_numrrsets++;
232 		msg->rep->rrset_count++;
233 	}
234 }
235 
236 /** lookup message in message cache */
237 struct msgreply_entry*
238 msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen,
239 	uint16_t qtype, uint16_t qclass, uint16_t flags, time_t now, int wr)
240 {
241 	struct lruhash_entry* e;
242 	struct query_info k;
243 	hashvalue_type h;
244 
245 	k.qname = qname;
246 	k.qname_len = qnamelen;
247 	k.qtype = qtype;
248 	k.qclass = qclass;
249 	k.local_alias = NULL;
250 	h = query_info_hash(&k, flags);
251 	e = slabhash_lookup(env->msg_cache, h, &k, wr);
252 
253 	if(!e) return NULL;
254 	if( now > ((struct reply_info*)e->data)->ttl ) {
255 		lock_rw_unlock(&e->lock);
256 		return NULL;
257 	}
258 	return (struct msgreply_entry*)e->key;
259 }
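
/* Usage sketch (editor's illustration, not part of the original source):
 * the returned entry is locked (read lock when wr is 0); the caller must
 * unlock it when done, as msg_del_servfail() above does:
 *
 *	struct msgreply_entry* e = msg_cache_lookup(env, qname, qnamelen,
 *		LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, flags, *env->now, 0);
 *	if(e) {
 *		struct reply_info* rep = (struct reply_info*)e->entry.data;
 *		(inspect rep here)
 *		lock_rw_unlock(&e->entry.lock);
 *	}
 */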
260 
261 /** find and add A and AAAA records for nameservers in delegpt */
262 static int
263 find_add_addrs(struct module_env* env, uint16_t qclass,
264 	struct regional* region, struct delegpt* dp, time_t now,
265 	struct dns_msg** msg)
266 {
267 	struct delegpt_ns* ns;
268 	struct msgreply_entry* neg;
269 	struct ub_packed_rrset_key* akey;
270 	for(ns = dp->nslist; ns; ns = ns->next) {
271 		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
272 			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
273 		if(akey) {
274 			if(!delegpt_add_rrset_A(dp, region, akey, 0)) {
275 				lock_rw_unlock(&akey->entry.lock);
276 				return 0;
277 			}
278 			if(msg)
279 				addr_to_additional(akey, region, *msg, now);
280 			lock_rw_unlock(&akey->entry.lock);
281 		} else {
282 			/* BIT_CD on false because delegpt lookup does
283 			 * not use dns64 translation */
284 			neg = msg_cache_lookup(env, ns->name, ns->namelen,
285 				LDNS_RR_TYPE_A, qclass, 0, now, 0);
286 			if(neg) {
287 				delegpt_add_neg_msg(dp, neg);
288 				lock_rw_unlock(&neg->entry.lock);
289 			}
290 		}
291 		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
292 			ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
293 		if(akey) {
294 			if(!delegpt_add_rrset_AAAA(dp, region, akey, 0)) {
295 				lock_rw_unlock(&akey->entry.lock);
296 				return 0;
297 			}
298 			if(msg)
299 				addr_to_additional(akey, region, *msg, now);
300 			lock_rw_unlock(&akey->entry.lock);
301 		} else {
302 			/* BIT_CD on false because delegpt lookup does
303 			 * not use dns64 translation */
304 			neg = msg_cache_lookup(env, ns->name, ns->namelen,
305 				LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
306 			if(neg) {
307 				delegpt_add_neg_msg(dp, neg);
308 				lock_rw_unlock(&neg->entry.lock);
309 			}
310 		}
311 	}
312 	return 1;
313 }
314 
315 /** find and add A and AAAA records for missing nameservers in delegpt */
316 int
317 cache_fill_missing(struct module_env* env, uint16_t qclass,
318 	struct regional* region, struct delegpt* dp)
319 {
320 	struct delegpt_ns* ns;
321 	struct msgreply_entry* neg;
322 	struct ub_packed_rrset_key* akey;
323 	time_t now = *env->now;
324 	for(ns = dp->nslist; ns; ns = ns->next) {
325 		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
326 			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
327 		if(akey) {
328 			if(!delegpt_add_rrset_A(dp, region, akey, ns->lame)) {
329 				lock_rw_unlock(&akey->entry.lock);
330 				return 0;
331 			}
332 			log_nametypeclass(VERB_ALGO, "found in cache",
333 				ns->name, LDNS_RR_TYPE_A, qclass);
334 			lock_rw_unlock(&akey->entry.lock);
335 		} else {
336 			/* BIT_CD on false because delegpt lookup does
337 			 * not use dns64 translation */
338 			neg = msg_cache_lookup(env, ns->name, ns->namelen,
339 				LDNS_RR_TYPE_A, qclass, 0, now, 0);
340 			if(neg) {
341 				delegpt_add_neg_msg(dp, neg);
342 				lock_rw_unlock(&neg->entry.lock);
343 			}
344 		}
345 		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
346 			ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
347 		if(akey) {
348 			if(!delegpt_add_rrset_AAAA(dp, region, akey, ns->lame)) {
349 				lock_rw_unlock(&akey->entry.lock);
350 				return 0;
351 			}
352 			log_nametypeclass(VERB_ALGO, "found in cache",
353 				ns->name, LDNS_RR_TYPE_AAAA, qclass);
354 			lock_rw_unlock(&akey->entry.lock);
355 		} else {
356 			/* BIT_CD on false because delegpt lookup does
357 			 * not use dns64 translation */
358 			neg = msg_cache_lookup(env, ns->name, ns->namelen,
359 				LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
360 			if(neg) {
361 				delegpt_add_neg_msg(dp, neg);
362 				lock_rw_unlock(&neg->entry.lock);
363 			}
364 		}
365 	}
366 	return 1;
367 }
368 
369 /** find and add DS or NSEC to delegation msg */
370 static void
371 find_add_ds(struct module_env* env, struct regional* region,
372 	struct dns_msg* msg, struct delegpt* dp, time_t now)
373 {
374 	/* Lookup the DS or NSEC at the delegation point. */
375 	struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
376 		env->rrset_cache, dp->name, dp->namelen, LDNS_RR_TYPE_DS,
377 		msg->qinfo.qclass, 0, now, 0);
378 	if(!rrset) {
379 		/* NOTE: this won't work for alternate NSEC schemes
380 		 *	(opt-in, NSEC3) */
381 		rrset = rrset_cache_lookup(env->rrset_cache, dp->name,
382 			dp->namelen, LDNS_RR_TYPE_NSEC, msg->qinfo.qclass,
383 			0, now, 0);
384 		/* Note: the PACKED_RRSET_NSEC_AT_APEX flag is not used;
385 		 * since this is a referral, we need the NSEC at the parent
386 		 * side of the zone cut, not the NSEC at the apex side. */
387 		if(rrset && nsec_has_type(rrset, LDNS_RR_TYPE_DS)) {
388 			lock_rw_unlock(&rrset->entry.lock);
389 			rrset = NULL; /* discard wrong NSEC */
390 		}
391 	}
392 	if(rrset) {
393 		/* add it to auth section. This is the second rrset. */
394 		if((msg->rep->rrsets[msg->rep->rrset_count] =
395 			packed_rrset_copy_region(rrset, region, now))) {
396 			msg->rep->ns_numrrsets++;
397 			msg->rep->rrset_count++;
398 		}
399 		lock_rw_unlock(&rrset->entry.lock);
400 	}
401 }
402 
403 struct dns_msg*
404 dns_msg_create(uint8_t* qname, size_t qnamelen, uint16_t qtype,
405 	uint16_t qclass, struct regional* region, size_t capacity)
406 {
407 	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region,
408 		sizeof(struct dns_msg));
409 	if(!msg)
410 		return NULL;
411 	msg->qinfo.qname = regional_alloc_init(region, qname, qnamelen);
412 	if(!msg->qinfo.qname)
413 		return NULL;
414 	msg->qinfo.qname_len = qnamelen;
415 	msg->qinfo.qtype = qtype;
416 	msg->qinfo.qclass = qclass;
417 	msg->qinfo.local_alias = NULL;
418 	/* non-packed reply_info, because it needs to grow the array */
419 	msg->rep = (struct reply_info*)regional_alloc_zero(region,
420 		sizeof(struct reply_info)-sizeof(struct rrset_ref));
421 	if(!msg->rep)
422 		return NULL;
423 	if(capacity > RR_COUNT_MAX)
424 		return NULL; /* integer overflow protection */
425 	msg->rep->flags = BIT_QR; /* with QR, no AA */
426 	msg->rep->qdcount = 1;
427 	msg->rep->rrsets = (struct ub_packed_rrset_key**)
428 		regional_alloc(region,
429 		capacity*sizeof(struct ub_packed_rrset_key*));
430 	if(!msg->rep->rrsets)
431 		return NULL;
432 	return msg;
433 }
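
/* Usage sketch (editor's illustration, not part of the original source):
 * the capacity is a hard upper bound, since the rrset array cannot grow
 * later; fill_any() below sizes it to the number of lookups left and then
 * appends with dns_msg_ansadd():
 *
 *	(capacity of 2: room for up to two rrsets)
 *	struct dns_msg* m = dns_msg_create(qname, qnamelen, qtype, qclass,
 *		region, 2);
 *	if(m && !dns_msg_ansadd(m, region, rrset, now))
 *		m = NULL;	(treat as out of memory)
 */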
434 
435 int
436 dns_msg_authadd(struct dns_msg* msg, struct regional* region,
437 	struct ub_packed_rrset_key* rrset, time_t now)
438 {
439 	if(!(msg->rep->rrsets[msg->rep->rrset_count++] =
440 		packed_rrset_copy_region(rrset, region, now)))
441 		return 0;
442 	msg->rep->ns_numrrsets++;
443 	return 1;
444 }
445 
446 int
447 dns_msg_ansadd(struct dns_msg* msg, struct regional* region,
448 	struct ub_packed_rrset_key* rrset, time_t now)
449 {
450 	if(!(msg->rep->rrsets[msg->rep->rrset_count++] =
451 		packed_rrset_copy_region(rrset, region, now)))
452 		return 0;
453 	msg->rep->an_numrrsets++;
454 	return 1;
455 }
456 
457 struct delegpt*
458 dns_cache_find_delegation(struct module_env* env, uint8_t* qname,
459 	size_t qnamelen, uint16_t qtype, uint16_t qclass,
460 	struct regional* region, struct dns_msg** msg, time_t now)
461 {
462 	/* try to find closest NS rrset */
463 	struct ub_packed_rrset_key* nskey;
464 	struct packed_rrset_data* nsdata;
465 	struct delegpt* dp;
466 
467 	nskey = find_closest_of_type(env, qname, qnamelen, qclass, now,
468 		LDNS_RR_TYPE_NS, 0);
469 	if(!nskey) /* hope the caller has hints to prime or something */
470 		return NULL;
471 	nsdata = (struct packed_rrset_data*)nskey->entry.data;
472 	/* got the NS key, create delegation point */
473 	dp = delegpt_create(region);
474 	if(!dp || !delegpt_set_name(dp, region, nskey->rk.dname)) {
475 		lock_rw_unlock(&nskey->entry.lock);
476 		log_err("find_delegation: out of memory");
477 		return NULL;
478 	}
479 	/* create referral message */
480 	if(msg) {
481 		/* allocate the array to as much as we could need:
482 		 *	NS rrset + DS/NSEC rrset +
483 		 *	A rrset for every NS RR
484 		 *	AAAA rrset for every NS RR
485 		 */
486 		*msg = dns_msg_create(qname, qnamelen, qtype, qclass, region,
487 			2 + nsdata->count*2);
488 		if(!*msg || !dns_msg_authadd(*msg, region, nskey, now)) {
489 			lock_rw_unlock(&nskey->entry.lock);
490 			log_err("find_delegation: out of memory");
491 			return NULL;
492 		}
493 	}
494 	if(!delegpt_rrset_add_ns(dp, region, nskey, 0))
495 		log_err("find_delegation: addns out of memory");
496 	lock_rw_unlock(&nskey->entry.lock); /* first unlock before next lookup*/
497 	/* find and add DS/NSEC (if any) */
498 	if(msg)
499 		find_add_ds(env, region, *msg, dp, now);
500 	/* find and add A entries */
501 	if(!find_add_addrs(env, qclass, region, dp, now, msg))
502 		log_err("find_delegation: addrs out of memory");
503 	return dp;
504 }
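
/* Usage sketch (editor's illustration, not part of the original source):
 *
 *	struct dns_msg* ref_msg = NULL;
 *	struct delegpt* dp = dns_cache_find_delegation(env, qname, qnamelen,
 *		LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, region, &ref_msg, *env->now);
 *	if(!dp)
 *		(nothing usable cached; prime with hints instead)
 *
 * Both dp and ref_msg are allocated in the given region and need no
 * explicit free; they are valid for the lifetime of that region.
 */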
505 
506 /** allocate dns_msg from query_info and reply_info */
507 static struct dns_msg*
508 gen_dns_msg(struct regional* region, struct query_info* q, size_t num)
509 {
510 	struct dns_msg* msg = (struct dns_msg*)regional_alloc(region,
511 		sizeof(struct dns_msg));
512 	if(!msg)
513 		return NULL;
514 	memcpy(&msg->qinfo, q, sizeof(struct query_info));
515 	msg->qinfo.qname = regional_alloc_init(region, q->qname, q->qname_len);
516 	if(!msg->qinfo.qname)
517 		return NULL;
518 	/* allocate replyinfo struct and rrset key array separately */
519 	msg->rep = (struct reply_info*)regional_alloc(region,
520 		sizeof(struct reply_info) - sizeof(struct rrset_ref));
521 	if(!msg->rep)
522 		return NULL;
523 	if(num > RR_COUNT_MAX)
524 		return NULL; /* integer overflow protection */
525 	msg->rep->rrsets = (struct ub_packed_rrset_key**)
526 		regional_alloc(region,
527 		num * sizeof(struct ub_packed_rrset_key*));
528 	if(!msg->rep->rrsets)
529 		return NULL;
530 	return msg;
531 }
532 
533 struct dns_msg*
534 tomsg(struct module_env* env, struct query_info* q, struct reply_info* r,
535 	struct regional* region, time_t now, struct regional* scratch)
536 {
537 	struct dns_msg* msg;
538 	size_t i;
539 	if(now > r->ttl)
540 		return NULL;
541 	msg = gen_dns_msg(region, q, r->rrset_count);
542 	if(!msg)
543 		return NULL;
544 	msg->rep->flags = r->flags;
545 	msg->rep->qdcount = r->qdcount;
546 	msg->rep->ttl = r->ttl - now;
547 	if(r->prefetch_ttl > now)
548 		msg->rep->prefetch_ttl = r->prefetch_ttl - now;
549 	else	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
550 	msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL;
551 	msg->rep->security = r->security;
552 	msg->rep->an_numrrsets = r->an_numrrsets;
553 	msg->rep->ns_numrrsets = r->ns_numrrsets;
554 	msg->rep->ar_numrrsets = r->ar_numrrsets;
555 	msg->rep->rrset_count = r->rrset_count;
556         msg->rep->authoritative = r->authoritative;
557 	if(!rrset_array_lock(r->ref, r->rrset_count, now))
558 		return NULL;
559 	if(r->an_numrrsets > 0 && (r->rrsets[0]->rk.type == htons(
560 		LDNS_RR_TYPE_CNAME) || r->rrsets[0]->rk.type == htons(
561 		LDNS_RR_TYPE_DNAME)) && !reply_check_cname_chain(q, r)) {
562 		/* cname chain is now invalid, reconstruct msg */
563 		rrset_array_unlock(r->ref, r->rrset_count);
564 		return NULL;
565 	}
566 	if(r->security == sec_status_secure && !reply_all_rrsets_secure(r)) {
567 		/* message rrsets have changed status, revalidate */
568 		rrset_array_unlock(r->ref, r->rrset_count);
569 		return NULL;
570 	}
571 	for(i=0; i<msg->rep->rrset_count; i++) {
572 		msg->rep->rrsets[i] = packed_rrset_copy_region(r->rrsets[i],
573 			region, now);
574 		if(!msg->rep->rrsets[i]) {
575 			rrset_array_unlock(r->ref, r->rrset_count);
576 			return NULL;
577 		}
578 	}
579 	if(env)
580 		rrset_array_unlock_touch(env->rrset_cache, scratch, r->ref,
581 		r->rrset_count);
582 	else
583 		rrset_array_unlock(r->ref, r->rrset_count);
584 	return msg;
585 }
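
/* Illustration (editor's note): cached TTLs are absolute times, and
 * tomsg() converts them back to relative values.  For example, an entry
 * stored at time 1000 with a 300 second TTL has r->ttl == 1300; at
 * now == 1100 the returned message gets msg->rep->ttl == 200, and once
 * now > 1300 the entry is no longer usable and NULL is returned. */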
586 
587 /** synthesize RRset-only response from cached RRset item */
588 static struct dns_msg*
589 rrset_msg(struct ub_packed_rrset_key* rrset, struct regional* region,
590 	time_t now, struct query_info* q)
591 {
592 	struct dns_msg* msg;
593 	struct packed_rrset_data* d = (struct packed_rrset_data*)
594 		rrset->entry.data;
595 	if(now > d->ttl)
596 		return NULL;
597 	msg = gen_dns_msg(region, q, 1); /* only the CNAME (or other) RRset */
598 	if(!msg)
599 		return NULL;
600 	msg->rep->flags = BIT_QR; /* reply, no AA, no error */
601         msg->rep->authoritative = 0; /* reply stored in cache can't be authoritative */
602 	msg->rep->qdcount = 1;
603 	msg->rep->ttl = d->ttl - now;
604 	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
605 	msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL;
606 	msg->rep->security = sec_status_unchecked;
607 	msg->rep->an_numrrsets = 1;
608 	msg->rep->ns_numrrsets = 0;
609 	msg->rep->ar_numrrsets = 0;
610 	msg->rep->rrset_count = 1;
611 	msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now);
612 	if(!msg->rep->rrsets[0]) /* copy CNAME */
613 		return NULL;
614 	return msg;
615 }
616 
617 /** synthesize DNAME+CNAME response from cached DNAME item */
618 static struct dns_msg*
619 synth_dname_msg(struct ub_packed_rrset_key* rrset, struct regional* region,
620 	time_t now, struct query_info* q, enum sec_status* sec_status)
621 {
622 	struct dns_msg* msg;
623 	struct ub_packed_rrset_key* ck;
624 	struct packed_rrset_data* newd, *d = (struct packed_rrset_data*)
625 		rrset->entry.data;
626 	uint8_t* newname, *dtarg = NULL;
627 	size_t newlen, dtarglen;
628 	if(now > d->ttl)
629 		return NULL;
630 	/* only allow validated (with DNSSEC) DNAMEs to be used from cache;
631 	 * for insecure DNAMEs, query again. */
632 	*sec_status = d->security;
633 	/* return sec status, so the status of the CNAME can be checked
634 	 * by the calling routine. */
635 	msg = gen_dns_msg(region, q, 2); /* DNAME + CNAME RRset */
636 	if(!msg)
637 		return NULL;
638 	msg->rep->flags = BIT_QR; /* reply, no AA, no error */
639         msg->rep->authoritative = 0; /* reply stored in cache can't be authoritative */
640 	msg->rep->qdcount = 1;
641 	msg->rep->ttl = d->ttl - now;
642 	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
643 	msg->rep->serve_expired_ttl = msg->rep->ttl + SERVE_EXPIRED_TTL;
644 	msg->rep->security = sec_status_unchecked;
645 	msg->rep->an_numrrsets = 1;
646 	msg->rep->ns_numrrsets = 0;
647 	msg->rep->ar_numrrsets = 0;
648 	msg->rep->rrset_count = 1;
649 	msg->rep->rrsets[0] = packed_rrset_copy_region(rrset, region, now);
650 	if(!msg->rep->rrsets[0]) /* copy DNAME */
651 		return NULL;
652 	/* synth CNAME rrset */
653 	get_cname_target(rrset, &dtarg, &dtarglen);
654 	if(!dtarg)
655 		return NULL;
656 	newlen = q->qname_len + dtarglen - rrset->rk.dname_len;
657 	if(newlen > LDNS_MAX_DOMAINLEN) {
658 		msg->rep->flags |= LDNS_RCODE_YXDOMAIN;
659 		return msg;
660 	}
661 	newname = (uint8_t*)regional_alloc(region, newlen);
662 	if(!newname)
663 		return NULL;
664 	/* new name is concatenation of qname front (without DNAME owner)
665 	 * and DNAME target name */
666 	memcpy(newname, q->qname, q->qname_len-rrset->rk.dname_len);
667 	memmove(newname+(q->qname_len-rrset->rk.dname_len), dtarg, dtarglen);
668 	/* create rest of CNAME rrset */
669 	ck = (struct ub_packed_rrset_key*)regional_alloc(region,
670 		sizeof(struct ub_packed_rrset_key));
671 	if(!ck)
672 		return NULL;
673 	memset(&ck->entry, 0, sizeof(ck->entry));
674 	msg->rep->rrsets[1] = ck;
675 	ck->entry.key = ck;
676 	ck->rk.type = htons(LDNS_RR_TYPE_CNAME);
677 	ck->rk.rrset_class = rrset->rk.rrset_class;
678 	ck->rk.flags = 0;
679 	ck->rk.dname = regional_alloc_init(region, q->qname, q->qname_len);
680 	if(!ck->rk.dname)
681 		return NULL;
682 	ck->rk.dname_len = q->qname_len;
683 	ck->entry.hash = rrset_key_hash(&ck->rk);
684 	newd = (struct packed_rrset_data*)regional_alloc_zero(region,
685 		sizeof(struct packed_rrset_data) + sizeof(size_t) +
686 		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t)
687 		+ newlen);
688 	if(!newd)
689 		return NULL;
690 	ck->entry.data = newd;
691 	newd->ttl = 0; /* 0 for synthesized CNAME TTL */
692 	newd->count = 1;
693 	newd->rrsig_count = 0;
694 	newd->trust = rrset_trust_ans_noAA;
695 	newd->rr_len = (size_t*)((uint8_t*)newd +
696 		sizeof(struct packed_rrset_data));
697 	newd->rr_len[0] = newlen + sizeof(uint16_t);
698 	packed_rrset_ptr_fixup(newd);
699 	newd->rr_ttl[0] = newd->ttl;
700 	msg->rep->ttl = newd->ttl;
701 	msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(newd->ttl);
702 	msg->rep->serve_expired_ttl = newd->ttl + SERVE_EXPIRED_TTL;
703 	sldns_write_uint16(newd->rr_data[0], newlen);
704 	memmove(newd->rr_data[0] + sizeof(uint16_t), newname, newlen);
705 	msg->rep->an_numrrsets ++;
706 	msg->rep->rrset_count ++;
707 	return msg;
708 }
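
/* Illustration (editor's note): the synthesized CNAME keeps the qname as
 * owner and swaps the DNAME owner suffix for the DNAME target.  For
 * qname "www.foo.example." with a cached DNAME at "foo.example."
 * pointing to "bar.example.", newlen = qname_len + dtarglen - dname_len
 * (17 + 13 - 13 = 17) and the CNAME target becomes "www.bar.example.".
 * If the result would exceed LDNS_MAX_DOMAINLEN, the message is returned
 * with RCODE YXDOMAIN instead. */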
709 
710 /** Fill TYPE_ANY response with some data from cache */
711 static struct dns_msg*
712 fill_any(struct module_env* env,
713 	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
714 	struct regional* region)
715 {
716 	time_t now = *env->now;
717 	struct dns_msg* msg = NULL;
718 	uint16_t lookup[] = {LDNS_RR_TYPE_A, LDNS_RR_TYPE_AAAA,
719 		LDNS_RR_TYPE_MX, LDNS_RR_TYPE_SOA, LDNS_RR_TYPE_NS,
720 		LDNS_RR_TYPE_DNAME, 0};
721 	int i, num=6; /* number of RR types to look up */
722 	log_assert(lookup[num] == 0);
723 
724 	for(i=0; i<num; i++) {
725 		/* look up this RR for inclusion in type ANY response */
726 		struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
727 			env->rrset_cache, qname, qnamelen, lookup[i],
728 			qclass, 0, now, 0);
729 		struct packed_rrset_data *d;
730 		if(!rrset)
731 			continue;
732 
733 		/* only if rrset from answer section */
734 		d = (struct packed_rrset_data*)rrset->entry.data;
735 		if(d->trust == rrset_trust_add_noAA ||
736 			d->trust == rrset_trust_auth_noAA ||
737 			d->trust == rrset_trust_add_AA ||
738 			d->trust == rrset_trust_auth_AA) {
739 			lock_rw_unlock(&rrset->entry.lock);
740 			continue;
741 		}
742 
743 		/* create msg if none */
744 		if(!msg) {
745 			msg = dns_msg_create(qname, qnamelen, qtype, qclass,
746 				region, (size_t)(num-i));
747 			if(!msg) {
748 				lock_rw_unlock(&rrset->entry.lock);
749 				return NULL;
750 			}
751 		}
752 
753 		/* add RRset to response */
754 		if(!dns_msg_ansadd(msg, region, rrset, now)) {
755 			lock_rw_unlock(&rrset->entry.lock);
756 			return NULL;
757 		}
758 		lock_rw_unlock(&rrset->entry.lock);
759 	}
760 	return msg;
761 }
762 
763 struct dns_msg*
764 dns_cache_lookup(struct module_env* env,
765 	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
766 	uint16_t flags, struct regional* region, struct regional* scratch,
767 	int no_partial)
768 {
769 	struct lruhash_entry* e;
770 	struct query_info k;
771 	hashvalue_type h;
772 	time_t now = *env->now;
773 	struct ub_packed_rrset_key* rrset;
774 
775 	/* lookup first, this has both NXdomains and ANSWER responses */
776 	k.qname = qname;
777 	k.qname_len = qnamelen;
778 	k.qtype = qtype;
779 	k.qclass = qclass;
780 	k.local_alias = NULL;
781 	h = query_info_hash(&k, flags);
782 	e = slabhash_lookup(env->msg_cache, h, &k, 0);
783 	if(e) {
784 		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
785 		struct reply_info* data = (struct reply_info*)e->data;
786 		struct dns_msg* msg = tomsg(env, &key->key, data, region, now,
787 			scratch);
788 		if(msg) {
789 			lock_rw_unlock(&e->lock);
790 			return msg;
791 		}
792 		/* could be msg==NULL; due to TTL or not all rrsets available */
793 		lock_rw_unlock(&e->lock);
794 	}
795 
796 	/* see if a DNAME exists. It is checked first, to enforce that DNAMEs
797 	 * take precedence; the CNAME is resynthesized and is thus
798 	 * consistent with the DNAME */
799 	if(!no_partial &&
800 		(rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
801 		LDNS_RR_TYPE_DNAME, 1))) {
802 		/* synthesize a DNAME+CNAME message based on this */
803 		enum sec_status sec_status = sec_status_unchecked;
804 		struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k,
805 			&sec_status);
806 		if(msg) {
807 			struct ub_packed_rrset_key* cname_rrset;
808 			lock_rw_unlock(&rrset->entry.lock);
809 			/* now, after unlocking the DNAME rrset lock,
810 			 * check the sec_status, and see if we need to look
811 			 * up the associated CNAME record before the message
812 			 * can be used */
813 			/* normally, only secure DNAMEs are allowed from cache */
814 			if(sec_status == sec_status_secure)
815 				return msg;
816 			/* but if we have a CNAME cached for this name, then we
817 			 * have already allowed this name to pass before.
818 			 * The next cache lookup would fetch that CNAME by itself,
819 			 * but it is better to return the (unsigned) DNAME + CNAME
820 			 * in that case */
821 			cname_rrset = rrset_cache_lookup(
822 				env->rrset_cache, qname, qnamelen,
823 				LDNS_RR_TYPE_CNAME, qclass, 0, now, 0);
824 			if(cname_rrset) {
825 				/* CNAME already synthesized by
826 				 * synth_dname_msg routine, so we can
827 				 * straight up return the msg */
828 				lock_rw_unlock(&cname_rrset->entry.lock);
829 				return msg;
830 			}
831 		} else {
832 			lock_rw_unlock(&rrset->entry.lock);
833 		}
834 	}
835 
836 	/* see if we have CNAME for this domain,
837 	 * but not for DS records (which are part of the parent) */
838 	if(!no_partial && qtype != LDNS_RR_TYPE_DS &&
839 	   (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
840 		LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
841 		uint8_t* wc = NULL;
842 		size_t wl;
843 		/* only use the rrset if it is not a wildcard expansion (with a
844 		 * wildcard name), because, if we returned that CNAME rrset on
845 		 * its own, it would be missing the NSEC or NSEC3 proof */
846 		if(!(val_rrset_wildcard(rrset, &wc, &wl) && wc != NULL)) {
847 			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
848 			if(msg) {
849 				lock_rw_unlock(&rrset->entry.lock);
850 				return msg;
851 			}
852 		}
853 		lock_rw_unlock(&rrset->entry.lock);
854 	}
855 
856 	/* construct DS, DNSKEY, DLV messages from rrset cache. */
857 	if((qtype == LDNS_RR_TYPE_DS || qtype == LDNS_RR_TYPE_DNSKEY ||
858 		qtype == LDNS_RR_TYPE_DLV) &&
859 		(rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
860 		qtype, qclass, 0, now, 0))) {
861 		/* if the rrset is from the additional section, and the
862 		 * signatures have fallen off, then do not synthesize a msg;
863 		 * instead, allow a full query for signed results to happen.
864 		 * Forego all rrset data from the additional section, because
865 		 * some signatures may not be present and would cause
866 		 * validation failure.
867 		 */
868 		struct packed_rrset_data *d = (struct packed_rrset_data*)
869 			rrset->entry.data;
870 		if(d->trust != rrset_trust_add_noAA &&
871 			d->trust != rrset_trust_add_AA &&
872 			(qtype == LDNS_RR_TYPE_DS ||
873 				(d->trust != rrset_trust_auth_noAA
874 				&& d->trust != rrset_trust_auth_AA) )) {
875 			struct dns_msg* msg = rrset_msg(rrset, region, now, &k);
876 			if(msg) {
877 				lock_rw_unlock(&rrset->entry.lock);
878 				return msg;
879 			}
880 		}
881 		lock_rw_unlock(&rrset->entry.lock);
882 	}
883 
884 	/* stop downwards cache search on NXDOMAIN.
885 	 * Empty nonterminals are NOERROR, so an NXDOMAIN for foo
886 	 * means bla.foo also does not exist.  The DNSSEC proofs are
887 	 * the same.  We search upwards for NXDOMAINs. */
888 	if(env->cfg->harden_below_nxdomain)
889 	    while(!dname_is_root(k.qname)) {
890 		dname_remove_label(&k.qname, &k.qname_len);
891 		h = query_info_hash(&k, flags);
892 		e = slabhash_lookup(env->msg_cache, h, &k, 0);
893 		if(!e && k.qtype != LDNS_RR_TYPE_A &&
894 			env->cfg->qname_minimisation) {
895 			k.qtype = LDNS_RR_TYPE_A;
896 			h = query_info_hash(&k, flags);
897 			e = slabhash_lookup(env->msg_cache, h, &k, 0);
898 		}
899 		if(e) {
900 			struct reply_info* data = (struct reply_info*)e->data;
901 			struct dns_msg* msg;
902 			if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN
903 			  && data->security == sec_status_secure
904 			  && (msg=tomsg(env, &k, data, region, now, scratch))){
905 				lock_rw_unlock(&e->lock);
906 				msg->qinfo.qname=qname;
907 				msg->qinfo.qname_len=qnamelen;
908 				/* check that DNSSEC really works out */
909 				msg->rep->security = sec_status_unchecked;
910 				return msg;
911 			}
912 			lock_rw_unlock(&e->lock);
913 		}
914 		k.qtype = qtype;
915 	    }
916 
917 	/* fill common RR types for ANY response to avoid requery */
918 	if(qtype == LDNS_RR_TYPE_ANY) {
919 		return fill_any(env, qname, qnamelen, qtype, qclass, region);
920 	}
921 
922 	return NULL;
923 }
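
/* Usage sketch (editor's illustration, not part of the original source):
 * look up a complete cached answer built in the caller's region; scratch
 * is only used temporarily while touching rrsets.  With no_partial set,
 * the partial DNAME/CNAME synthesis above is skipped:
 *
 *	struct dns_msg* m = dns_cache_lookup(env, qname, qnamelen,
 *		LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, BIT_RD, region, scratch, 0);
 *	if(!m)
 *		(cache miss; recurse for the answer)
 */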
924 
925 int
926 dns_cache_store(struct module_env* env, struct query_info* msgqinf,
927         struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
928 	struct regional* region, uint32_t flags)
929 {
930 	struct reply_info* rep = NULL;
931 	/* copy the reply; allocate it properly with malloc (not in a region, like the msg is) */
932 	rep = reply_info_copy(msgrep, env->alloc, NULL);
933 	if(!rep)
934 		return 0;
935 	/* the ttl must be relative, i.e. 0..86400, not time(0)+86400;
936 	 * env->now is added to the message and RRsets in this routine. */
937 	/* the leeway is used to invalidate other rrsets earlier */
938 
939 	if(is_referral) {
940 		/* store rrsets */
941 		struct rrset_ref ref;
942 		size_t i;
943 		for(i=0; i<rep->rrset_count; i++) {
944 			packed_rrset_ttl_add((struct packed_rrset_data*)
945 				rep->rrsets[i]->entry.data, *env->now);
946 			ref.key = rep->rrsets[i];
947 			ref.id = rep->rrsets[i]->id;
948 			/* ignore the return value: it was in the cache, the ref is updated */
949 			/* no leeway for type NS */
950 			(void)rrset_cache_update(env->rrset_cache, &ref,
951 				env->alloc, *env->now +
952 				((ntohs(ref.key->rk.type)==LDNS_RR_TYPE_NS
953 				 && !pside) ? 0:leeway));
954 		}
955 		free(rep);
956 		return 1;
957 	} else {
958 		/* store msg, and rrsets */
959 		struct query_info qinf;
960 		hashvalue_type h;
961 
962 		qinf = *msgqinf;
963 		qinf.qname = memdup(msgqinf->qname, msgqinf->qname_len);
964 		if(!qinf.qname) {
965 			reply_info_parsedelete(rep, env->alloc);
966 			return 0;
967 		}
968 		/* fixup flags to be sensible for a reply based on the cache */
969 		/* this resolver module means that RA is available. It is an answer (QR).
970 		 * Not AA, since it is from cache. Not CD in cache (depends on the client's bit). */
971 		rep->flags |= (BIT_RA | BIT_QR);
972 		rep->flags &= ~(BIT_AA | BIT_CD);
973 		h = query_info_hash(&qinf, (uint16_t)flags);
974 		dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep,
975 			flags, region);
976 		/* qname is used inside query_info_entrysetup, and set to
977 		 * NULL. If it has not been used, free it. free(0) is safe. */
978 		free(qinf.qname);
979 	}
980 	return 1;
981 }
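
/* Usage sketch (editor's illustration, not part of the original source):
 * cache a parsed reply whose TTLs are still relative; the reply is copied
 * out of the region by this routine:
 *
 *	(is_referral=0, leeway=0, pside=0, flags=0)
 *	if(!dns_cache_store(env, &qinfo, rep, 0, 0, 0, region, 0))
 *		log_err("out of memory caching reply");
 */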
982 
983 int
984 dns_cache_prefetch_adjust(struct module_env* env, struct query_info* qinfo,
985         time_t adjust, uint16_t flags)
986 {
987 	struct msgreply_entry* msg;
988 	msg = msg_cache_lookup(env, qinfo->qname, qinfo->qname_len,
989 		qinfo->qtype, qinfo->qclass, flags, *env->now, 1);
990 	if(msg) {
991 		struct reply_info* rep = (struct reply_info*)msg->entry.data;
992 		if(rep) {
993 			rep->prefetch_ttl += adjust;
994 			lock_rw_unlock(&msg->entry.lock);
995 			return 1;
996 		}
997 		lock_rw_unlock(&msg->entry.lock);
998 	}
999 	return 0;
1000 }
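
/* Usage sketch (editor's illustration, not part of the original source):
 * to postpone prefetching of a cached answer by 30 seconds:
 *
 *	if(!dns_cache_prefetch_adjust(env, &qinfo, 30, flags))
 *		verbose(VERB_ALGO, "prefetch adjust: no cache entry");
 */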
1001