/*
 * iterator/iter_utils.c - iterative resolver module utility functions.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains functions to assist the iterator module:
 * configuration options and forward zones.
 */
#include "config.h"
#include "iterator/iter_utils.h"
#include "iterator/iterator.h"
#include "iterator/iter_hints.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_donotq.h"
#include "iterator/iter_delegpt.h"
#include "iterator/iter_priv.h"
#include "services/cache/infra.h"
#include "services/cache/dns.h"
#include "services/cache/rrset.h"
#include "util/net_help.h"
#include "util/module.h"
#include "util/log.h"
#include "util/config_file.h"
#include "util/regional.h"
#include "util/data/msgparse.h"
#include "util/data/dname.h"
#include "util/random.h"
#include "util/fptr_wlist.h"
#include "validator/val_anchor.h"
#include "validator/val_kcache.h"
#include "validator/val_kentry.h"
#include "validator/val_utils.h"
#include "validator/val_sigcrypt.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"

/** time when nameserver glue is said to be 'recent' */
#define SUSPICION_RECENT_EXPIRY 86400
/** penalty to validation failed blacklisted IPs */
#define BLACKLIST_PENALTY (USEFUL_SERVER_TOP_TIMEOUT*4)
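/* Note: because BLACKLIST_PENALTY is 4*USEFUL_SERVER_TOP_TIMEOUT, it lifts
 * validation-failed addresses above even the worst lameness bands (which top
 * out at rtt + 3*USEFUL_SERVER_TOP_TIMEOUT in iter_filter_unsuitable below),
 * so iter_server_selection can recognise a blacklisted pick simply by
 * selrtt > BLACKLIST_PENALTY. */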

/** fillup fetch policy array */
static void
fetch_fill(struct iter_env* ie, const char* str)
{
	char* s = (char*)str, *e;
	int i;
	for(i=0; i<ie->max_dependency_depth+1; i++) {
		ie->target_fetch_policy[i] = strtol(s, &e, 10);
		if(s == e)
			fatal_exit("cannot parse fetch policy number %s", s);
		s = e;
	}
}

/** Read config string that represents the target fetch policy */
static int
read_fetch_policy(struct iter_env* ie, const char* str)
{
	int count = cfg_count_numbers(str);
	if(count < 1) {
		log_err("Cannot parse target fetch policy: \"%s\"", str);
		return 0;
	}
	ie->max_dependency_depth = count - 1;
	ie->target_fetch_policy = (int*)calloc(
		(size_t)ie->max_dependency_depth+1, sizeof(int));
	if(!ie->target_fetch_policy) {
		log_err("alloc fetch policy: out of memory");
		return 0;
	}
	fetch_fill(ie, str);
	return 1;
}
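/* Example: the default target-fetch-policy "3 2 1 0 0" counts five numbers,
 * so max_dependency_depth becomes 4 and target_fetch_policy[0..4] is
 * {3, 2, 1, 0, 0}; the entry for depth i limits how many nameserver-address
 * (target) queries the iterator spawns at that dependency depth. */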

/** apply config caps whitelist items to name tree */
static int
caps_white_apply_cfg(rbtree_type* ntree, struct config_file* cfg)
{
	struct config_strlist* p;
	for(p=cfg->caps_whitelist; p; p=p->next) {
		struct name_tree_node* n;
		size_t len;
		uint8_t* nm = sldns_str2wire_dname(p->str, &len);
		if(!nm) {
			log_err("could not parse %s", p->str);
			return 0;
		}
		n = (struct name_tree_node*)calloc(1, sizeof(*n));
		if(!n) {
			log_err("out of memory");
			free(nm);
			return 0;
		}
		n->node.key = n;
		n->name = nm;
		n->len = len;
		n->labs = dname_count_labels(nm);
		n->dclass = LDNS_RR_CLASS_IN;
		if(!name_tree_insert(ntree, n, nm, len, n->labs, n->dclass)) {
			/* duplicate element ignored, idempotent */
			free(n->name);
			free(n);
		}
	}
	name_tree_init_parents(ntree);
	return 1;
}
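/* Example: a config line such as
 *	caps-whitelist: "example.com"
 * inserts a class-IN node for example.com. into the tree, so the iterator
 * can exempt that name (and, via the name tree's closest-enclosing lookup,
 * names under it) from 0x20 caps-for-id enforcement. */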

int
iter_apply_cfg(struct iter_env* iter_env, struct config_file* cfg)
{
	int i;
	/* target fetch policy */
	if(!read_fetch_policy(iter_env, cfg->target_fetch_policy))
		return 0;
	for(i=0; i<iter_env->max_dependency_depth+1; i++)
		verbose(VERB_QUERY, "target fetch policy for level %d is %d",
			i, iter_env->target_fetch_policy[i]);

	if(!iter_env->donotq)
		iter_env->donotq = donotq_create();
	if(!iter_env->donotq || !donotq_apply_cfg(iter_env->donotq, cfg)) {
		log_err("Could not set donotqueryaddresses");
		return 0;
	}
	if(!iter_env->priv)
		iter_env->priv = priv_create();
	if(!iter_env->priv || !priv_apply_cfg(iter_env->priv, cfg)) {
		log_err("Could not set private addresses");
		return 0;
	}
	if(cfg->caps_whitelist) {
		if(!iter_env->caps_white)
			iter_env->caps_white = rbtree_create(name_tree_compare);
		if(!iter_env->caps_white || !caps_white_apply_cfg(
			iter_env->caps_white, cfg)) {
			log_err("Could not set capsforid whitelist");
			return 0;
		}
	}
	iter_env->supports_ipv6 = cfg->do_ip6;
	iter_env->supports_ipv4 = cfg->do_ip4;
	return 1;
}

/** filter out unsuitable targets
 * @param iter_env: iterator environment with ipv6-support flag.
 * @param env: module environment with infra cache.
 * @param name: zone name
 * @param namelen: length of name
 * @param qtype: query type (host order).
 * @param now: current time
 * @param a: address in delegation point we are examining.
 * @return an integer that signals the target suitability,
 *	as follows:
 *	-1: The address should be omitted from the list.
 *	    Because:
 *		o The address is bogus (DNSSEC validation failure).
 *		o Listed as donotquery
 *		o is ipv6 but no ipv6 support (in operating system).
 *		o is ipv4 but no ipv4 support (in operating system).
 *		o is lame
 *	Otherwise, an rtt in milliseconds.
 *	0 .. USEFUL_SERVER_TOP_TIMEOUT-1
 *		The roundtrip time timeout estimate, less than 2 minutes.
 *		Note that util/rtt.c has a MIN_TIMEOUT of 50 msec, thus
 *		values 0 .. 49 are not used, unless that is changed.
 *	USEFUL_SERVER_TOP_TIMEOUT
 *		This exact value is given for unresponsive, blacklisted
 *		servers.
 *	USEFUL_SERVER_TOP_TIMEOUT+1
 *		For non-blacklisted servers: huge timeout, but has traffic.
 *	USEFUL_SERVER_TOP_TIMEOUT*1 ..
 *		parent-side lame servers get this penalty. A dispreferred
 *		server (lame in delegpt).
 *	USEFUL_SERVER_TOP_TIMEOUT*2 ..
 *		dnsseclame servers get this penalty.
 *	USEFUL_SERVER_TOP_TIMEOUT*3 ..
 *		recursion-lame servers get this penalty.
 *	UNKNOWN_SERVER_NICENESS
 *		If no information is known about the server, this is
 *		returned. 376 msec or so.
 *	+BLACKLIST_PENALTY (USEFUL_SERVER_TOP_TIMEOUT*4) for dnssec failed IPs.
 *
 * When the final chosen value is dnsseclame, dnssec lameness checking is
 * turned off (so we do not discard the reply).
 * When the final chosen value is recursionlame, the RD bit is set on the
 * query. Because of the numbers this means recursionlame servers also have
 * dnssec lameness checking turned off.
 */
static int
iter_filter_unsuitable(struct iter_env* iter_env, struct module_env* env,
	uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
	struct delegpt_addr* a)
{
	int rtt, lame, reclame, dnsseclame;
	if(a->bogus)
		return -1; /* address of server is bogus */
	if(donotq_lookup(iter_env->donotq, &a->addr, a->addrlen)) {
		log_addr(VERB_ALGO, "skip addr on the donotquery list",
			&a->addr, a->addrlen);
		return -1; /* server is on the donotquery list */
	}
	if(!iter_env->supports_ipv6 && addr_is_ip6(&a->addr, a->addrlen)) {
		return -1; /* there is no ip6 available */
	}
	if(!iter_env->supports_ipv4 && !addr_is_ip6(&a->addr, a->addrlen)) {
		return -1; /* there is no ip4 available */
	}
	/* check lameness - need zone, class info */
	if(infra_get_lame_rtt(env->infra_cache, &a->addr, a->addrlen,
		name, namelen, qtype, &lame, &dnsseclame, &reclame,
		&rtt, now)) {
		log_addr(VERB_ALGO, "servselect", &a->addr, a->addrlen);
		verbose(VERB_ALGO, "   rtt=%d%s%s%s%s", rtt,
			lame?" LAME":"",
			dnsseclame?" DNSSEC_LAME":"",
			reclame?" REC_LAME":"",
			a->lame?" ADDR_LAME":"");
		if(lame)
			return -1; /* server is lame */
		else if(rtt >= USEFUL_SERVER_TOP_TIMEOUT)
			/* server is unresponsive; we used to return
			 * TOP_TIMEOUT, but that was fairly useless, because
			 * a value == TOP_TIMEOUT is dropped later anyway when
			 * it is blacklisted. Instead, remove it here, so
			 * other choices (that are not blacklisted) can be
			 * tried */
			return -1;
		/* select remainder from worst to best */
		else if(reclame)
			return rtt+USEFUL_SERVER_TOP_TIMEOUT*3; /* nonpref */
		else if(dnsseclame || a->dnsseclame)
			return rtt+USEFUL_SERVER_TOP_TIMEOUT*2; /* nonpref */
		else if(a->lame)
			return rtt+USEFUL_SERVER_TOP_TIMEOUT+1; /* nonpref */
		else	return rtt;
	}
	/* no server information present */
	if(a->dnsseclame)
		return UNKNOWN_SERVER_NICENESS+USEFUL_SERVER_TOP_TIMEOUT*2; /* nonpref */
	else if(a->lame)
		return USEFUL_SERVER_TOP_TIMEOUT+1+UNKNOWN_SERVER_NICENESS; /* nonpref */
	return UNKNOWN_SERVER_NICENESS;
}
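/* Worked example of the bands above: with a cached rtt of 80 msec,
 * a recursion-lame server returns 80 + 3*USEFUL_SERVER_TOP_TIMEOUT,
 * a dnssec-lame server 80 + 2*USEFUL_SERVER_TOP_TIMEOUT, and a parent-side
 * lame address 80 + USEFUL_SERVER_TOP_TIMEOUT + 1; with no infra cache entry
 * at all, an address gets UNKNOWN_SERVER_NICENESS (about 376 msec). */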

/** lookup RTT information, and also store fastest rtt (if any) */
static int
iter_fill_rtt(struct iter_env* iter_env, struct module_env* env,
	uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
	struct delegpt* dp, int* best_rtt, struct sock_list* blacklist)
{
	int got_it = 0;
	struct delegpt_addr* a;
	if(dp->bogus)
		return 0; /* NS bogus, all bogus, nothing found */
	for(a=dp->result_list; a; a = a->next_result) {
		a->sel_rtt = iter_filter_unsuitable(iter_env, env,
			name, namelen, qtype, now, a);
		if(a->sel_rtt != -1) {
			if(sock_list_find(blacklist, &a->addr, a->addrlen))
				a->sel_rtt += BLACKLIST_PENALTY;

			if(!got_it) {
				*best_rtt = a->sel_rtt;
				got_it = 1;
			} else if(a->sel_rtt < *best_rtt) {
				*best_rtt = a->sel_rtt;
			}
		}
	}
	return got_it;
}

/** filter the address list, putting best targets at front,
 * returns number of best targets (or 0, no suitable targets) */
static int
iter_filter_order(struct iter_env* iter_env, struct module_env* env,
	uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
	struct delegpt* dp, int* selected_rtt, int open_target,
	struct sock_list* blacklist, time_t prefetch)
{
	int got_num = 0, low_rtt = 0, swap_to_front, rtt_band = RTT_BAND;
	struct delegpt_addr* a, *n, *prev=NULL;

	/* fillup sel_rtt and find best rtt in the bunch */
	got_num = iter_fill_rtt(iter_env, env, name, namelen, qtype, now, dp,
		&low_rtt, blacklist);
	if(got_num == 0)
		return 0;
	if(low_rtt >= USEFUL_SERVER_TOP_TIMEOUT &&
		(delegpt_count_missing_targets(dp) > 0 || open_target > 0)) {
		verbose(VERB_ALGO, "Bad choices, trying to get more choice");
		return 0; /* we want more choice. The best choice is a bad one.
			     return 0 to force the caller to fetch more */
	}

	if(env->cfg->low_rtt_permil != 0 && prefetch == 0 &&
		low_rtt < env->cfg->low_rtt &&
		ub_random_max(env->rnd, 1000) < env->cfg->low_rtt_permil) {
		/* the query is not prefetch, but for a downstream client,
		 * there is a low_rtt (fast) server.  We choose that x% of the
		 * time */
		/* pick rtt numbers from 0..LOWBAND_RTT */
		rtt_band = env->cfg->low_rtt - low_rtt;
	}

	got_num = 0;
	a = dp->result_list;
	while(a) {
		/* skip unsuitable targets */
		if(a->sel_rtt == -1) {
			prev = a;
			a = a->next_result;
			continue;
		}
		/* classify the server address and determine what to do */
		swap_to_front = 0;
		if(a->sel_rtt >= low_rtt && a->sel_rtt - low_rtt <= rtt_band) {
			got_num++;
			swap_to_front = 1;
		} else if(a->sel_rtt<low_rtt && low_rtt-a->sel_rtt<=rtt_band) {
			got_num++;
			swap_to_front = 1;
		}
		/* swap to front if necessary, or move to next result */
		if(swap_to_front && prev) {
			n = a->next_result;
			prev->next_result = n;
			a->next_result = dp->result_list;
			dp->result_list = a;
			a = n;
		} else {
			prev = a;
			a = a->next_result;
		}
	}
	*selected_rtt = low_rtt;

	if (env->cfg->prefer_ip6) {
		int got_num6 = 0;
		int low_rtt6 = 0;
		int i;
		int attempt = -1; /* filter to make sure addresses have
		  fewer attempts on them than the first, to force round
		  robin when all the IPv6 addresses fail */
		int num4ok = 0; /* number ip4 at low attempt count */
		int num4_lowrtt = 0;
		prev = NULL;
		a = dp->result_list;
		for(i = 0; i < got_num; i++) {
			swap_to_front = 0;
			if(a->addr.ss_family != AF_INET6 && attempt == -1) {
				/* if we only have ip4 at low attempt count,
				 * then ip6 is failing, and we need to
				 * select one of the remaining IPv4 addrs */
				attempt = a->attempts;
				num4ok++;
				num4_lowrtt = a->sel_rtt;
			} else if(a->addr.ss_family != AF_INET6 && attempt == a->attempts) {
				num4ok++;
				if(num4_lowrtt == 0 || a->sel_rtt < num4_lowrtt) {
					num4_lowrtt = a->sel_rtt;
				}
			}
			if(a->addr.ss_family == AF_INET6) {
				if(attempt == -1) {
					attempt = a->attempts;
				} else if(a->attempts > attempt) {
					break;
				}
				got_num6++;
				swap_to_front = 1;
				if(low_rtt6 == 0 || a->sel_rtt < low_rtt6) {
					low_rtt6 = a->sel_rtt;
				}
			}
			/* swap to front if IPv6, or move to next result */
			if(swap_to_front && prev) {
				n = a->next_result;
				prev->next_result = n;
				a->next_result = dp->result_list;
				dp->result_list = a;
				a = n;
			} else {
				prev = a;
				a = a->next_result;
			}
		}
		if(got_num6 > 0) {
			got_num = got_num6;
			*selected_rtt = low_rtt6;
		} else if(num4ok > 0) {
			got_num = num4ok;
			*selected_rtt = num4_lowrtt;
		}
	}
	return got_num;
}
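/* Illustration of the ordering above: if the result list carries sel_rtt
 * values 300, -1, 50 and 120, and 50 is the lowest, then every address whose
 * sel_rtt lies within rtt_band of 50 is swapped to the front of
 * dp->result_list and counted in got_num; the -1 (unsuitable) entry is
 * skipped and addresses outside the band keep their place further down. */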

struct delegpt_addr*
iter_server_selection(struct iter_env* iter_env,
	struct module_env* env, struct delegpt* dp,
	uint8_t* name, size_t namelen, uint16_t qtype, int* dnssec_lame,
	int* chase_to_rd, int open_target, struct sock_list* blacklist,
	time_t prefetch)
{
	int sel;
	int selrtt;
	struct delegpt_addr* a, *prev;
	int num = iter_filter_order(iter_env, env, name, namelen, qtype,
		*env->now, dp, &selrtt, open_target, blacklist, prefetch);

	if(num == 0)
		return NULL;
	verbose(VERB_ALGO, "selrtt %d", selrtt);
	if(selrtt > BLACKLIST_PENALTY) {
		if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*3) {
			verbose(VERB_ALGO, "chase to "
				"blacklisted recursion lame server");
			*chase_to_rd = 1;
		}
		if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*2) {
			verbose(VERB_ALGO, "chase to "
				"blacklisted dnssec lame server");
			*dnssec_lame = 1;
		}
	} else {
		if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*3) {
			verbose(VERB_ALGO, "chase to recursion lame server");
			*chase_to_rd = 1;
		}
		if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*2) {
			verbose(VERB_ALGO, "chase to dnssec lame server");
			*dnssec_lame = 1;
		}
		if(selrtt == USEFUL_SERVER_TOP_TIMEOUT) {
			verbose(VERB_ALGO, "chase to blacklisted lame server");
			return NULL;
		}
	}

	if(num == 1) {
		a = dp->result_list;
		if(++a->attempts < OUTBOUND_MSG_RETRY)
			return a;
		dp->result_list = a->next_result;
		return a;
	}

	/* randomly select a target from the list */
	log_assert(num > 1);
	/* grab secure random number, to pick unexpected server.
	 * also we need it to be threadsafe. */
	sel = ub_random_max(env->rnd, num);
	a = dp->result_list;
	prev = NULL;
	while(sel > 0 && a) {
		prev = a;
		a = a->next_result;
		sel--;
	}
	if(!a)  /* robustness */
		return NULL;
	if(++a->attempts < OUTBOUND_MSG_RETRY)
		return a;
	/* remove it from the delegation point result list */
	if(prev)
		prev->next_result = a->next_result;
	else	dp->result_list = a->next_result;
	return a;
}
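/* Usage note: the caller sends its query to the returned delegpt_addr; the
 * attempts counter is incremented here, and once an address reaches
 * OUTBOUND_MSG_RETRY attempts it is unlinked from dp->result_list so that
 * a subsequent call picks from the remaining candidates. */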

struct dns_msg*
dns_alloc_msg(sldns_buffer* pkt, struct msg_parse* msg,
	struct regional* region)
{
	struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!m)
		return NULL;
	memset(m, 0, sizeof(*m));
	if(!parse_create_msg(pkt, msg, NULL, &m->qinfo, &m->rep, region)) {
		log_err("malloc failure: allocating incoming dns_msg");
		return NULL;
	}
	return m;
}

struct dns_msg*
dns_copy_msg(struct dns_msg* from, struct regional* region)
{
	struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
		sizeof(struct dns_msg));
	if(!m)
		return NULL;
	m->qinfo = from->qinfo;
	if(!(m->qinfo.qname = regional_alloc_init(region, from->qinfo.qname,
		from->qinfo.qname_len)))
		return NULL;
	if(!(m->rep = reply_info_copy(from->rep, NULL, region)))
		return NULL;
	return m;
}

void
iter_dns_store(struct module_env* env, struct query_info* msgqinf,
	struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
	struct regional* region, uint16_t flags)
{
	if(!dns_cache_store(env, msgqinf, msgrep, is_referral, leeway,
		pside, region, flags))
		log_err("out of memory: cannot store data in cache");
}

int
iter_ns_probability(struct ub_randstate* rnd, int n, int m)
{
	int sel;
	if(n == m) /* 100% chance */
		return 1;
	/* we do not need secure random numbers here, but
	 * we do need it to be threadsafe, so we use this */
	sel = ub_random_max(rnd, m);
	return (sel < n);
}
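/* Example: iter_ns_probability(rnd, 3, 10) returns true with probability
 * 3/10, and iter_ns_probability(rnd, 5, 5) always returns true. */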

/** detect dependency cycle for query and target */
static int
causes_cycle(struct module_qstate* qstate, uint8_t* name, size_t namelen,
	uint16_t t, uint16_t c)
{
	struct query_info qinf;
	qinf.qname = name;
	qinf.qname_len = namelen;
	qinf.qtype = t;
	qinf.qclass = c;
	qinf.local_alias = NULL;
	fptr_ok(fptr_whitelist_modenv_detect_cycle(
		qstate->env->detect_cycle));
	return (*qstate->env->detect_cycle)(qstate, &qinf,
		(uint16_t)(BIT_RD|BIT_CD), qstate->is_priming,
		qstate->is_valrec);
}

void
iter_mark_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
{
	struct delegpt_ns* ns;
	for(ns = dp->nslist; ns; ns = ns->next) {
		if(ns->resolved)
			continue;
		/* see if this ns as target causes dependency cycle */
		if(causes_cycle(qstate, ns->name, ns->namelen,
			LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass) ||
		   causes_cycle(qstate, ns->name, ns->namelen,
			LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
			log_nametypeclass(VERB_QUERY, "skipping target due "
				"to dependency cycle (harden-glue: no may "
				"fix some of the cycles)",
				ns->name, LDNS_RR_TYPE_A,
				qstate->qinfo.qclass);
			ns->resolved = 1;
		}
	}
}

void
iter_mark_pside_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
{
	struct delegpt_ns* ns;
	for(ns = dp->nslist; ns; ns = ns->next) {
		if(ns->done_pside4 && ns->done_pside6)
			continue;
		/* see if this ns as target causes dependency cycle */
		if(causes_cycle(qstate, ns->name, ns->namelen,
			LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
			log_nametypeclass(VERB_QUERY, "skipping target due "
				"to dependency cycle", ns->name,
				LDNS_RR_TYPE_A, qstate->qinfo.qclass);
			ns->done_pside4 = 1;
		}
		if(causes_cycle(qstate, ns->name, ns->namelen,
			LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass)) {
			log_nametypeclass(VERB_QUERY, "skipping target due "
				"to dependency cycle", ns->name,
				LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass);
			ns->done_pside6 = 1;
		}
	}
}

int
iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
	struct delegpt* dp)
{
	struct delegpt_ns* ns;
	/* check:
	 *      o RD qflag is on.
	 *      o no addresses are provided.
	 *      o all NS items are required glue.
	 * OR
	 *      o RD qflag is on.
	 *      o no addresses are provided.
	 *      o the query is for one of the nameservers in dp,
	 *        and that nameserver is a glue-name for this dp.
	 */
	if(!(qflags&BIT_RD))
		return 0;
	/* either available or unused targets */
	if(dp->usable_list || dp->result_list)
		return 0;

	/* see if query is for one of the nameservers, which is glue */
	if( (qinfo->qtype == LDNS_RR_TYPE_A ||
		qinfo->qtype == LDNS_RR_TYPE_AAAA) &&
		dname_subdomain_c(qinfo->qname, dp->name) &&
		delegpt_find_ns(dp, qinfo->qname, qinfo->qname_len))
		return 1;

	for(ns = dp->nslist; ns; ns = ns->next) {
		if(ns->resolved) /* skip failed targets */
			continue;
		if(!dname_subdomain_c(ns->name, dp->name))
			return 0; /* one address is not required glue */
	}
	return 1;
}
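/* Example of a useless delegation point: an RD query for ns1.example.org A,
 * with a delegation for example.org that lists only ns1.example.org as
 * nameserver and carries no glue addresses; resolving the target address
 * would require the very answer we are trying to obtain. */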

int
iter_qname_indicates_dnssec(struct module_env* env, struct query_info *qinfo)
{
	struct trust_anchor* a;
	if(!env || !env->anchors || !qinfo || !qinfo->qname)
		return 0;
	/* a trust anchor exists above the name? */
	if((a=anchors_lookup(env->anchors, qinfo->qname, qinfo->qname_len,
		qinfo->qclass))) {
		if(a->numDS == 0 && a->numDNSKEY == 0) {
			/* insecure trust point */
			lock_basic_unlock(&a->lock);
			return 0;
		}
		lock_basic_unlock(&a->lock);
		return 1;
	}
	/* no trust anchor above it. */
	return 0;
}

int
iter_indicates_dnssec(struct module_env* env, struct delegpt* dp,
        struct dns_msg* msg, uint16_t dclass)
{
	struct trust_anchor* a;
	/* information not available, !env->anchors can be common */
	if(!env || !env->anchors || !dp || !dp->name)
		return 0;
	/* a trust anchor exists with this name, RRSIGs expected */
	if((a=anchor_find(env->anchors, dp->name, dp->namelabs, dp->namelen,
		dclass))) {
		if(a->numDS == 0 && a->numDNSKEY == 0) {
			/* insecure trust point */
			lock_basic_unlock(&a->lock);
			return 0;
		}
		lock_basic_unlock(&a->lock);
		return 1;
	}
	/* see if DS rrset was given, in AUTH section */
	if(msg && msg->rep &&
		reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_DS, dclass))
		return 1;
	/* look in key cache */
	if(env->key_cache) {
		struct key_entry_key* kk = key_cache_obtain(env->key_cache,
			dp->name, dp->namelen, dclass, env->scratch, *env->now);
		if(kk) {
			if(query_dname_compare(kk->name, dp->name) == 0) {
			  if(key_entry_isgood(kk) || key_entry_isbad(kk)) {
				regional_free_all(env->scratch);
				return 1;
			  } else if(key_entry_isnull(kk)) {
				regional_free_all(env->scratch);
				return 0;
			  }
			}
			regional_free_all(env->scratch);
		}
	}
	return 0;
}

int
iter_msg_has_dnssec(struct dns_msg* msg)
{
	size_t i;
	if(!msg || !msg->rep)
		return 0;
	for(i=0; i<msg->rep->an_numrrsets + msg->rep->ns_numrrsets; i++) {
		if(((struct packed_rrset_data*)msg->rep->rrsets[i]->
			entry.data)->rrsig_count > 0)
			return 1;
	}
	/* empty message has no DNSSEC info, with DNSSEC the reply is
	 * not empty (NSEC) */
	return 0;
}

int iter_msg_from_zone(struct dns_msg* msg, struct delegpt* dp,
        enum response_type type, uint16_t dclass)
{
	if(!msg || !dp || !msg->rep || !dp->name)
		return 0;
	/* SOA RRset - always from reply zone */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_SOA, dclass) ||
	   reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_SOA, dclass))
		return 1;
	if(type == RESPONSE_TYPE_REFERRAL) {
		size_t i;
		/* if it adds a single label, i.e. we expect .com,
		 * and referral to example.com. NS ... , then origin zone
		 * is .com. For a referral to sub.example.com. NS ... then
		 * we do not know, since example.com. may be in between. */
		for(i=0; i<msg->rep->an_numrrsets+msg->rep->ns_numrrsets;
			i++) {
			struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
			if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS &&
				ntohs(s->rk.rrset_class) == dclass) {
				int l = dname_count_labels(s->rk.dname);
				if(l == dp->namelabs + 1 &&
					dname_strict_subdomain(s->rk.dname,
					l, dp->name, dp->namelabs))
					return 1;
			}
		}
		return 0;
	}
	log_assert(type==RESPONSE_TYPE_ANSWER || type==RESPONSE_TYPE_CNAME);
	/* not a referral, and not lame delegation (upwards), so,
	 * any NS rrset must be from the zone itself */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_NS, dclass) ||
	   reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_NS, dclass))
		return 1;
	/* a DNSKEY set is expected at the zone apex as well */
	/* this is for 'minimal responses' for DNSKEYs */
	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
		LDNS_RR_TYPE_DNSKEY, dclass))
		return 1;
	return 0;
}

/**
 * check equality of two rrsets
 * @param k1: rrset
 * @param k2: rrset
 * @return true if equal
 */
static int
rrset_equal(struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2)
{
	struct packed_rrset_data* d1 = (struct packed_rrset_data*)
		k1->entry.data;
	struct packed_rrset_data* d2 = (struct packed_rrset_data*)
		k2->entry.data;
	size_t i, t;
	if(k1->rk.dname_len != k2->rk.dname_len ||
		k1->rk.flags != k2->rk.flags ||
		k1->rk.type != k2->rk.type ||
		k1->rk.rrset_class != k2->rk.rrset_class ||
		query_dname_compare(k1->rk.dname, k2->rk.dname) != 0)
		return 0;
	if(	/* do not check ttl: d1->ttl != d2->ttl || */
		d1->count != d2->count ||
		d1->rrsig_count != d2->rrsig_count ||
		d1->trust != d2->trust ||
		d1->security != d2->security)
		return 0;
	t = d1->count + d1->rrsig_count;
	for(i=0; i<t; i++) {
		if(d1->rr_len[i] != d2->rr_len[i] ||
			/* no ttl check: d1->rr_ttl[i] != d2->rr_ttl[i] ||*/
			memcmp(d1->rr_data[i], d2->rr_data[i],
				d1->rr_len[i]) != 0)
			return 0;
	}
	return 1;
}

int
reply_equal(struct reply_info* p, struct reply_info* q, struct regional* region)
{
	size_t i;
	if(p->flags != q->flags ||
		p->qdcount != q->qdcount ||
		/* do not check TTL, this may differ */
		/*
		p->ttl != q->ttl ||
		p->prefetch_ttl != q->prefetch_ttl ||
		*/
		p->security != q->security ||
		p->an_numrrsets != q->an_numrrsets ||
		p->ns_numrrsets != q->ns_numrrsets ||
		p->ar_numrrsets != q->ar_numrrsets ||
		p->rrset_count != q->rrset_count)
		return 0;
	for(i=0; i<p->rrset_count; i++) {
		if(!rrset_equal(p->rrsets[i], q->rrsets[i])) {
			if(!rrset_canonical_equal(region, p->rrsets[i],
				q->rrsets[i])) {
				regional_free_all(region);
				return 0;
			}
			regional_free_all(region);
		}
	}
	return 1;
}

void
caps_strip_reply(struct reply_info* rep)
{
	size_t i;
	if(!rep) return;
	/* see if message is a referral, in which case the additional and
	 * NS record cannot be removed */
	/* referrals have the AA flag unset (strict check, not elsewhere in
	 * unbound, but for 0x20 this is very convenient). */
	if(!(rep->flags&BIT_AA))
		return;
	/* remove the additional section from the reply */
	if(rep->ar_numrrsets != 0) {
		verbose(VERB_ALGO, "caps fallback: removing additional section");
		rep->rrset_count -= rep->ar_numrrsets;
		rep->ar_numrrsets = 0;
	}
	/* is there an NS set in the authority section to remove? */
	/* the failure case (Cisco firewalls) only has one rrset in authsec */
	for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
		struct ub_packed_rrset_key* s = rep->rrsets[i];
		if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS) {
			/* remove NS rrset and break from loop (loop limits
			 * have changed) */
			/* move last rrset into this position (there is no
			 * additional section any more) */
			verbose(VERB_ALGO, "caps fallback: removing NS rrset");
			if(i < rep->rrset_count-1)
				rep->rrsets[i]=rep->rrsets[rep->rrset_count-1];
			rep->rrset_count --;
			rep->ns_numrrsets --;
			break;
		}
	}
}

int caps_failed_rcode(struct reply_info* rep)
{
	return !(FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR ||
		FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NXDOMAIN);
}

void
iter_store_parentside_rrset(struct module_env* env,
	struct ub_packed_rrset_key* rrset)
{
	struct rrset_ref ref;
	rrset = packed_rrset_copy_alloc(rrset, env->alloc, *env->now);
	if(!rrset) {
		log_err("malloc failure in store_parentside_rrset");
		return;
	}
	rrset->rk.flags |= PACKED_RRSET_PARENT_SIDE;
	rrset->entry.hash = rrset_key_hash(&rrset->rk);
	ref.key = rrset;
	ref.id = rrset->id;
	/* ignore ret: if it was in the cache, ref updated */
	(void)rrset_cache_update(env->rrset_cache, &ref, env->alloc, *env->now);
}

/** fetch NS record from reply, if any */
static struct ub_packed_rrset_key*
reply_get_NS_rrset(struct reply_info* rep)
{
	size_t i;
	for(i=0; i<rep->rrset_count; i++) {
		if(rep->rrsets[i]->rk.type == htons(LDNS_RR_TYPE_NS)) {
			return rep->rrsets[i];
		}
	}
	return NULL;
}

void
iter_store_parentside_NS(struct module_env* env, struct reply_info* rep)
{
	struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
	if(rrset) {
		log_rrset_key(VERB_ALGO, "store parent-side NS", rrset);
		iter_store_parentside_rrset(env, rrset);
	}
}

void iter_store_parentside_neg(struct module_env* env,
        struct query_info* qinfo, struct reply_info* rep)
{
	/* TTL: NS from referral in iq->deleg_msg,
	 *      or first RR from iq->response,
	 *      or servfail5secs if !iq->response */
	time_t ttl = NORR_TTL;
	struct ub_packed_rrset_key* neg;
	struct packed_rrset_data* newd;
	if(rep) {
		struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
		if(!rrset && rep->rrset_count != 0) rrset = rep->rrsets[0];
		if(rrset) ttl = ub_packed_rrset_ttl(rrset);
	}
	/* create empty rrset to store */
	neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
		sizeof(struct ub_packed_rrset_key));
	if(!neg) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	memset(&neg->entry, 0, sizeof(neg->entry));
	neg->entry.key = neg;
	neg->rk.type = htons(qinfo->qtype);
	neg->rk.rrset_class = htons(qinfo->qclass);
	neg->rk.flags = 0;
	neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname,
		qinfo->qname_len);
	if(!neg->rk.dname) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->rk.dname_len = qinfo->qname_len;
	neg->entry.hash = rrset_key_hash(&neg->rk);
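	/* the extra sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t) +
	 * sizeof(uint16_t) added to the packed_rrset_data allocation below
	 * holds the single RR's rr_len entry, rr_data pointer, rr_ttl value
	 * and the 2-byte rdata (just its length field) that
	 * packed_rrset_ptr_fixup() makes rr_data[0] point at. */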
	newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch,
		sizeof(struct packed_rrset_data) + sizeof(size_t) +
		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t));
	if(!newd) {
		log_err("out of memory in store_parentside_neg");
		return;
	}
	neg->entry.data = newd;
	newd->ttl = ttl;
	/* entry must have one RR, otherwise not valid in cache.
	 * put in one RR with empty rdata: those are ignored as nameservers */
	newd->count = 1;
	newd->rrsig_count = 0;
	newd->trust = rrset_trust_ans_noAA;
	newd->rr_len = (size_t*)((uint8_t*)newd +
		sizeof(struct packed_rrset_data));
	newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
	packed_rrset_ptr_fixup(newd);
	newd->rr_ttl[0] = newd->ttl;
	sldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
	/* store it */
	log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
	iter_store_parentside_rrset(env, neg);
}

int
iter_lookup_parent_NS_from_cache(struct module_env* env, struct delegpt* dp,
	struct regional* region, struct query_info* qinfo)
{
	struct ub_packed_rrset_key* akey;
	akey = rrset_cache_lookup(env->rrset_cache, dp->name,
		dp->namelen, LDNS_RR_TYPE_NS, qinfo->qclass,
		PACKED_RRSET_PARENT_SIDE, *env->now, 0);
	if(akey) {
		log_rrset_key(VERB_ALGO, "found parent-side NS in cache", akey);
		dp->has_parent_side_NS = 1;
		/* and mark the new names as lame */
		if(!delegpt_rrset_add_ns(dp, region, akey, 1)) {
			lock_rw_unlock(&akey->entry.lock);
			return 0;
		}
		lock_rw_unlock(&akey->entry.lock);
	}
	return 1;
}

int iter_lookup_parent_glue_from_cache(struct module_env* env,
        struct delegpt* dp, struct regional* region, struct query_info* qinfo)
{
	struct ub_packed_rrset_key* akey;
	struct delegpt_ns* ns;
	size_t num = delegpt_count_targets(dp);
	for(ns = dp->nslist; ns; ns = ns->next) {
		/* get cached parentside A */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
			PACKED_RRSET_PARENT_SIDE, *env->now, 0);
		if(akey) {
			log_rrset_key(VERB_ALGO, "found parent-side", akey);
			ns->done_pside4 = 1;
			/* a negative-cache-element has no addresses it adds */
			if(!delegpt_add_rrset_A(dp, region, akey, 1))
				log_err("malloc failure in lookup_parent_glue");
			lock_rw_unlock(&akey->entry.lock);
		}
		/* get cached parentside AAAA */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_AAAA, qinfo->qclass,
			PACKED_RRSET_PARENT_SIDE, *env->now, 0);
		if(akey) {
			log_rrset_key(VERB_ALGO, "found parent-side", akey);
			ns->done_pside6 = 1;
			/* a negative-cache-element has no addresses it adds */
			if(!delegpt_add_rrset_AAAA(dp, region, akey, 1))
				log_err("malloc failure in lookup_parent_glue");
			lock_rw_unlock(&akey->entry.lock);
		}
	}
	/* see if new (but lame) addresses have become available */
	return delegpt_count_targets(dp) != num;
}

int
iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
	uint16_t* c)
{
	uint16_t c1 = *c, c2 = *c;
	int r1 = hints_next_root(hints, &c1);
	int r2 = forwards_next_root(fwd, &c2);
	if(!r1 && !r2) /* got none, end of list */
		return 0;
	else if(!r1) /* got one, return that */
		*c = c2;
	else if(!r2)
		*c = c1;
	else if(c1 < c2) /* got both take smallest */
		*c = c1;
	else	*c = c2;
	return 1;
}

void
iter_scrub_ds(struct dns_msg* msg, struct ub_packed_rrset_key* ns, uint8_t* z)
{
	/* Only the DS record for the delegation itself is expected.
	 * We allow DS for everything between the bailiwick and the
	 * zonecut, thus DS records must be at or above the zonecut.
	 * And the DS records must be below the server authority zone.
	 * The answer section is already scrubbed. */
	size_t i = msg->rep->an_numrrsets;
	while(i < (msg->rep->an_numrrsets + msg->rep->ns_numrrsets)) {
		struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
		if(ntohs(s->rk.type) == LDNS_RR_TYPE_DS &&
			(!ns || !dname_subdomain_c(ns->rk.dname, s->rk.dname)
			|| query_dname_compare(z, s->rk.dname) == 0)) {
			log_nametypeclass(VERB_ALGO, "removing irrelevant DS",
				s->rk.dname, ntohs(s->rk.type),
				ntohs(s->rk.rrset_class));
			memmove(msg->rep->rrsets+i, msg->rep->rrsets+i+1,
				sizeof(struct ub_packed_rrset_key*) *
				(msg->rep->rrset_count-i-1));
			msg->rep->ns_numrrsets--;
			msg->rep->rrset_count--;
			/* stay at same i, but new record */
			continue;
		}
		i++;
	}
}

void iter_dec_attempts(struct delegpt* dp, int d)
{
	struct delegpt_addr* a;
	for(a=dp->target_list; a; a = a->next_target) {
		if(a->attempts >= OUTBOUND_MSG_RETRY) {
			/* add back to result list */
			a->next_result = dp->result_list;
			dp->result_list = a;
		}
		if(a->attempts > d)
			a->attempts -= d;
		else a->attempts = 0;
	}
}
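/* Example: iter_dec_attempts(dp, 3) puts addresses that had exhausted their
 * OUTBOUND_MSG_RETRY attempts back on dp->result_list and lowers every
 * attempts counter by 3 (clamped at 0), giving each address up to 3 more
 * tries. */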

void iter_merge_retry_counts(struct delegpt* dp, struct delegpt* old)
{
	struct delegpt_addr* a, *o, *prev;
	for(a=dp->target_list; a; a = a->next_target) {
		o = delegpt_find_addr(old, &a->addr, a->addrlen);
		if(o) {
			log_addr(VERB_ALGO, "copy attempt count previous dp",
				&a->addr, a->addrlen);
			a->attempts = o->attempts;
		}
	}
	prev = NULL;
	a = dp->usable_list;
	while(a) {
		if(a->attempts >= OUTBOUND_MSG_RETRY) {
			log_addr(VERB_ALGO, "remove from usable list dp",
				&a->addr, a->addrlen);
			/* remove from usable list */
			if(prev)
				prev->next_usable = a->next_usable;
			else	dp->usable_list = a->next_usable;
			/* prev stays the same */
			a = a->next_usable;
			continue;
		}
		prev = a;
		a = a->next_usable;
	}
}

int
iter_ds_toolow(struct dns_msg* msg, struct delegpt* dp)
{
	/* if for query example.com, there is example.com SOA or a subdomain
	 * of example.com, then we are too low and need to fetch NS. */
	size_t i;
	/* if we have a DNAME or CNAME we are probably wrong */
	/* if we have a qtype DS in the answer section, it's fine */
	for(i=0; i < msg->rep->an_numrrsets; i++) {
		struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
		if(ntohs(s->rk.type) == LDNS_RR_TYPE_DNAME ||
			ntohs(s->rk.type) == LDNS_RR_TYPE_CNAME) {
			/* not the right answer, maybe too low, check the
			 * RRSIG signer name (if there is any) for a hint
			 * that it is from the dp zone anyway */
			uint8_t* sname;
			size_t slen;
			val_find_rrset_signer(s, &sname, &slen);
			if(sname && query_dname_compare(dp->name, sname)==0)
				return 0; /* it is fine, from the right dp */
			return 1;
		}
		if(ntohs(s->rk.type) == LDNS_RR_TYPE_DS)
			return 0; /* fine, we have a DS record */
	}
	for(i=msg->rep->an_numrrsets;
		i < msg->rep->an_numrrsets + msg->rep->ns_numrrsets; i++) {
		struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
		if(ntohs(s->rk.type) == LDNS_RR_TYPE_SOA) {
			if(dname_subdomain_c(s->rk.dname, msg->qinfo.qname))
				return 1; /* point is too low */
			if(query_dname_compare(s->rk.dname, dp->name)==0)
				return 0; /* right dp */
		}
		if(ntohs(s->rk.type) == LDNS_RR_TYPE_NSEC ||
			ntohs(s->rk.type) == LDNS_RR_TYPE_NSEC3) {
			uint8_t* sname;
			size_t slen;
			val_find_rrset_signer(s, &sname, &slen);
			if(sname && query_dname_compare(dp->name, sname)==0)
				return 0; /* it is fine, from the right dp */
			return 1;
		}
	}
	/* we do not know */
	return 1;
}

int iter_dp_cangodown(struct query_info* qinfo, struct delegpt* dp)
{
	/* no delegation point, do not see how we can go down,
	 * robust check, it should really exist */
	if(!dp) return 0;

	/* see if dp equals the qname, then we cannot go down further */
	if(query_dname_compare(qinfo->qname, dp->name) == 0)
		return 0;
	/* if dp is one label above the name we also cannot go down further */
	if(dname_count_labels(qinfo->qname) == dp->namelabs+1)
		return 0;
	return 1;
}
1213