xref: /freebsd/contrib/unbound/iterator/iter_utils.c (revision 8c2f6c3be0125142d3c1782e4b0ee0634c584b9e)
1 /*
2  * iterator/iter_utils.c - iterative resolver module utility functions.
3  *
4  * Copyright (c) 2007, NLnet Labs. All rights reserved.
5  *
6  * This software is open source.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * Redistributions of source code must retain the above copyright notice,
13  * this list of conditions and the following disclaimer.
14  *
15  * Redistributions in binary form must reproduce the above copyright notice,
16  * this list of conditions and the following disclaimer in the documentation
17  * and/or other materials provided with the distribution.
18  *
19  * Neither the name of the NLNET LABS nor the names of its contributors may
20  * be used to endorse or promote products derived from this software without
21  * specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /**
37  * \file
38  *
39  * This file contains functions to assist the iterator module.
40  * Configuration options. Forward zones.
41  */
42 #include "config.h"
43 #include "iterator/iter_utils.h"
44 #include "iterator/iterator.h"
45 #include "iterator/iter_hints.h"
46 #include "iterator/iter_fwd.h"
47 #include "iterator/iter_donotq.h"
48 #include "iterator/iter_delegpt.h"
49 #include "iterator/iter_priv.h"
50 #include "services/cache/infra.h"
51 #include "services/cache/dns.h"
52 #include "services/cache/rrset.h"
53 #include "services/outside_network.h"
54 #include "util/net_help.h"
55 #include "util/module.h"
56 #include "util/log.h"
57 #include "util/config_file.h"
58 #include "util/regional.h"
59 #include "util/data/msgparse.h"
60 #include "util/data/dname.h"
61 #include "util/random.h"
62 #include "util/fptr_wlist.h"
63 #include "validator/val_anchor.h"
64 #include "validator/val_kcache.h"
65 #include "validator/val_kentry.h"
66 #include "validator/val_utils.h"
67 #include "validator/val_sigcrypt.h"
68 #include "sldns/sbuffer.h"
69 #include "sldns/str2wire.h"
70 
71 /** time when nameserver glue is said to be 'recent' */
72 #define SUSPICION_RECENT_EXPIRY 86400
73 
74 /** if NAT64 is enabled and no NAT64 prefix is configured, first fall back to
75  * DNS64 prefix.  If that is not configured, fall back to this default value.
76  */
77 static const char DEFAULT_NAT64_PREFIX[] = "64:ff9b::/96";
78 
79 /** fillup fetch policy array */
80 static void
81 fetch_fill(struct iter_env* ie, const char* str)
82 {
83 	char* s = (char*)str, *e;
84 	int i;
85 	for(i=0; i<ie->max_dependency_depth+1; i++) {
86 		ie->target_fetch_policy[i] = strtol(s, &e, 10);
87 		if(s == e)
88 			fatal_exit("cannot parse fetch policy number %s", s);
89 		s = e;
90 	}
91 }
92 
93 /** Read config string that represents the target fetch policy */
94 static int
95 read_fetch_policy(struct iter_env* ie, const char* str)
96 {
97 	int count = cfg_count_numbers(str);
98 	if(count < 1) {
99 		log_err("Cannot parse target fetch policy: \"%s\"", str);
100 		return 0;
101 	}
102 	ie->max_dependency_depth = count - 1;
103 	ie->target_fetch_policy = (int*)calloc(
104 		(size_t)ie->max_dependency_depth+1, sizeof(int));
105 	if(!ie->target_fetch_policy) {
106 		log_err("alloc fetch policy: out of memory");
107 		return 0;
108 	}
109 	fetch_fill(ie, str);
110 	return 1;
111 }
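
/* For example, with the typical default policy string "3 2 1 0 0" the
 * array becomes {3, 2, 1, 0, 0} and max_dependency_depth is 4: entry i
 * is the number of missing targets fetched opportunistically at
 * dependency depth i (0 means fetch on demand only). */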
112 
113 /** apply config caps whitelist items to name tree */
114 static int
115 caps_white_apply_cfg(rbtree_type* ntree, struct config_file* cfg)
116 {
117 	struct config_strlist* p;
118 	for(p=cfg->caps_whitelist; p; p=p->next) {
119 		struct name_tree_node* n;
120 		size_t len;
121 		uint8_t* nm = sldns_str2wire_dname(p->str, &len);
122 		if(!nm) {
123 			log_err("could not parse %s", p->str);
124 			return 0;
125 		}
126 		n = (struct name_tree_node*)calloc(1, sizeof(*n));
127 		if(!n) {
128 			log_err("out of memory");
129 			free(nm);
130 			return 0;
131 		}
132 		n->node.key = n;
133 		n->name = nm;
134 		n->len = len;
135 		n->labs = dname_count_labels(nm);
136 		n->dclass = LDNS_RR_CLASS_IN;
137 		if(!name_tree_insert(ntree, n, nm, len, n->labs, n->dclass)) {
138 			/* duplicate element ignored, idempotent */
139 			free(n->name);
140 			free(n);
141 		}
142 	}
143 	name_tree_init_parents(ntree);
144 	return 1;
145 }
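
/* The iterator consults this tree (closest enclosing match) so that the
 * listed zones and their subdomains are exempt from 0x20 caps-for-id
 * checks, e.g. for servers known not to echo the query name with
 * matching case. */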
146 
147 int
148 iter_apply_cfg(struct iter_env* iter_env, struct config_file* cfg)
149 {
150 	const char *nat64_prefix;
151 	int i;
152 	/* target fetch policy */
153 	if(!read_fetch_policy(iter_env, cfg->target_fetch_policy))
154 		return 0;
155 	for(i=0; i<iter_env->max_dependency_depth+1; i++)
156 		verbose(VERB_QUERY, "target fetch policy for level %d is %d",
157 			i, iter_env->target_fetch_policy[i]);
158 
159 	if(!iter_env->donotq)
160 		iter_env->donotq = donotq_create();
161 	if(!iter_env->donotq || !donotq_apply_cfg(iter_env->donotq, cfg)) {
162 		log_err("Could not set donotqueryaddresses");
163 		return 0;
164 	}
165 	if(!iter_env->priv)
166 		iter_env->priv = priv_create();
167 	if(!iter_env->priv || !priv_apply_cfg(iter_env->priv, cfg)) {
168 		log_err("Could not set private addresses");
169 		return 0;
170 	}
171 	if(cfg->caps_whitelist) {
172 		if(!iter_env->caps_white)
173 			iter_env->caps_white = rbtree_create(name_tree_compare);
174 		if(!iter_env->caps_white || !caps_white_apply_cfg(
175 			iter_env->caps_white, cfg)) {
176 			log_err("Could not set capsforid whitelist");
177 			return 0;
178 		}
179 
180 	}
181 
182 	nat64_prefix = cfg->nat64_prefix;
183 	if(!nat64_prefix)
184 		nat64_prefix = cfg->dns64_prefix;
185 	if(!nat64_prefix)
186 		nat64_prefix = DEFAULT_NAT64_PREFIX;
187 	if(!netblockstrtoaddr(nat64_prefix, 0, &iter_env->nat64_prefix_addr,
188 		&iter_env->nat64_prefix_addrlen,
189 		&iter_env->nat64_prefix_net)) {
190 		log_err("cannot parse nat64-prefix netblock: %s", nat64_prefix);
191 		return 0;
192 	}
193 	if(!addr_is_ip6(&iter_env->nat64_prefix_addr,
194 		iter_env->nat64_prefix_addrlen)) {
195 		log_err("nat64-prefix is not IPv6: %s", nat64_prefix);
196 		return 0;
197 	}
198 	if(!prefixnet_is_nat64(iter_env->nat64_prefix_net)) {
199 		log_err("nat64-prefix length is not 32, 40, 48, 56, 64 or 96: %s",
200 			nat64_prefix);
201 		return 0;
202 	}
203 
204 	iter_env->supports_ipv6 = cfg->do_ip6;
205 	iter_env->supports_ipv4 = cfg->do_ip4;
206 	iter_env->use_nat64 = cfg->do_nat64;
207 	iter_env->outbound_msg_retry = cfg->outbound_msg_retry;
208 	iter_env->max_sent_count = cfg->max_sent_count;
209 	iter_env->max_query_restarts = cfg->max_query_restarts;
210 	return 1;
211 }
212 
213 /** filter out unsuitable targets
214  * @param iter_env: iterator environment with ipv6-support flag.
215  * @param env: module environment with infra cache.
216  * @param name: zone name
217  * @param namelen: length of name
218  * @param qtype: query type (host order).
219  * @param now: current time
220  * @param a: address in delegation point we are examining.
221  * @return an integer that signals the target suitability.
222  *	as follows:
223  *	-1: The address should be omitted from the list.
224  *	    Because:
225  *		o The address is bogus (DNSSEC validation failure).
226  *		o Listed as donotquery
227  *		o is ipv6 but no ipv6 support (in operating system).
228  *		o is ipv4 but no ipv4 support (in operating system).
229  *		o is lame
230  *	Otherwise, an rtt in milliseconds.
231  *	0 .. USEFUL_SERVER_TOP_TIMEOUT-1
232  *		The roundtrip time timeout estimate, less than 2 minutes.
233  *		Note that util/rtt.c has a MIN_TIMEOUT of 50 msec, thus
234  *		values 0 .. 49 are not used, unless that is changed.
235  *	USEFUL_SERVER_TOP_TIMEOUT
236  *		This exact value is given for unresponsive, blacklisted servers.
237  *	USEFUL_SERVER_TOP_TIMEOUT+1
238  *		For non-blacklisted servers: huge timeout, but has traffic.
239  *	USEFUL_SERVER_TOP_TIMEOUT*1 ..
240  *		parent-side lame servers get this penalty. A dispreferred
241  *		server (lame in delegpt).
242  *	USEFUL_SERVER_TOP_TIMEOUT*2 ..
243  *		dnsseclame servers get penalty
244  *	USEFUL_SERVER_TOP_TIMEOUT*3 ..
245  *		recursion lame servers get penalty
246  *	UNKNOWN_SERVER_NICENESS
247  *		If no information is known about the server, this is
248  *		returned. 376 msec or so.
249  *	+BLACKLIST_PENALTY (of USEFUL_SERVER_TOP_TIMEOUT*4) for dnssec failed IPs.
250  *
251  * When a final value is chosen that is dnsseclame, dnsseclameness checking
252  * is turned off (so we do not discard the reply).
253  * When a final value is chosen that is recursionlame, the RD bit is set on
254  * the query. Because of the numbers this means recursionlame also has
255  * dnssec lameness checking turned off.
256  */
257 static int
258 iter_filter_unsuitable(struct iter_env* iter_env, struct module_env* env,
259 	uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
260 	struct delegpt_addr* a)
261 {
262 	int rtt, lame, reclame, dnsseclame;
263 	if(a->bogus)
264 		return -1; /* address of server is bogus */
265 	if(donotq_lookup(iter_env->donotq, &a->addr, a->addrlen)) {
266 		log_addr(VERB_ALGO, "skip addr on the donotquery list",
267 			&a->addr, a->addrlen);
268 		return -1; /* server is on the donotquery list */
269 	}
270 	if(!iter_env->supports_ipv6 && addr_is_ip6(&a->addr, a->addrlen)) {
271 		return -1; /* there is no ip6 available */
272 	}
273 	if(!iter_env->supports_ipv4 && !iter_env->use_nat64 &&
274 	   !addr_is_ip6(&a->addr, a->addrlen)) {
275 		return -1; /* there is no ip4 available */
276 	}
277 	/* check lameness - need zone , class info */
278 	if(infra_get_lame_rtt(env->infra_cache, &a->addr, a->addrlen,
279 		name, namelen, qtype, &lame, &dnsseclame, &reclame,
280 		&rtt, now)) {
281 		log_addr(VERB_ALGO, "servselect", &a->addr, a->addrlen);
282 		verbose(VERB_ALGO, "   rtt=%d%s%s%s%s", rtt,
283 			lame?" LAME":"",
284 			dnsseclame?" DNSSEC_LAME":"",
285 			reclame?" REC_LAME":"",
286 			a->lame?" ADDR_LAME":"");
287 		if(lame)
288 			return -1; /* server is lame */
289 		else if(rtt >= USEFUL_SERVER_TOP_TIMEOUT)
290 			/* server is unresponsive;
291 			 * we used to return TOP_TIMEOUT, but that was fairly
292 			 * useless: a value of TOP_TIMEOUT is dropped later
293 			 * anyway because it is blacklisted. Instead remove
294 			 * it here, so other choices (that are not
295 			 * blacklisted) can be tried */
296 			return -1;
297 		/* select remainder from worst to best */
298 		else if(reclame)
299 			return rtt+USEFUL_SERVER_TOP_TIMEOUT*3; /* nonpref */
300 		else if(dnsseclame || a->dnsseclame)
301 			return rtt+USEFUL_SERVER_TOP_TIMEOUT*2; /* nonpref */
302 		else if(a->lame)
303 			return rtt+USEFUL_SERVER_TOP_TIMEOUT+1; /* nonpref */
304 		else	return rtt;
305 	}
306 	/* no server information present */
307 	if(a->dnsseclame)
308 		return UNKNOWN_SERVER_NICENESS+USEFUL_SERVER_TOP_TIMEOUT*2; /* nonpref */
309 	else if(a->lame)
310 		return USEFUL_SERVER_TOP_TIMEOUT+1+UNKNOWN_SERVER_NICENESS; /* nonpref */
311 	return UNKNOWN_SERVER_NICENESS;
312 }
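
/* Rough example of the resulting ordering: a responsive server with a
 * cached rtt of 80 msec sorts before an unknown server at
 * UNKNOWN_SERVER_NICENESS (376 msec or so), which in turn sorts before
 * dnssec-lame or recursion-lame servers at rtt plus a multiple of
 * USEFUL_SERVER_TOP_TIMEOUT; lame, unresponsive, bogus and donotquery
 * servers are dropped (-1) entirely. */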
313 
314 /** lookup RTT information, and also store fastest rtt (if any) */
315 static int
316 iter_fill_rtt(struct iter_env* iter_env, struct module_env* env,
317 	uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
318 	struct delegpt* dp, int* best_rtt, struct sock_list* blacklist,
319 	size_t* num_suitable_results)
320 {
321 	int got_it = 0;
322 	struct delegpt_addr* a;
323 	*num_suitable_results = 0;
324 
325 	if(dp->bogus)
326 		return 0; /* NS bogus, all bogus, nothing found */
327 	for(a=dp->result_list; a; a = a->next_result) {
328 		a->sel_rtt = iter_filter_unsuitable(iter_env, env,
329 			name, namelen, qtype, now, a);
330 		if(a->sel_rtt != -1) {
331 			if(sock_list_find(blacklist, &a->addr, a->addrlen))
332 				a->sel_rtt += BLACKLIST_PENALTY;
333 
334 			if(!got_it) {
335 				*best_rtt = a->sel_rtt;
336 				got_it = 1;
337 			} else if(a->sel_rtt < *best_rtt) {
338 				*best_rtt = a->sel_rtt;
339 			}
340 			(*num_suitable_results)++;
341 		}
342 	}
343 	return got_it;
344 }
345 
346 /** compare two rtts, return -1, 0 or 1 */
347 static int
348 rtt_compare(const void* x, const void* y)
349 {
350 	if(*(int*)x == *(int*)y)
351 		return 0;
352 	if(*(int*)x > *(int*)y)
353 		return 1;
354 	return -1;
355 }
356 
357 /** get RTT for the Nth fastest server */
358 static int
359 nth_rtt(struct delegpt_addr* result_list, size_t num_results, size_t n)
360 {
361 	int rtt_band;
362 	size_t i;
363 	int* rtt_list, *rtt_index;
364 
365 	if(num_results < 1 || n >= num_results) {
366 		return -1;
367 	}
368 
369 	rtt_list = calloc(num_results, sizeof(int));
370 	if(!rtt_list) {
371 		log_err("malloc failure: allocating rtt_list");
372 		return -1;
373 	}
374 	rtt_index = rtt_list;
375 
376 	for(i=0; i<num_results && result_list; i++) {
377 		if(result_list->sel_rtt != -1) {
378 			*rtt_index = result_list->sel_rtt;
379 			rtt_index++;
380 		}
381 		result_list=result_list->next_result;
382 	}
383 	qsort(rtt_list, num_results, sizeof(*rtt_list), rtt_compare);
384 
385 	log_assert(n > 0);
386 	rtt_band = rtt_list[n-1];
387 	free(rtt_list);
388 
389 	return rtt_band;
390 }
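
/* Example: for suitable sel_rtt values {120, 50, 300, 80} and n=2 the
 * sorted list is {50, 80, 120, 300} and 80 is returned, the rtt of the
 * 2nd fastest server. */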
391 
392 /** filter the address list, putting best targets at front,
393  * returns number of best targets (or 0, no suitable targets) */
394 static int
395 iter_filter_order(struct iter_env* iter_env, struct module_env* env,
396 	uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
397 	struct delegpt* dp, int* selected_rtt, int open_target,
398 	struct sock_list* blacklist, time_t prefetch)
399 {
400 	int got_num = 0, low_rtt = 0, swap_to_front, rtt_band = RTT_BAND, nth;
401 	int alllame = 0;
402 	size_t num_results;
403 	struct delegpt_addr* a, *n, *prev=NULL;
404 
405 	/* fillup sel_rtt and find best rtt in the bunch */
406 	got_num = iter_fill_rtt(iter_env, env, name, namelen, qtype, now, dp,
407 		&low_rtt, blacklist, &num_results);
408 	if(got_num == 0)
409 		return 0;
410 	if(low_rtt >= USEFUL_SERVER_TOP_TIMEOUT &&
411 		/* If all missing (or not fully resolved) targets are lame,
412 		 * then use the remaining lame address. */
413 		((delegpt_count_missing_targets(dp, &alllame) > 0 && !alllame) ||
414 		open_target > 0)) {
415 		verbose(VERB_ALGO, "Bad choices, trying to get more choice");
416 		return 0; /* we want more choice. The best choice is a bad one.
417 			     return 0 to force the caller to fetch more */
418 	}
419 
420 	if(env->cfg->fast_server_permil != 0 && prefetch == 0 &&
421 		num_results > env->cfg->fast_server_num &&
422 		ub_random_max(env->rnd, 1000) < env->cfg->fast_server_permil) {
423 		/* the query is not a prefetch, but for a downstream client,
424 		 * there are more servers available than the fastest N we want
425 		 * to choose from. Limit our choice to the fastest servers. */
426 		nth = nth_rtt(dp->result_list, num_results,
427 			env->cfg->fast_server_num);
428 		if(nth > 0) {
429 			rtt_band = nth - low_rtt;
430 			if(rtt_band > RTT_BAND)
431 				rtt_band = RTT_BAND;
432 		}
433 	}
434 
435 	got_num = 0;
436 	a = dp->result_list;
437 	while(a) {
438 		/* skip unsuitable targets */
439 		if(a->sel_rtt == -1) {
440 			prev = a;
441 			a = a->next_result;
442 			continue;
443 		}
444 		/* classify the server address and determine what to do */
445 		swap_to_front = 0;
446 		if(a->sel_rtt >= low_rtt && a->sel_rtt - low_rtt <= rtt_band) {
447 			got_num++;
448 			swap_to_front = 1;
449 		} else if(a->sel_rtt<low_rtt && low_rtt-a->sel_rtt<=rtt_band) {
450 			got_num++;
451 			swap_to_front = 1;
452 		}
453 		/* swap to front if necessary, or move to next result */
454 		if(swap_to_front && prev) {
455 			n = a->next_result;
456 			prev->next_result = n;
457 			a->next_result = dp->result_list;
458 			dp->result_list = a;
459 			a = n;
460 		} else {
461 			prev = a;
462 			a = a->next_result;
463 		}
464 	}
465 	*selected_rtt = low_rtt;
466 
467 	if (env->cfg->prefer_ip6) {
468 		int got_num6 = 0;
469 		int low_rtt6 = 0;
470 		int i;
471 		int attempt = -1; /* filter to make sure addresses have
472 		  no more attempts on them than the first, to force round
473 		  robin when all the IPv6 addresses fail */
474 		int num4ok = 0; /* number ip4 at low attempt count */
475 		int num4_lowrtt = 0;
476 		prev = NULL;
477 		a = dp->result_list;
478 		for(i = 0; i < got_num; i++) {
479 			if(!a) break; /* robustness */
480 			swap_to_front = 0;
481 			if(a->addr.ss_family != AF_INET6 && attempt == -1) {
482 				/* if we only have ip4 at low attempt count,
483 				 * then ip6 is failing, and we need to
484 				 * select one of the remaining IPv4 addrs */
485 				attempt = a->attempts;
486 				num4ok++;
487 				num4_lowrtt = a->sel_rtt;
488 			} else if(a->addr.ss_family != AF_INET6 && attempt == a->attempts) {
489 				num4ok++;
490 				if(num4_lowrtt == 0 || a->sel_rtt < num4_lowrtt) {
491 					num4_lowrtt = a->sel_rtt;
492 				}
493 			}
494 			if(a->addr.ss_family == AF_INET6) {
495 				if(attempt == -1) {
496 					attempt = a->attempts;
497 				} else if(a->attempts > attempt) {
498 					break;
499 				}
500 				got_num6++;
501 				swap_to_front = 1;
502 				if(low_rtt6 == 0 || a->sel_rtt < low_rtt6) {
503 					low_rtt6 = a->sel_rtt;
504 				}
505 			}
506 			/* swap to front if IPv6, or move to next result */
507 			if(swap_to_front && prev) {
508 				n = a->next_result;
509 				prev->next_result = n;
510 				a->next_result = dp->result_list;
511 				dp->result_list = a;
512 				a = n;
513 			} else {
514 				prev = a;
515 				a = a->next_result;
516 			}
517 		}
518 		if(got_num6 > 0) {
519 			got_num = got_num6;
520 			*selected_rtt = low_rtt6;
521 		} else if(num4ok > 0) {
522 			got_num = num4ok;
523 			*selected_rtt = num4_lowrtt;
524 		}
525 	} else if (env->cfg->prefer_ip4) {
526 		int got_num4 = 0;
527 		int low_rtt4 = 0;
528 		int i;
529 		int attempt = -1; /* filter to make sure addresses have
530 		  no more attempts on them than the first, to force round
531 		  robin when all the IPv4 addresses fail */
532 		int num6ok = 0; /* number ip6 at low attempt count */
533 		int num6_lowrtt = 0;
534 		prev = NULL;
535 		a = dp->result_list;
536 		for(i = 0; i < got_num; i++) {
537 			if(!a) break; /* robustness */
538 			swap_to_front = 0;
539 			if(a->addr.ss_family != AF_INET && attempt == -1) {
540 				/* if we only have ip6 at low attempt count,
541 				 * then ip4 is failing, and we need to
542 				 * select one of the remaining IPv6 addrs */
543 				attempt = a->attempts;
544 				num6ok++;
545 				num6_lowrtt = a->sel_rtt;
546 			} else if(a->addr.ss_family != AF_INET && attempt == a->attempts) {
547 				num6ok++;
548 				if(num6_lowrtt == 0 || a->sel_rtt < num6_lowrtt) {
549 					num6_lowrtt = a->sel_rtt;
550 				}
551 			}
552 			if(a->addr.ss_family == AF_INET) {
553 				if(attempt == -1) {
554 					attempt = a->attempts;
555 				} else if(a->attempts > attempt) {
556 					break;
557 				}
558 				got_num4++;
559 				swap_to_front = 1;
560 				if(low_rtt4 == 0 || a->sel_rtt < low_rtt4) {
561 					low_rtt4 = a->sel_rtt;
562 				}
563 			}
564 			/* swap to front if IPv4, or move to next result */
565 			if(swap_to_front && prev) {
566 				n = a->next_result;
567 				prev->next_result = n;
568 				a->next_result = dp->result_list;
569 				dp->result_list = a;
570 				a = n;
571 			} else {
572 				prev = a;
573 				a = a->next_result;
574 			}
575 		}
576 		if(got_num4 > 0) {
577 			got_num = got_num4;
578 			*selected_rtt = low_rtt4;
579 		} else if(num6ok > 0) {
580 			got_num = num6ok;
581 			*selected_rtt = num6_lowrtt;
582 		}
583 	}
584 	return got_num;
585 }
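
/* On success the returned number of preferred targets sits at the head
 * of dp->result_list; iter_server_selection() below then picks one of
 * those entries at random. */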
586 
587 struct delegpt_addr*
588 iter_server_selection(struct iter_env* iter_env,
589 	struct module_env* env, struct delegpt* dp,
590 	uint8_t* name, size_t namelen, uint16_t qtype, int* dnssec_lame,
591 	int* chase_to_rd, int open_target, struct sock_list* blacklist,
592 	time_t prefetch)
593 {
594 	int sel;
595 	int selrtt;
596 	struct delegpt_addr* a, *prev;
597 	int num = iter_filter_order(iter_env, env, name, namelen, qtype,
598 		*env->now, dp, &selrtt, open_target, blacklist, prefetch);
599 
600 	if(num == 0)
601 		return NULL;
602 	verbose(VERB_ALGO, "selrtt %d", selrtt);
603 	if(selrtt > BLACKLIST_PENALTY) {
604 		if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*3) {
605 			verbose(VERB_ALGO, "chase to "
606 				"blacklisted recursion lame server");
607 			*chase_to_rd = 1;
608 		}
609 		if(selrtt-BLACKLIST_PENALTY > USEFUL_SERVER_TOP_TIMEOUT*2) {
610 			verbose(VERB_ALGO, "chase to "
611 				"blacklisted dnssec lame server");
612 			*dnssec_lame = 1;
613 		}
614 	} else {
615 		if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*3) {
616 			verbose(VERB_ALGO, "chase to recursion lame server");
617 			*chase_to_rd = 1;
618 		}
619 		if(selrtt > USEFUL_SERVER_TOP_TIMEOUT*2) {
620 			verbose(VERB_ALGO, "chase to dnssec lame server");
621 			*dnssec_lame = 1;
622 		}
623 		if(selrtt == USEFUL_SERVER_TOP_TIMEOUT) {
624 			verbose(VERB_ALGO, "chase to blacklisted lame server");
625 			return NULL;
626 		}
627 	}
628 
629 	if(num == 1) {
630 		a = dp->result_list;
631 		if(++a->attempts < iter_env->outbound_msg_retry)
632 			return a;
633 		dp->result_list = a->next_result;
634 		return a;
635 	}
636 
637 	/* randomly select a target from the list */
638 	log_assert(num > 1);
639 	/* grab a secure random number, to pick an unpredictable server.
640 	 * also we need it to be threadsafe. */
641 	sel = ub_random_max(env->rnd, num);
642 	a = dp->result_list;
643 	prev = NULL;
644 	while(sel > 0 && a) {
645 		prev = a;
646 		a = a->next_result;
647 		sel--;
648 	}
649 	if(!a)  /* robustness */
650 		return NULL;
651 	if(++a->attempts < iter_env->outbound_msg_retry)
652 		return a;
653 	/* remove it from the delegation point result list */
654 	if(prev)
655 		prev->next_result = a->next_result;
656 	else	dp->result_list = a->next_result;
657 	return a;
658 }
659 
660 struct dns_msg*
661 dns_alloc_msg(sldns_buffer* pkt, struct msg_parse* msg,
662 	struct regional* region)
663 {
664 	struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
665 		sizeof(struct dns_msg));
666 	if(!m)
667 		return NULL;
668 	memset(m, 0, sizeof(*m));
669 	if(!parse_create_msg(pkt, msg, NULL, &m->qinfo, &m->rep, region)) {
670 		log_err("malloc failure: allocating incoming dns_msg");
671 		return NULL;
672 	}
673 	return m;
674 }
675 
676 struct dns_msg*
677 dns_copy_msg(struct dns_msg* from, struct regional* region)
678 {
679 	struct dns_msg* m = (struct dns_msg*)regional_alloc(region,
680 		sizeof(struct dns_msg));
681 	if(!m)
682 		return NULL;
683 	m->qinfo = from->qinfo;
684 	if(!(m->qinfo.qname = regional_alloc_init(region, from->qinfo.qname,
685 		from->qinfo.qname_len)))
686 		return NULL;
687 	if(!(m->rep = reply_info_copy(from->rep, NULL, region)))
688 		return NULL;
689 	return m;
690 }
691 
692 void
693 iter_dns_store(struct module_env* env, struct query_info* msgqinf,
694 	struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
695 	struct regional* region, uint16_t flags, time_t qstarttime)
696 {
697 	if(!dns_cache_store(env, msgqinf, msgrep, is_referral, leeway,
698 		pside, region, flags, qstarttime))
699 		log_err("out of memory: cannot store data in cache");
700 }
701 
702 int
703 iter_ns_probability(struct ub_randstate* rnd, int n, int m)
704 {
705 	int sel;
706 	if(n == m) /* 100% chance */
707 		return 1;
708 	/* we do not need secure random numbers here, but
709 	 * we do need them to be threadsafe, so we use this */
710 	sel = ub_random_max(rnd, m);
711 	return (sel < n);
712 }
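
/* Example: iter_ns_probability(rnd, 3, 10) returns 1 with probability
 * 3/10, since ub_random_max(rnd, 10) is uniform over 0..9. */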
713 
714 /** detect dependency cycle for query and target */
715 static int
716 causes_cycle(struct module_qstate* qstate, uint8_t* name, size_t namelen,
717 	uint16_t t, uint16_t c)
718 {
719 	struct query_info qinf;
720 	qinf.qname = name;
721 	qinf.qname_len = namelen;
722 	qinf.qtype = t;
723 	qinf.qclass = c;
724 	qinf.local_alias = NULL;
725 	fptr_ok(fptr_whitelist_modenv_detect_cycle(
726 		qstate->env->detect_cycle));
727 	return (*qstate->env->detect_cycle)(qstate, &qinf,
728 		(uint16_t)(BIT_RD|BIT_CD), qstate->is_priming,
729 		qstate->is_valrec);
730 }
731 
732 void
733 iter_mark_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
734 {
735 	struct delegpt_ns* ns;
736 	for(ns = dp->nslist; ns; ns = ns->next) {
737 		if(ns->resolved)
738 			continue;
739 		/* see if this ns as target causes dependency cycle */
740 		if(causes_cycle(qstate, ns->name, ns->namelen,
741 			LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass) ||
742 		   causes_cycle(qstate, ns->name, ns->namelen,
743 			LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
744 			log_nametypeclass(VERB_QUERY, "skipping target due "
745 				"to dependency cycle ('harden-glue: no' may "
746 				"fix some of the cycles)",
747 				ns->name, LDNS_RR_TYPE_A,
748 				qstate->qinfo.qclass);
749 			ns->resolved = 1;
750 		}
751 	}
752 }
753 
754 void
755 iter_mark_pside_cycle_targets(struct module_qstate* qstate, struct delegpt* dp)
756 {
757 	struct delegpt_ns* ns;
758 	for(ns = dp->nslist; ns; ns = ns->next) {
759 		if(ns->done_pside4 && ns->done_pside6)
760 			continue;
761 		/* see if this ns as target causes dependency cycle */
762 		if(causes_cycle(qstate, ns->name, ns->namelen,
763 			LDNS_RR_TYPE_A, qstate->qinfo.qclass)) {
764 			log_nametypeclass(VERB_QUERY, "skipping target due "
765 			 	"to dependency cycle", ns->name,
766 				LDNS_RR_TYPE_A, qstate->qinfo.qclass);
767 			ns->done_pside4 = 1;
768 		}
769 		if(causes_cycle(qstate, ns->name, ns->namelen,
770 			LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass)) {
771 			log_nametypeclass(VERB_QUERY, "skipping target due "
772 			 	"to dependency cycle", ns->name,
773 				LDNS_RR_TYPE_AAAA, qstate->qinfo.qclass);
774 			ns->done_pside6 = 1;
775 		}
776 	}
777 }
778 
779 int
780 iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
781 	struct delegpt* dp, int supports_ipv4, int supports_ipv6,
782 	int use_nat64)
783 {
784 	struct delegpt_ns* ns;
785 	struct delegpt_addr* a;
786 
787 	if(supports_ipv6 && use_nat64)
788 		supports_ipv4 = 1;
789 
790 	/* check:
791 	 *      o RD qflag is on.
792 	 *      o no addresses are provided.
793 	 *      o all NS items are required glue.
794 	 * OR
795 	 *      o RD qflag is on.
796 	 *      o no addresses are provided.
797 	 *      o the query is for one of the nameservers in dp,
798 	 *        and that nameserver is a glue-name for this dp.
799 	 */
800 	if(!(qflags&BIT_RD))
801 		return 0;
802 	/* if either available (result) or unused (usable) targets
803 	 * exist, the dp is not useless. */
804 	for(a = dp->usable_list; a; a = a->next_usable) {
805 		if(!addr_is_ip6(&a->addr, a->addrlen) && supports_ipv4)
806 			return 0;
807 		else if(addr_is_ip6(&a->addr, a->addrlen) && supports_ipv6)
808 			return 0;
809 	}
810 	for(a = dp->result_list; a; a = a->next_result) {
811 		if(!addr_is_ip6(&a->addr, a->addrlen) && supports_ipv4)
812 			return 0;
813 		else if(addr_is_ip6(&a->addr, a->addrlen) && supports_ipv6)
814 			return 0;
815 	}
816 
817 	/* see if query is for one of the nameservers, which is glue */
818 	if( ((qinfo->qtype == LDNS_RR_TYPE_A && supports_ipv4) ||
819 		(qinfo->qtype == LDNS_RR_TYPE_AAAA && supports_ipv6)) &&
820 		dname_subdomain_c(qinfo->qname, dp->name) &&
821 		delegpt_find_ns(dp, qinfo->qname, qinfo->qname_len))
822 		return 1;
823 
824 	for(ns = dp->nslist; ns; ns = ns->next) {
825 		if(ns->resolved) /* skip failed targets */
826 			continue;
827 		if(!dname_subdomain_c(ns->name, dp->name))
828 			return 0; /* one address is not required glue */
829 	}
830 	return 1;
831 }
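
/* Illustration: a recursive (RD) query that reaches a delegation whose
 * unresolved NS names all lie inside the delegated zone itself, with no
 * usable glue addresses, cannot make progress, so the dp is useless.
 * If at least one NS name lies outside the zone, or some address is
 * still available or unused, it is not. */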
832 
833 int
834 iter_qname_indicates_dnssec(struct module_env* env, struct query_info *qinfo)
835 {
836 	struct trust_anchor* a;
837 	if(!env || !env->anchors || !qinfo || !qinfo->qname)
838 		return 0;
839 	/* a trust anchor exists above the name? */
840 	if((a=anchors_lookup(env->anchors, qinfo->qname, qinfo->qname_len,
841 		qinfo->qclass))) {
842 		if(a->numDS == 0 && a->numDNSKEY == 0) {
843 			/* insecure trust point */
844 			lock_basic_unlock(&a->lock);
845 			return 0;
846 		}
847 		lock_basic_unlock(&a->lock);
848 		return 1;
849 	}
850 	/* no trust anchor above it. */
851 	return 0;
852 }
853 
854 int
855 iter_indicates_dnssec(struct module_env* env, struct delegpt* dp,
856         struct dns_msg* msg, uint16_t dclass)
857 {
858 	struct trust_anchor* a;
859 	/* information not available, !env->anchors can be common */
860 	if(!env || !env->anchors || !dp || !dp->name)
861 		return 0;
862 	/* a trust anchor exists with this name, RRSIGs expected */
863 	if((a=anchor_find(env->anchors, dp->name, dp->namelabs, dp->namelen,
864 		dclass))) {
865 		if(a->numDS == 0 && a->numDNSKEY == 0) {
866 			/* insecure trust point */
867 			lock_basic_unlock(&a->lock);
868 			return 0;
869 		}
870 		lock_basic_unlock(&a->lock);
871 		return 1;
872 	}
873 	/* see if DS rrset was given, in AUTH section */
874 	if(msg && msg->rep &&
875 		reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
876 		LDNS_RR_TYPE_DS, dclass))
877 		return 1;
878 	/* look in key cache */
879 	if(env->key_cache) {
880 		struct key_entry_key* kk = key_cache_obtain(env->key_cache,
881 			dp->name, dp->namelen, dclass, env->scratch, *env->now);
882 		if(kk) {
883 			if(query_dname_compare(kk->name, dp->name) == 0) {
884 			  if(key_entry_isgood(kk) || key_entry_isbad(kk)) {
885 				regional_free_all(env->scratch);
886 				return 1;
887 			  } else if(key_entry_isnull(kk)) {
888 				regional_free_all(env->scratch);
889 				return 0;
890 			  }
891 			}
892 			regional_free_all(env->scratch);
893 		}
894 	}
895 	return 0;
896 }
897 
898 int
899 iter_msg_has_dnssec(struct dns_msg* msg)
900 {
901 	size_t i;
902 	if(!msg || !msg->rep)
903 		return 0;
904 	for(i=0; i<msg->rep->an_numrrsets + msg->rep->ns_numrrsets; i++) {
905 		if(((struct packed_rrset_data*)msg->rep->rrsets[i]->
906 			entry.data)->rrsig_count > 0)
907 			return 1;
908 	}
909 	/* empty message has no DNSSEC info, with DNSSEC the reply is
910 	 * not empty (NSEC) */
911 	return 0;
912 }
913 
914 int iter_msg_from_zone(struct dns_msg* msg, struct delegpt* dp,
915         enum response_type type, uint16_t dclass)
916 {
917 	if(!msg || !dp || !msg->rep || !dp->name)
918 		return 0;
919 	/* SOA RRset - always from reply zone */
920 	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
921 		LDNS_RR_TYPE_SOA, dclass) ||
922 	   reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
923 		LDNS_RR_TYPE_SOA, dclass))
924 		return 1;
925 	if(type == RESPONSE_TYPE_REFERRAL) {
926 		size_t i;
927 		/* if it adds a single label, i.e. we expect .com,
928 		 * and referral to example.com. NS ... , then origin zone
929 		 * is .com. For a referral to sub.example.com. NS ... then
930 		 * we do not know, since example.com. may be in between. */
931 		for(i=0; i<msg->rep->an_numrrsets+msg->rep->ns_numrrsets;
932 			i++) {
933 			struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
934 			if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS &&
935 				ntohs(s->rk.rrset_class) == dclass) {
936 				int l = dname_count_labels(s->rk.dname);
937 				if(l == dp->namelabs + 1 &&
938 					dname_strict_subdomain(s->rk.dname,
939 					l, dp->name, dp->namelabs))
940 					return 1;
941 			}
942 		}
943 		return 0;
944 	}
945 	log_assert(type==RESPONSE_TYPE_ANSWER || type==RESPONSE_TYPE_CNAME);
946 	/* not a referral, and not lame delegation (upwards), so,
947 	 * any NS rrset must be from the zone itself */
948 	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
949 		LDNS_RR_TYPE_NS, dclass) ||
950 	   reply_find_rrset_section_ns(msg->rep, dp->name, dp->namelen,
951 		LDNS_RR_TYPE_NS, dclass))
952 		return 1;
953 	/* a DNSKEY set is expected at the zone apex as well */
954 	/* this is for 'minimal responses' for DNSKEYs */
955 	if(reply_find_rrset_section_an(msg->rep, dp->name, dp->namelen,
956 		LDNS_RR_TYPE_DNSKEY, dclass))
957 		return 1;
958 	return 0;
959 }
960 
961 /**
962  * check equality of two rrsets
963  * @param k1: rrset
964  * @param k2: rrset
965  * @return true if equal
966  */
967 static int
968 rrset_equal(struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2)
969 {
970 	struct packed_rrset_data* d1 = (struct packed_rrset_data*)
971 		k1->entry.data;
972 	struct packed_rrset_data* d2 = (struct packed_rrset_data*)
973 		k2->entry.data;
974 	size_t i, t;
975 	if(k1->rk.dname_len != k2->rk.dname_len ||
976 		k1->rk.flags != k2->rk.flags ||
977 		k1->rk.type != k2->rk.type ||
978 		k1->rk.rrset_class != k2->rk.rrset_class ||
979 		query_dname_compare(k1->rk.dname, k2->rk.dname) != 0)
980 		return 0;
981 	if(	/* do not check ttl: d1->ttl != d2->ttl || */
982 		d1->count != d2->count ||
983 		d1->rrsig_count != d2->rrsig_count ||
984 		d1->trust != d2->trust ||
985 		d1->security != d2->security)
986 		return 0;
987 	t = d1->count + d1->rrsig_count;
988 	for(i=0; i<t; i++) {
989 		if(d1->rr_len[i] != d2->rr_len[i] ||
990 			/* no ttl check: d1->rr_ttl[i] != d2->rr_ttl[i] ||*/
991 			memcmp(d1->rr_data[i], d2->rr_data[i],
992 				d1->rr_len[i]) != 0)
993 			return 0;
994 	}
995 	return 1;
996 }
997 
998 /** compare rrsets and sort canonically.  Compares rrset name, type, class.
999  * return 0 if equal, +1 if x > y, and -1 if x < y.
1000  */
1001 static int
1002 rrset_canonical_sort_cmp(const void* x, const void* y)
1003 {
1004 	struct ub_packed_rrset_key* rrx = *(struct ub_packed_rrset_key**)x;
1005 	struct ub_packed_rrset_key* rry = *(struct ub_packed_rrset_key**)y;
1006 	int r = dname_canonical_compare(rrx->rk.dname, rry->rk.dname);
1007 	if(r != 0)
1008 		return r;
1009 	if(rrx->rk.type != rry->rk.type) {
1010 		if(ntohs(rrx->rk.type) > ntohs(rry->rk.type))
1011 			return 1;
1012 		else	return -1;
1013 	}
1014 	if(rrx->rk.rrset_class != rry->rk.rrset_class) {
1015 		if(ntohs(rrx->rk.rrset_class) > ntohs(rry->rk.rrset_class))
1016 			return 1;
1017 		else	return -1;
1018 	}
1019 	return 0;
1020 }
1021 
1022 int
1023 reply_equal(struct reply_info* p, struct reply_info* q, struct regional* region)
1024 {
1025 	size_t i;
1026 	struct ub_packed_rrset_key** sorted_p, **sorted_q;
1027 	if(p->flags != q->flags ||
1028 		p->qdcount != q->qdcount ||
1029 		/* do not check TTL, this may differ */
1030 		/*
1031 		p->ttl != q->ttl ||
1032 		p->prefetch_ttl != q->prefetch_ttl ||
1033 		*/
1034 		p->security != q->security ||
1035 		p->an_numrrsets != q->an_numrrsets ||
1036 		p->ns_numrrsets != q->ns_numrrsets ||
1037 		p->ar_numrrsets != q->ar_numrrsets ||
1038 		p->rrset_count != q->rrset_count)
1039 		return 0;
1040 	/* sort the rrsets in the authority and additional sections before
1041 	 * compare, the query and answer sections are ordered in the sequence
1042 	 * they should have (eg. one after the other for aliases). */
1043 	sorted_p = (struct ub_packed_rrset_key**)regional_alloc_init(
1044 		region, p->rrsets, sizeof(*sorted_p)*p->rrset_count);
1045 	if(!sorted_p) return 0;
1046 	log_assert(p->an_numrrsets + p->ns_numrrsets + p->ar_numrrsets <=
1047 		p->rrset_count);
1048 	qsort(sorted_p + p->an_numrrsets, p->ns_numrrsets,
1049 		sizeof(*sorted_p), rrset_canonical_sort_cmp);
1050 	qsort(sorted_p + p->an_numrrsets + p->ns_numrrsets, p->ar_numrrsets,
1051 		sizeof(*sorted_p), rrset_canonical_sort_cmp);
1052 
1053 	sorted_q = (struct ub_packed_rrset_key**)regional_alloc_init(
1054 		region, q->rrsets, sizeof(*sorted_q)*q->rrset_count);
1055 	if(!sorted_q) {
1056 		regional_free_all(region);
1057 		return 0;
1058 	}
1059 	log_assert(q->an_numrrsets + q->ns_numrrsets + q->ar_numrrsets <=
1060 		q->rrset_count);
1061 	qsort(sorted_q + q->an_numrrsets, q->ns_numrrsets,
1062 		sizeof(*sorted_q), rrset_canonical_sort_cmp);
1063 	qsort(sorted_q + q->an_numrrsets + q->ns_numrrsets, q->ar_numrrsets,
1064 		sizeof(*sorted_q), rrset_canonical_sort_cmp);
1065 
1066 	/* compare the rrsets */
1067 	for(i=0; i<p->rrset_count; i++) {
1068 		if(!rrset_equal(sorted_p[i], sorted_q[i])) {
1069 			if(!rrset_canonical_equal(region, sorted_p[i],
1070 				sorted_q[i])) {
1071 				regional_free_all(region);
1072 				return 0;
1073 			}
1074 		}
1075 	}
1076 	regional_free_all(region);
1077 	return 1;
1078 }
1079 
1080 void
1081 caps_strip_reply(struct reply_info* rep)
1082 {
1083 	size_t i;
1084 	if(!rep) return;
1085 	/* see if message is a referral, in which case the additional and
1086 	 * NS record cannot be removed */
1087 	/* referrals have the AA flag unset (strict check, not elsewhere in
1088 	 * unbound, but for 0x20 this is very convenient). */
1089 	if(!(rep->flags&BIT_AA))
1090 		return;
1091 	/* remove the additional section from the reply */
1092 	if(rep->ar_numrrsets != 0) {
1093 		verbose(VERB_ALGO, "caps fallback: removing additional section");
1094 		rep->rrset_count -= rep->ar_numrrsets;
1095 		rep->ar_numrrsets = 0;
1096 	}
1097 	/* is there an NS set in the authority section to remove? */
1098 	/* the failure case (Cisco firewalls) only has one rrset in authsec */
1099 	for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
1100 		struct ub_packed_rrset_key* s = rep->rrsets[i];
1101 		if(ntohs(s->rk.type) == LDNS_RR_TYPE_NS) {
1102 			/* remove NS rrset and break from loop (loop limits
1103 			 * have changed) */
1104 			/* move last rrset into this position (there is no
1105 			 * additional section any more) */
1106 			verbose(VERB_ALGO, "caps fallback: removing NS rrset");
1107 			if(i < rep->rrset_count-1)
1108 				rep->rrsets[i]=rep->rrsets[rep->rrset_count-1];
1109 			rep->rrset_count --;
1110 			rep->ns_numrrsets --;
1111 			break;
1112 		}
1113 	}
1114 }
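
/* This stripping is used during the 0x20 caps-for-id fallback, where
 * answers from several servers are compared with reply_equal(); removing
 * the volatile additional section and NS rrset first avoids spurious
 * mismatches between otherwise identical answers. */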
1115 
1116 int caps_failed_rcode(struct reply_info* rep)
1117 {
1118 	return !(FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NOERROR ||
1119 		FLAGS_GET_RCODE(rep->flags) == LDNS_RCODE_NXDOMAIN);
1120 }
1121 
1122 void
1123 iter_store_parentside_rrset(struct module_env* env,
1124 	struct ub_packed_rrset_key* rrset)
1125 {
1126 	struct rrset_ref ref;
1127 	rrset = packed_rrset_copy_alloc(rrset, env->alloc, *env->now);
1128 	if(!rrset) {
1129 		log_err("malloc failure in store_parentside_rrset");
1130 		return;
1131 	}
1132 	rrset->rk.flags |= PACKED_RRSET_PARENT_SIDE;
1133 	rrset->entry.hash = rrset_key_hash(&rrset->rk);
1134 	ref.key = rrset;
1135 	ref.id = rrset->id;
1136 	/* ignore ret: if it was in the cache, ref updated */
1137 	(void)rrset_cache_update(env->rrset_cache, &ref, env->alloc, *env->now);
1138 }
1139 
1140 /** fetch NS record from reply, if any */
1141 static struct ub_packed_rrset_key*
1142 reply_get_NS_rrset(struct reply_info* rep)
1143 {
1144 	size_t i;
1145 	for(i=0; i<rep->rrset_count; i++) {
1146 		if(rep->rrsets[i]->rk.type == htons(LDNS_RR_TYPE_NS)) {
1147 			return rep->rrsets[i];
1148 		}
1149 	}
1150 	return NULL;
1151 }
1152 
1153 void
1154 iter_store_parentside_NS(struct module_env* env, struct reply_info* rep)
1155 {
1156 	struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
1157 	if(rrset) {
1158 		log_rrset_key(VERB_ALGO, "store parent-side NS", rrset);
1159 		iter_store_parentside_rrset(env, rrset);
1160 	}
1161 }
1162 
1163 void iter_store_parentside_neg(struct module_env* env,
1164         struct query_info* qinfo, struct reply_info* rep)
1165 {
1166 	/* TTL: NS from referral in iq->deleg_msg,
1167 	 *      or first RR from iq->response,
1168 	 *      or servfail5secs if !iq->response */
1169 	time_t ttl = NORR_TTL;
1170 	struct ub_packed_rrset_key* neg;
1171 	struct packed_rrset_data* newd;
1172 	if(rep) {
1173 		struct ub_packed_rrset_key* rrset = reply_get_NS_rrset(rep);
1174 		if(!rrset && rep->rrset_count != 0) rrset = rep->rrsets[0];
1175 		if(rrset) ttl = ub_packed_rrset_ttl(rrset);
1176 	}
1177 	/* create empty rrset to store */
1178 	neg = (struct ub_packed_rrset_key*)regional_alloc(env->scratch,
1179 	                sizeof(struct ub_packed_rrset_key));
1180 	if(!neg) {
1181 		log_err("out of memory in store_parentside_neg");
1182 		return;
1183 	}
1184 	memset(&neg->entry, 0, sizeof(neg->entry));
1185 	neg->entry.key = neg;
1186 	neg->rk.type = htons(qinfo->qtype);
1187 	neg->rk.rrset_class = htons(qinfo->qclass);
1188 	neg->rk.flags = 0;
1189 	neg->rk.dname = regional_alloc_init(env->scratch, qinfo->qname,
1190 		qinfo->qname_len);
1191 	if(!neg->rk.dname) {
1192 		log_err("out of memory in store_parentside_neg");
1193 		return;
1194 	}
1195 	neg->rk.dname_len = qinfo->qname_len;
1196 	neg->entry.hash = rrset_key_hash(&neg->rk);
1197 	newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch,
1198 		sizeof(struct packed_rrset_data) + sizeof(size_t) +
1199 		sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t));
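	/* the extra bytes after the struct hold one rr_len entry, one
	 * rr_data pointer, one rr_ttl entry and the 2-byte rdata length
	 * of the single empty RR; packed_rrset_ptr_fixup() below points
	 * the arrays into this space. */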
1200 	if(!newd) {
1201 		log_err("out of memory in store_parentside_neg");
1202 		return;
1203 	}
1204 	neg->entry.data = newd;
1205 	newd->ttl = ttl;
1206 	/* entry must have one RR, otherwise not valid in cache.
1207 	 * put in one RR with empty rdata: those are ignored as nameserver */
1208 	newd->count = 1;
1209 	newd->rrsig_count = 0;
1210 	newd->trust = rrset_trust_ans_noAA;
1211 	newd->rr_len = (size_t*)((uint8_t*)newd +
1212 		sizeof(struct packed_rrset_data));
1213 	newd->rr_len[0] = 0 /* zero len rdata */ + sizeof(uint16_t);
1214 	packed_rrset_ptr_fixup(newd);
1215 	newd->rr_ttl[0] = newd->ttl;
1216 	sldns_write_uint16(newd->rr_data[0], 0 /* zero len rdata */);
1217 	/* store it */
1218 	log_rrset_key(VERB_ALGO, "store parent-side negative", neg);
1219 	iter_store_parentside_rrset(env, neg);
1220 }
1221 
1222 int
1223 iter_lookup_parent_NS_from_cache(struct module_env* env, struct delegpt* dp,
1224 	struct regional* region, struct query_info* qinfo)
1225 {
1226 	struct ub_packed_rrset_key* akey;
1227 	akey = rrset_cache_lookup(env->rrset_cache, dp->name,
1228 		dp->namelen, LDNS_RR_TYPE_NS, qinfo->qclass,
1229 		PACKED_RRSET_PARENT_SIDE, *env->now, 0);
1230 	if(akey) {
1231 		log_rrset_key(VERB_ALGO, "found parent-side NS in cache", akey);
1232 		dp->has_parent_side_NS = 1;
1233 		/* and mark the new names as lame */
1234 		if(!delegpt_rrset_add_ns(dp, region, akey, 1)) {
1235 			lock_rw_unlock(&akey->entry.lock);
1236 			return 0;
1237 		}
1238 		lock_rw_unlock(&akey->entry.lock);
1239 	}
1240 	return 1;
1241 }
1242 
1243 int iter_lookup_parent_glue_from_cache(struct module_env* env,
1244         struct delegpt* dp, struct regional* region, struct query_info* qinfo)
1245 {
1246 	struct ub_packed_rrset_key* akey;
1247 	struct delegpt_ns* ns;
1248 	size_t num = delegpt_count_targets(dp);
1249 	for(ns = dp->nslist; ns; ns = ns->next) {
1250 		if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE)
1251 			continue;
1252 		ns->cache_lookup_count++;
1253 		/* get cached parentside A */
1254 		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
1255 			ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
1256 			PACKED_RRSET_PARENT_SIDE, *env->now, 0);
1257 		if(akey) {
1258 			log_rrset_key(VERB_ALGO, "found parent-side", akey);
1259 			ns->done_pside4 = 1;
1260 			/* a negative-cache-element has no addresses it adds */
1261 			if(!delegpt_add_rrset_A(dp, region, akey, 1, NULL))
1262 				log_err("malloc failure in lookup_parent_glue");
1263 			lock_rw_unlock(&akey->entry.lock);
1264 		}
1265 		/* get cached parentside AAAA */
1266 		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
1267 			ns->namelen, LDNS_RR_TYPE_AAAA, qinfo->qclass,
1268 			PACKED_RRSET_PARENT_SIDE, *env->now, 0);
1269 		if(akey) {
1270 			log_rrset_key(VERB_ALGO, "found parent-side", akey);
1271 			ns->done_pside6 = 1;
1272 			/* a negative-cache-element has no addresses it adds */
1273 			if(!delegpt_add_rrset_AAAA(dp, region, akey, 1, NULL))
1274 				log_err("malloc failure in lookup_parent_glue");
1275 			lock_rw_unlock(&akey->entry.lock);
1276 		}
1277 	}
1278 	/* see if new (but lame) addresses have become available */
1279 	return delegpt_count_targets(dp) != num;
1280 }
1281 
1282 int
1283 iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
1284 	uint16_t* c)
1285 {
1286 	uint16_t c1 = *c, c2 = *c;
1287 	int r1 = hints_next_root(hints, &c1);
1288 	int r2 = forwards_next_root(fwd, &c2);
1289 	if(!r1 && !r2) /* got none, end of list */
1290 		return 0;
1291 	else if(!r1) /* got one, return that */
1292 		*c = c2;
1293 	else if(!r2)
1294 		*c = c1;
1295 	else if(c1 < c2) /* got both take smallest */
1296 		*c = c1;
1297 	else	*c = c2;
1298 	return 1;
1299 }
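
/* Callers typically enumerate the classes that have a root (hint or
 * forward) by starting at class 0 and, after each hit, calling again
 * with the returned class plus one, until the function returns 0. */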
1300 
1301 void
1302 iter_scrub_ds(struct dns_msg* msg, struct ub_packed_rrset_key* ns, uint8_t* z)
1303 {
1304 	/* Only the DS record for the delegation itself is expected.
1305 	 * We allow DS for everything between the bailiwick and the
1306 	 * zonecut, thus DS records must be at or above the zonecut.
1307 	 * And the DS records must be below the server authority zone.
1308 	 * The answer section is already scrubbed. */
1309 	size_t i = msg->rep->an_numrrsets;
1310 	while(i < (msg->rep->an_numrrsets + msg->rep->ns_numrrsets)) {
1311 		struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
1312 		if(ntohs(s->rk.type) == LDNS_RR_TYPE_DS &&
1313 			(!ns || !dname_subdomain_c(ns->rk.dname, s->rk.dname)
1314 			|| query_dname_compare(z, s->rk.dname) == 0)) {
1315 			log_nametypeclass(VERB_ALGO, "removing irrelevant DS",
1316 				s->rk.dname, ntohs(s->rk.type),
1317 				ntohs(s->rk.rrset_class));
1318 			memmove(msg->rep->rrsets+i, msg->rep->rrsets+i+1,
1319 				sizeof(struct ub_packed_rrset_key*) *
1320 				(msg->rep->rrset_count-i-1));
1321 			msg->rep->ns_numrrsets--;
1322 			msg->rep->rrset_count--;
1323 			/* stay at same i, but new record */
1324 			continue;
1325 		}
1326 		i++;
1327 	}
1328 }
1329 
1330 void
1331 iter_scrub_nxdomain(struct dns_msg* msg)
1332 {
1333 	if(msg->rep->an_numrrsets == 0)
1334 		return;
1335 
1336 	memmove(msg->rep->rrsets, msg->rep->rrsets+msg->rep->an_numrrsets,
1337 		sizeof(struct ub_packed_rrset_key*) *
1338 		(msg->rep->rrset_count-msg->rep->an_numrrsets));
1339 	msg->rep->rrset_count -= msg->rep->an_numrrsets;
1340 	msg->rep->an_numrrsets = 0;
1341 }
1342 
1343 void iter_dec_attempts(struct delegpt* dp, int d, int outbound_msg_retry)
1344 {
1345 	struct delegpt_addr* a;
1346 	for(a=dp->target_list; a; a = a->next_target) {
1347 		if(a->attempts >= outbound_msg_retry) {
1348 			/* add back to result list */
1349 			delegpt_add_to_result_list(dp, a);
1350 		}
1351 		if(a->attempts > d)
1352 			a->attempts -= d;
1353 		else a->attempts = 0;
1354 	}
1355 }
1356 
1357 void iter_merge_retry_counts(struct delegpt* dp, struct delegpt* old,
1358 	int outbound_msg_retry)
1359 {
1360 	struct delegpt_addr* a, *o, *prev;
1361 	for(a=dp->target_list; a; a = a->next_target) {
1362 		o = delegpt_find_addr(old, &a->addr, a->addrlen);
1363 		if(o) {
1364 			log_addr(VERB_ALGO, "copy attempt count previous dp",
1365 				&a->addr, a->addrlen);
1366 			a->attempts = o->attempts;
1367 		}
1368 	}
1369 	prev = NULL;
1370 	a = dp->usable_list;
1371 	while(a) {
1372 		if(a->attempts >= outbound_msg_retry) {
1373 			log_addr(VERB_ALGO, "remove from usable list dp",
1374 				&a->addr, a->addrlen);
1375 			/* remove from result list */
1376 			if(prev)
1377 				prev->next_usable = a->next_usable;
1378 			else	dp->usable_list = a->next_usable;
1379 			/* prev stays the same */
1380 			a = a->next_usable;
1381 			continue;
1382 		}
1383 		prev = a;
1384 		a = a->next_usable;
1385 	}
1386 }
1387 
1388 int
1389 iter_ds_toolow(struct dns_msg* msg, struct delegpt* dp)
1390 {
1391 	/* if for query example.com, there is example.com SOA or a subdomain
1392 	 * of example.com, then we are too low and need to fetch NS. */
1393 	size_t i;
1394 	/* if we have a DNAME or CNAME we are probably wrong */
1395 	/* if we have a qtype DS in the answer section, it's fine */
1396 	for(i=0; i < msg->rep->an_numrrsets; i++) {
1397 		struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
1398 		if(ntohs(s->rk.type) == LDNS_RR_TYPE_DNAME ||
1399 			ntohs(s->rk.type) == LDNS_RR_TYPE_CNAME) {
1400 			/* not the right answer, maybe too low, check the
1401 			 * RRSIG signer name (if there is any) for a hint
1402 			 * that it is from the dp zone anyway */
1403 			uint8_t* sname;
1404 			size_t slen;
1405 			val_find_rrset_signer(s, &sname, &slen);
1406 			if(sname && query_dname_compare(dp->name, sname)==0)
1407 				return 0; /* it is fine, from the right dp */
1408 			return 1;
1409 		}
1410 		if(ntohs(s->rk.type) == LDNS_RR_TYPE_DS)
1411 			return 0; /* fine, we have a DS record */
1412 	}
1413 	for(i=msg->rep->an_numrrsets;
1414 		i < msg->rep->an_numrrsets + msg->rep->ns_numrrsets; i++) {
1415 		struct ub_packed_rrset_key* s = msg->rep->rrsets[i];
1416 		if(ntohs(s->rk.type) == LDNS_RR_TYPE_SOA) {
1417 			if(dname_subdomain_c(s->rk.dname, msg->qinfo.qname))
1418 				return 1; /* point is too low */
1419 			if(query_dname_compare(s->rk.dname, dp->name)==0)
1420 				return 0; /* right dp */
1421 		}
1422 		if(ntohs(s->rk.type) == LDNS_RR_TYPE_NSEC ||
1423 			ntohs(s->rk.type) == LDNS_RR_TYPE_NSEC3) {
1424 			uint8_t* sname;
1425 			size_t slen;
1426 			val_find_rrset_signer(s, &sname, &slen);
1427 			if(sname && query_dname_compare(dp->name, sname)==0)
1428 				return 0; /* it is fine, from the right dp */
1429 			return 1;
1430 		}
1431 	}
1432 	/* we do not know */
1433 	return 1;
1434 }
1435 
1436 int iter_dp_cangodown(struct query_info* qinfo, struct delegpt* dp)
1437 {
1438 	/* without a delegation point we cannot go down;
1439 	 * robustness check, it should really exist */
1440 	if(!dp) return 0;
1441 
1442 	/* see if dp equals the qname, then we cannot go down further */
1443 	if(query_dname_compare(qinfo->qname, dp->name) == 0)
1444 		return 0;
1445 	/* if dp is one label above the name we also cannot go down further */
1446 	if(dname_count_labels(qinfo->qname) == dp->namelabs+1)
1447 		return 0;
1448 	return 1;
1449 }
1450 
1451 int
1452 iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
1453 	uint8_t** retdpname, size_t* retdpnamelen)
1454 {
1455 	struct iter_hints_stub *stub;
1456 	struct delegpt *dp;
1457 
1458 	/* Check for stub. */
1459 	stub = hints_lookup_stub(qstate->env->hints, qinf->qname,
1460 	    qinf->qclass, NULL);
1461 	dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass);
1462 
1463 	/* see if forward or stub is more pertinent */
1464 	if(stub && stub->dp && dp) {
1465 		if(dname_strict_subdomain(dp->name, dp->namelabs,
1466 			stub->dp->name, stub->dp->namelabs)) {
1467 			stub = NULL; /* ignore stub, forward is lower */
1468 		} else {
1469 			dp = NULL; /* ignore forward, stub is lower */
1470 		}
1471 	}
1472 
1473 	/* check stub */
1474 	if (stub != NULL && stub->dp != NULL) {
1475 		if(stub->dp->no_cache) {
1476 			char qname[255+1];
1477 			char dpname[255+1];
1478 			dname_str(qinf->qname, qname);
1479 			dname_str(stub->dp->name, dpname);
1480 			verbose(VERB_ALGO, "stub for %s %s has no_cache", qname, dpname);
1481 		}
1482 		if(retdpname) {
1483 			*retdpname = stub->dp->name;
1484 			*retdpnamelen = stub->dp->namelen;
1485 		}
1486 		return (stub->dp->no_cache);
1487 	}
1488 
1489 	/* Check for forward. */
1490 	if (dp) {
1491 		if(dp->no_cache) {
1492 			char qname[255+1];
1493 			char dpname[255+1];
1494 			dname_str(qinf->qname, qname);
1495 			dname_str(dp->name, dpname);
1496 			verbose(VERB_ALGO, "forward for %s %s has no_cache", qname, dpname);
1497 		}
1498 		if(retdpname) {
1499 			*retdpname = dp->name;
1500 			*retdpnamelen = dp->namelen;
1501 		}
1502 		return (dp->no_cache);
1503 	}
1504 	if(retdpname) {
1505 		*retdpname = NULL;
1506 		*retdpnamelen = 0;
1507 	}
1508 	return 0;
1509 }
1510 
1511 void iterator_set_ip46_support(struct module_stack* mods,
1512 	struct module_env* env, struct outside_network* outnet)
1513 {
1514 	int m = modstack_find(mods, "iterator");
1515 	struct iter_env* ie = NULL;
1516 	if(m == -1)
1517 		return;
1518 	ie = (struct iter_env*)env->modinfo[m];
1519 	if(outnet->pending == NULL)
1520 		return; /* we are in testbound, no rbtree for UDP */
1521 	if(outnet->num_ip4 == 0)
1522 		ie->supports_ipv4 = 0;
1523 	if(outnet->num_ip6 == 0)
1524 		ie->supports_ipv6 = 0;
1525 }
1526