/*
 * edns-subnet/subnetmod.c - edns subnet module. Must be called before validator
 * and iterator.
 *
 * Copyright (c) 2013, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * \file
 * subnet module for unbound.
 */

#include "config.h"

#ifdef CLIENT_SUBNET /* keeps splint happy */

#include "edns-subnet/subnetmod.h"
#include "edns-subnet/edns-subnet.h"
#include "edns-subnet/addrtree.h"
#include "edns-subnet/subnet-whitelist.h"

#include "services/mesh.h"
#include "services/cache/dns.h"
#include "util/module.h"
#include "util/regional.h"
#include "util/storage/slabhash.h"
#include "util/config_file.h"
#include "util/data/msgreply.h"
#include "sldns/sbuffer.h"

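/** Upper bound on the number of nodes kept in one address tree of the
 * subnet message cache; handed to addrtree_create() in get_tree() below
 * (interpretation of the limit is an assumption based on that use). */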
#define ECS_MAX_TREESIZE 100

/** externally called */
void
subnet_data_delete(void *d, void *ATTR_UNUSED(arg))
{
	struct subnet_msg_cache_data *r;
	r = (struct subnet_msg_cache_data*)d;
	addrtree_delete(r->tree4);
	addrtree_delete(r->tree6);
	free(r);
}

/** externally called */
size_t
msg_cache_sizefunc(void *k, void *d)
{
	struct msgreply_entry *q = (struct msgreply_entry*)k;
	struct subnet_msg_cache_data *r = (struct subnet_msg_cache_data*)d;
	size_t s = sizeof(struct msgreply_entry)
		+ sizeof(struct subnet_msg_cache_data)
		+ q->key.qname_len + lock_get_mem(&q->entry.lock);
	s += addrtree_size(r->tree4);
	s += addrtree_size(r->tree6);
	return s;
}

/** new query for ecs module */
static int
subnet_new_qstate(struct module_qstate *qstate, int id)
{
	struct subnet_qstate *sq = (struct subnet_qstate*)regional_alloc(
		qstate->region, sizeof(struct subnet_qstate));
	if(!sq)
		return 0;
	qstate->minfo[id] = sq;
	memset(sq, 0, sizeof(*sq));
	return 1;
}

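/*
 * Wire image of the ECS option data built below (RFC 7871, section 6):
 *   FAMILY (2 octets) | SOURCE PREFIX-LENGTH (1) | SCOPE PREFIX-LENGTH (1) |
 *   ADDRESS (the source prefix, zero padded to a whole octet).
 * Illustrative example: family 1 (IPv4), source /20, scope 0 gives
 * 4 + 3 = 7 octets of option data, with the third address octet masked
 * down to its top four bits.
 */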
/** Add ecs struct to edns list, after writing it in wire format. */
static void
ecs_opt_list_append(struct ecs_data* ecs, struct edns_option** list,
	struct module_qstate *qstate)
{
	size_t sn_octs, sn_octs_remainder;
	sldns_buffer* buf = qstate->env->scratch_buffer;

	if(ecs->subnet_validdata) {
		log_assert(ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4 ||
			ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP6);
		log_assert(ecs->subnet_addr_fam != EDNSSUBNET_ADDRFAM_IP4 ||
			ecs->subnet_source_mask <= INET_SIZE*8);
		log_assert(ecs->subnet_addr_fam != EDNSSUBNET_ADDRFAM_IP6 ||
			ecs->subnet_source_mask <= INET6_SIZE*8);

		sn_octs = ecs->subnet_source_mask / 8;
		sn_octs_remainder =
			(size_t)((ecs->subnet_source_mask % 8)>0?1:0);

		log_assert(sn_octs + sn_octs_remainder <= INET6_SIZE);

		sldns_buffer_clear(buf);
		sldns_buffer_write_u16(buf, ecs->subnet_addr_fam);
		sldns_buffer_write_u8(buf, ecs->subnet_source_mask);
		sldns_buffer_write_u8(buf, ecs->subnet_scope_mask);
		sldns_buffer_write(buf, ecs->subnet_addr, sn_octs);
		if(sn_octs_remainder)
			sldns_buffer_write_u8(buf, ecs->subnet_addr[sn_octs] &
				~(0xFF >> (ecs->subnet_source_mask % 8)));
		sldns_buffer_flip(buf);

		edns_opt_list_append(list,
				qstate->env->cfg->client_subnet_opcode,
				sn_octs + sn_octs_remainder + 4,
				sldns_buffer_begin(buf), qstate->region);
	}
}

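/* Registered in subnetmod_init() as an inplace_cb_query callback; it runs
 * when a query is about to be sent to an upstream address and decides
 * whether that query should carry the ECS option for that address. */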
int ecs_whitelist_check(struct query_info* qinfo,
	uint16_t ATTR_UNUSED(flags), struct module_qstate* qstate,
	struct sockaddr_storage* addr, socklen_t addrlen,
	uint8_t* ATTR_UNUSED(zone), size_t ATTR_UNUSED(zonelen),
	struct regional* ATTR_UNUSED(region), int id, void* ATTR_UNUSED(cbargs))
{
	struct subnet_qstate *sq;
	struct subnet_env *sn_env;

	if(!(sq=(struct subnet_qstate*)qstate->minfo[id]))
		return 1;
	sn_env = (struct subnet_env*)qstate->env->modinfo[id];

	/* Cache by default, might be disabled after parsing EDNS option
	 * received from nameserver. */
	qstate->no_cache_store = 0;

	if(sq->ecs_server_out.subnet_validdata && ((sq->subnet_downstream &&
		qstate->env->cfg->client_subnet_always_forward) ||
		ecs_is_whitelisted(sn_env->whitelist,
		addr, addrlen, qinfo->qname, qinfo->qname_len,
		qinfo->qclass))) {
		/* Address on whitelist or client query contains ECS option; we
		 * want to send out ECS. Only add the option if it is not
		 * already set. */
		if(!(sq->subnet_sent)) {
			ecs_opt_list_append(&sq->ecs_server_out,
				&qstate->edns_opts_back_out, qstate);
			sq->subnet_sent = 1;
		}
	}
	else if(sq->subnet_sent) {
		/* Outgoing ECS option is set, but we don't want to send it to
		 * this address; remove the option. */
		edns_opt_list_remove(&qstate->edns_opts_back_out,
			qstate->env->cfg->client_subnet_opcode);
		sq->subnet_sent = 0;
	}
	return 1;
}


int
subnetmod_init(struct module_env *env, int id)
{
	struct subnet_env *sn_env = (struct subnet_env*)calloc(1,
		sizeof(struct subnet_env));
	if(!sn_env) {
		log_err("malloc failure");
		return 0;
	}
	alloc_init(&sn_env->alloc, NULL, 0);
	env->modinfo[id] = (void*)sn_env;
	/* Copy msg_cache settings */
	sn_env->subnet_msg_cache = slabhash_create(env->cfg->msg_cache_slabs,
		HASH_DEFAULT_STARTARRAY, env->cfg->msg_cache_size,
		msg_cache_sizefunc, query_info_compare, query_entry_delete,
		subnet_data_delete, NULL);
	if(!sn_env->subnet_msg_cache) {
		log_err("subnet: could not create cache");
		free(sn_env);
		env->modinfo[id] = NULL;
		return 0;
	}
	/* whitelist for edns subnet capable servers */
	sn_env->whitelist = ecs_whitelist_create();
	if(!sn_env->whitelist ||
		!ecs_whitelist_apply_cfg(sn_env->whitelist, env->cfg)) {
		log_err("subnet: could not create ECS whitelist");
		slabhash_delete(sn_env->subnet_msg_cache);
		free(sn_env);
		env->modinfo[id] = NULL;
		return 0;
	}

	verbose(VERB_QUERY, "subnet: option registered (%d)",
		env->cfg->client_subnet_opcode);
	/* Create new mesh state for all queries. */
	env->unique_mesh = 1;
	if(!edns_register_option(env->cfg->client_subnet_opcode,
		env->cfg->client_subnet_always_forward /* bypass cache */,
		0 /* no aggregation */, env)) {
		log_err("subnet: could not register opcode");
		ecs_whitelist_delete(sn_env->whitelist);
		slabhash_delete(sn_env->subnet_msg_cache);
		free(sn_env);
		env->modinfo[id] = NULL;
		return 0;
	}
	inplace_cb_register((void*)ecs_whitelist_check, inplace_cb_query, NULL,
		env, id);
	inplace_cb_register((void*)ecs_edns_back_parsed,
		inplace_cb_edns_back_parsed, NULL, env, id);
	inplace_cb_register((void*)ecs_query_response,
		inplace_cb_query_response, NULL, env, id);
	lock_rw_init(&sn_env->biglock);
	return 1;
}

void
subnetmod_deinit(struct module_env *env, int id)
{
	struct subnet_env *sn_env;
	if(!env || !env->modinfo[id])
		return;
	sn_env = (struct subnet_env*)env->modinfo[id];
	lock_rw_destroy(&sn_env->biglock);
	inplace_cb_delete(env, inplace_cb_edns_back_parsed, id);
	inplace_cb_delete(env, inplace_cb_query, id);
	inplace_cb_delete(env, inplace_cb_query_response, id);
	ecs_whitelist_delete(sn_env->whitelist);
	slabhash_delete(sn_env->subnet_msg_cache);
	alloc_clear(&sn_env->alloc);
	free(sn_env);
	env->modinfo[id] = NULL;
}

/** Tells client that upstream has no/improper support */
static void
cp_edns_bad_response(struct ecs_data *target, struct ecs_data *source)
{
	target->subnet_scope_mask  = 0;
	target->subnet_source_mask = source->subnet_source_mask;
	target->subnet_addr_fam    = source->subnet_addr_fam;
	memcpy(target->subnet_addr, source->subnet_addr, INET6_SIZE);
	target->subnet_validdata = 1;
}

static void
delfunc(void *envptr, void *elemptr) {
	struct reply_info *elem = (struct reply_info *)elemptr;
	struct subnet_env *env = (struct subnet_env *)envptr;
	reply_info_parsedelete(elem, &env->alloc);
}

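/* Memory accounting for a cached reply_info: the struct itself (which
 * already contains one embedded rrset_ref, hence the subtraction),
 * rrset_count rrset_refs, and the rrsets pointer array. The RR data
 * referenced by the rrsets is not counted here. */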
static size_t
sizefunc(void *elemptr) {
	struct reply_info *elem  = (struct reply_info *)elemptr;
	return sizeof (struct reply_info) - sizeof (struct rrset_ref)
		+ elem->rrset_count * sizeof (struct rrset_ref)
		+ elem->rrset_count * sizeof (struct ub_packed_rrset_key *);
}

/**
 * Select tree from cache entry based on edns data.
 * If the tree for the address family is not present, it creates a new one.
 * NULL on failure to create. */
static struct addrtree*
get_tree(struct subnet_msg_cache_data *data, struct ecs_data *edns,
	struct subnet_env *env, struct config_file* cfg)
{
	struct addrtree *tree;
	if (edns->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4) {
		if (!data->tree4)
			data->tree4 = addrtree_create(
				cfg->max_client_subnet_ipv4, &delfunc,
				&sizefunc, env, ECS_MAX_TREESIZE);
		tree = data->tree4;
	} else {
		if (!data->tree6)
			data->tree6 = addrtree_create(
				cfg->max_client_subnet_ipv6, &delfunc,
				&sizefunc, env, ECS_MAX_TREESIZE);
		tree = data->tree6;
	}
	return tree;
}

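/*
 * Insert the returned message into the subnet cache. The cache is a
 * slabhash keyed by query info; each entry holds a subnet_msg_cache_data
 * with one address tree per family, keyed by the client prefix. The reply
 * is copied with the module's own allocator and stored under the scope
 * reported by the authority (sq->ecs_server_in.subnet_scope_mask).
 */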
static void
update_cache(struct module_qstate *qstate, int id)
{
	struct msgreply_entry *mrep_entry;
	struct addrtree *tree;
	struct reply_info *rep;
	struct query_info qinf;
	struct subnet_env *sne = qstate->env->modinfo[id];
	struct subnet_qstate *sq = (struct subnet_qstate*)qstate->minfo[id];
	struct slabhash *subnet_msg_cache = sne->subnet_msg_cache;
	struct ecs_data *edns = &sq->ecs_client_in;
	size_t i;

	/* We already calculated hash upon lookup */
	hashvalue_type h = qstate->minfo[id] ?
		((struct subnet_qstate*)qstate->minfo[id])->qinfo_hash :
		query_info_hash(&qstate->qinfo, qstate->query_flags);
	/* Step 1, general qinfo lookup */
	struct lruhash_entry *lru_entry = slabhash_lookup(subnet_msg_cache, h,
		&qstate->qinfo, 1);
	int acquired_lock = (lru_entry != NULL);
	if (!lru_entry) {
		qinf = qstate->qinfo;
		qinf.qname = memdup(qstate->qinfo.qname,
			qstate->qinfo.qname_len);
		if(!qinf.qname) {
			log_err("memdup failed");
			return;
		}
		mrep_entry = query_info_entrysetup(&qinf, NULL, h);
		free(qinf.qname); /* if qname 'consumed', it is set to NULL */
		if (!mrep_entry) {
			log_err("query_info_entrysetup failed");
			return;
		}
		lru_entry = &mrep_entry->entry;
		lock_rw_wrlock(&lru_entry->lock);
		lru_entry->data = calloc(1,
			sizeof(struct subnet_msg_cache_data));
		if (!lru_entry->data) {
			log_err("malloc failed");
			return;
		}
	}
	/* Step 2, find the correct tree */
	if (!(tree = get_tree(lru_entry->data, edns, sne, qstate->env->cfg))) {
		if (acquired_lock) lock_rw_unlock(&lru_entry->lock);
		log_err("Subnet cache insertion failed");
		return;
	}
	lock_quick_lock(&sne->alloc.lock);
	rep = reply_info_copy(qstate->return_msg->rep, &sne->alloc, NULL);
	lock_quick_unlock(&sne->alloc.lock);
	if (!rep) {
		if (acquired_lock) lock_rw_unlock(&lru_entry->lock);
		log_err("Subnet cache insertion failed");
		return;
	}

	/* store RRsets */
	for(i=0; i<rep->rrset_count; i++) {
		rep->ref[i].key = rep->rrsets[i];
		rep->ref[i].id = rep->rrsets[i]->id;
	}
	reply_info_set_ttls(rep, *qstate->env->now);
	rep->flags |= (BIT_RA | BIT_QR); /* fix flags to be sensible for */
	rep->flags &= ~(BIT_AA | BIT_CD);/* a reply based on the cache   */
	addrtree_insert(tree, (addrkey_t*)edns->subnet_addr,
		edns->subnet_source_mask,
		sq->ecs_server_in.subnet_scope_mask, rep,
		rep->ttl, *qstate->env->now);
	if (acquired_lock) {
		lock_rw_unlock(&lru_entry->lock);
	} else {
		lock_rw_unlock(&lru_entry->lock);
		slabhash_insert(subnet_msg_cache, h, lru_entry, lru_entry->data,
			NULL);
	}
}

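/* Cache lookup counterpart of update_cache(). The caller holds
 * sne->biglock (see subnetmod_operate()); the lruhash entry returned by
 * slabhash_lookup() comes back locked and is unlocked here on every path. */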
/** Lookup in cache and return true iff a cached reply is used. */
static int
lookup_and_reply(struct module_qstate *qstate, int id, struct subnet_qstate *sq)
{
	struct lruhash_entry *e;
	struct module_env *env = qstate->env;
	struct subnet_env *sne = (struct subnet_env*)env->modinfo[id];
	hashvalue_type h = query_info_hash(&qstate->qinfo, qstate->query_flags);
	struct subnet_msg_cache_data *data;
	struct ecs_data *ecs = &sq->ecs_client_in;
	struct addrtree *tree;
	struct addrnode *node;
	uint8_t scope;

	memset(&sq->ecs_client_out, 0, sizeof(sq->ecs_client_out));

	if (sq) sq->qinfo_hash = h; /* Might be useful on cache miss */
	e = slabhash_lookup(sne->subnet_msg_cache, h, &qstate->qinfo, 1);
	if (!e) return 0; /* qinfo not in cache */
	data = e->data;
	tree = (ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4)?
		data->tree4 : data->tree6;
	if (!tree) { /* qinfo in cache but not for this family */
		lock_rw_unlock(&e->lock);
		return 0;
	}
	node = addrtree_find(tree, (addrkey_t*)ecs->subnet_addr,
		ecs->subnet_source_mask, *env->now);
	if (!node) { /* plain old cache miss */
		lock_rw_unlock(&e->lock);
		return 0;
	}

	qstate->return_msg = tomsg(NULL, &qstate->qinfo,
		(struct reply_info *)node->elem, qstate->region, *env->now,
		env->scratch);
	scope = (uint8_t)node->scope;
	lock_rw_unlock(&e->lock);

	if (!qstate->return_msg) { /* Failed allocation or expired TTL */
		return 0;
	}

	if (sq->subnet_downstream) { /* relay to interested client */
		sq->ecs_client_out.subnet_scope_mask = scope;
		sq->ecs_client_out.subnet_addr_fam = ecs->subnet_addr_fam;
		sq->ecs_client_out.subnet_source_mask = ecs->subnet_source_mask;
		memcpy(&sq->ecs_client_out.subnet_addr, &ecs->subnet_addr,
			INET6_SIZE);
		sq->ecs_client_out.subnet_validdata = 1;
	}
	return 1;
}

/**
 * Test first bits of addresses for equality. Caller is responsible
 * for making sure that both a and b are at least net/8 octets long.
 * Note that when net is not a multiple of eight, the whole trailing
 * octet is compared, not just its top net%8 bits.
 * @param a: first address.
 * @param b: second address.
 * @param net: Number of bits to test.
 * @return: 1 if equal, 0 otherwise.
 */
static int
common_prefix(uint8_t *a, uint8_t *b, uint8_t net)
{
	size_t n = (size_t)net / 8;
	return !memcmp(a, b, n) && ((net % 8) == 0 || a[n] == b[n]);
}

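/*
 * Decide what to do with the answer handed back by the next module:
 *  - no ECS was sent upstream: finish, echoing a scope-zero option to a
 *    downstream client that asked for one;
 *  - ECS was sent but no valid option came back: the authority does not
 *    do subnet, cache the answer here as well and finish;
 *  - the echoed option does not match what was sent: treat as bogus and
 *    restart the query without ECS;
 *  - otherwise cache the answer and, for a downstream client, echo the
 *    option with the scope the authority reported.
 */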
static enum module_ext_state
eval_response(struct module_qstate *qstate, int id, struct subnet_qstate *sq)
{
	struct subnet_env *sne = qstate->env->modinfo[id];

	struct ecs_data *c_in  = &sq->ecs_client_in; /* rcvd from client */
	struct ecs_data *c_out = &sq->ecs_client_out;/* will send to client */
	struct ecs_data *s_in  = &sq->ecs_server_in; /* rcvd from auth */
	struct ecs_data *s_out = &sq->ecs_server_out;/* sent to auth */

	memset(c_out, 0, sizeof(*c_out));

	if (!qstate->return_msg) return module_error;

	/* We have not asked for subnet data */
	if (!sq->subnet_sent) {
		if (s_in->subnet_validdata)
			verbose(VERB_QUERY, "subnet: received spurious data");
		if (sq->subnet_downstream) /* Copy back to client */
			cp_edns_bad_response(c_out, c_in);
		return module_finished;
	}

	/* subnet sent but nothing came back */
	if (!s_in->subnet_validdata) {
		/* The authority indicated no support for edns subnet. As a
		 * consequence the answer ended up in the regular cache. It
		 * is still useful to put it in the edns subnet cache for
		 * when a client explicitly asks for a subnet specific answer. */
		verbose(VERB_QUERY, "subnet: Authority indicates no support");
		lock_rw_wrlock(&sne->biglock);
		update_cache(qstate, id);
		lock_rw_unlock(&sne->biglock);
		if (sq->subnet_downstream)
			cp_edns_bad_response(c_out, c_in);
		return module_finished;
	}

	/* Being here means we have asked for and got a subnet specific
	 * answer. Also, the answer from the authority is not yet cached
	 * anywhere. */

	/* can we accept response? */
	if(s_out->subnet_addr_fam != s_in->subnet_addr_fam ||
		s_out->subnet_source_mask != s_in->subnet_source_mask ||
		!common_prefix(s_out->subnet_addr, s_in->subnet_addr,
			s_out->subnet_source_mask))
	{
		/* we can not accept, restart query without option */
		verbose(VERB_QUERY, "subnet: forged data");
		s_out->subnet_validdata = 0;
		(void)edns_opt_list_remove(&qstate->edns_opts_back_out,
			qstate->env->cfg->client_subnet_opcode);
		sq->subnet_sent = 0;
		return module_restart_next;
	}

	lock_rw_wrlock(&sne->biglock);
	update_cache(qstate, id);
	lock_rw_unlock(&sne->biglock);

	if (sq->subnet_downstream) {
		/* Client wants to see the answer, echo option back
		 * and adjust the scope. */
		c_out->subnet_addr_fam = c_in->subnet_addr_fam;
		c_out->subnet_source_mask = c_in->subnet_source_mask;
		memcpy(&c_out->subnet_addr, &c_in->subnet_addr, INET6_SIZE);
		c_out->subnet_scope_mask = s_in->subnet_scope_mask;
		c_out->subnet_validdata = 1;
	}
	return module_finished;
}

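/* The expected option length is 4 fixed octets plus ceil(source_mask/8)
 * address octets, computed as (source_mask+7)/8. For example, an IPv4
 * option with source prefix /17 must carry exactly 4 + 3 = 7 octets of
 * option data. */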
/** Parse EDNS opt data containing ECS */
static int
parse_subnet_option(struct edns_option* ecs_option, struct ecs_data* ecs)
{
	memset(ecs, 0, sizeof(*ecs));
	if (ecs_option->opt_len < 4)
		return 0;

	ecs->subnet_addr_fam = sldns_read_uint16(ecs_option->opt_data);
	ecs->subnet_source_mask = ecs_option->opt_data[2];
	ecs->subnet_scope_mask = ecs_option->opt_data[3];
	/* remaining bytes indicate address */

	/* validate input */
	/* option length matches calculated length? */
	if (ecs_option->opt_len != (size_t)((ecs->subnet_source_mask+7)/8 + 4))
		return 0;
	if (ecs_option->opt_len - 4 > INET6_SIZE || ecs_option->opt_len == 0)
		return 0;
	if (ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4) {
		if (ecs->subnet_source_mask > 32 || ecs->subnet_scope_mask > 32)
			return 0;
	} else if (ecs->subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP6) {
		if (ecs->subnet_source_mask > 128 ||
			ecs->subnet_scope_mask > 128)
			return 0;
	} else
		return 0;

	/* valid ECS data, write to ecs_data */
	if (copy_clear(ecs->subnet_addr, INET6_SIZE, ecs_option->opt_data + 4,
		ecs_option->opt_len - 4, ecs->subnet_source_mask))
		return 0;
	ecs->subnet_validdata = 1;
	return 1;
}

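/* When the client did not send an ECS option, synthesize one from the
 * client's own address, truncated to the configured maximum prefix
 * length (cfg->max_client_subnet_ipv4 / cfg->max_client_subnet_ipv6). */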
static void
subnet_option_from_ss(struct sockaddr_storage *ss, struct ecs_data* ecs,
	struct config_file* cfg)
{
	void* sinaddr;

	/* Construct subnet option from original query */
	if(((struct sockaddr_in*)ss)->sin_family == AF_INET) {
		ecs->subnet_source_mask = cfg->max_client_subnet_ipv4;
		ecs->subnet_addr_fam = EDNSSUBNET_ADDRFAM_IP4;
		sinaddr = &((struct sockaddr_in*)ss)->sin_addr;
		if (!copy_clear( ecs->subnet_addr, INET6_SIZE,
			(uint8_t *)sinaddr, INET_SIZE,
			ecs->subnet_source_mask)) {
			ecs->subnet_validdata = 1;
		}
	}
#ifdef INET6
	else {
		ecs->subnet_source_mask = cfg->max_client_subnet_ipv6;
		ecs->subnet_addr_fam = EDNSSUBNET_ADDRFAM_IP6;
		sinaddr = &((struct sockaddr_in6*)ss)->sin6_addr;
		if (!copy_clear( ecs->subnet_addr, INET6_SIZE,
			(uint8_t *)sinaddr, INET6_SIZE,
			ecs->subnet_source_mask)) {
			ecs->subnet_validdata = 1;
		}
	}
#else
	/* We don't know how to handle ip6, just pass */
#endif /* INET6 */
}

int
ecs_query_response(struct module_qstate* qstate, struct dns_msg* response,
	int id, void* ATTR_UNUSED(cbargs))
{
	struct subnet_qstate *sq;

	if(!response || !(sq=(struct subnet_qstate*)qstate->minfo[id]))
		return 1;

	if(sq->subnet_sent &&
		FLAGS_GET_RCODE(response->rep->flags) == LDNS_RCODE_REFUSED) {
		/* REFUSED response to ECS query, remove ECS option. */
		edns_opt_list_remove(&qstate->edns_opts_back_out,
			qstate->env->cfg->client_subnet_opcode);
		sq->subnet_sent = 0;
		memset(&sq->ecs_server_out, 0, sizeof(sq->ecs_server_out));
	}
	return 1;
}

int
ecs_edns_back_parsed(struct module_qstate* qstate, int id,
	void* ATTR_UNUSED(cbargs))
{
	struct subnet_qstate *sq;
	struct edns_option* ecs_opt;

	if(!(sq=(struct subnet_qstate*)qstate->minfo[id]))
		return 1;
	if((ecs_opt = edns_opt_list_find(
		qstate->edns_opts_back_in,
		qstate->env->cfg->client_subnet_opcode))) {
		if(parse_subnet_option(ecs_opt, &sq->ecs_server_in) &&
			sq->subnet_sent &&
			sq->ecs_server_in.subnet_validdata)
			/* Only skip global cache store if we sent an ECS option
			 * and received one back. Answers from non-whitelisted
			 * servers will end up in global cache. Answers for
			 * queries with 0 source will not (unless nameserver
			 * does not support ECS). */
			qstate->no_cache_store = 1;
	}

	return 1;
}

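/*
 * Module entry point. On a new query it parses the client's ECS option
 * (or synthesizes one from the client address), tries the subnet cache,
 * and otherwise prepares the option to send upstream and hands the query
 * to the next module. When the next module is done, eval_response()
 * decides whether to cache and how to answer the client.
 */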
void
subnetmod_operate(struct module_qstate *qstate, enum module_ev event,
	int id, struct outbound_entry* outbound)
{
	struct subnet_env *sne = qstate->env->modinfo[id];
	struct subnet_qstate *sq = (struct subnet_qstate*)qstate->minfo[id];

	verbose(VERB_QUERY, "subnet[module %d] operate: extstate:%s "
		"event:%s", id, strextstate(qstate->ext_state[id]),
		strmodulevent(event));
	log_query_info(VERB_QUERY, "subnet operate: query", &qstate->qinfo);

	if((event == module_event_new || event == module_event_pass) &&
		sq == NULL) {
		struct edns_option* ecs_opt;
		if(!subnet_new_qstate(qstate, id)) {
			qstate->return_msg = NULL;
			qstate->ext_state[id] = module_finished;
			return;
		}

		sq = (struct subnet_qstate*)qstate->minfo[id];

		if((ecs_opt = edns_opt_list_find(
			qstate->edns_opts_front_in,
			qstate->env->cfg->client_subnet_opcode))) {
			if(!parse_subnet_option(ecs_opt, &sq->ecs_client_in)) {
				/* Wrongly formatted ECS option. The RFC
				 * mandates returning FORMERR. */
				qstate->return_rcode = LDNS_RCODE_FORMERR;
				qstate->ext_state[id] = module_finished;
				return;
			}
			sq->subnet_downstream = 1;
		}
		else if(qstate->mesh_info->reply_list) {
			subnet_option_from_ss(
				&qstate->mesh_info->reply_list->query_reply.addr,
				&sq->ecs_client_in, qstate->env->cfg);
		}

		if(sq->ecs_client_in.subnet_validdata == 0) {
			/* No client is interested in the result, or we could
			 * not parse it; do not do client subnet. */
			sq->ecs_server_out.subnet_validdata = 0;
			verbose(VERB_ALGO, "subnet: pass to next module");
			qstate->ext_state[id] = module_wait_module;
			return;
		}

		lock_rw_wrlock(&sne->biglock);
		if (lookup_and_reply(qstate, id, sq)) {
			lock_rw_unlock(&sne->biglock);
			verbose(VERB_QUERY, "subnet: answered from cache");
			qstate->ext_state[id] = module_finished;

			ecs_opt_list_append(&sq->ecs_client_out,
				&qstate->edns_opts_front_out, qstate);
			return;
		}
		lock_rw_unlock(&sne->biglock);

		sq->ecs_server_out.subnet_addr_fam =
			sq->ecs_client_in.subnet_addr_fam;
		sq->ecs_server_out.subnet_source_mask =
			sq->ecs_client_in.subnet_source_mask;
		/* Limit source prefix to configured maximum */
		if(sq->ecs_server_out.subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP4
			&& sq->ecs_server_out.subnet_source_mask >
			qstate->env->cfg->max_client_subnet_ipv4)
			sq->ecs_server_out.subnet_source_mask =
				qstate->env->cfg->max_client_subnet_ipv4;
		else if(sq->ecs_server_out.subnet_addr_fam == EDNSSUBNET_ADDRFAM_IP6
			&& sq->ecs_server_out.subnet_source_mask >
			qstate->env->cfg->max_client_subnet_ipv6)
			sq->ecs_server_out.subnet_source_mask =
				qstate->env->cfg->max_client_subnet_ipv6;
		/* Safe to copy completely, even if the source is limited by the
		 * configuration. ecs_opt_list_append() will limit the address. */
		memcpy(&sq->ecs_server_out.subnet_addr,
			sq->ecs_client_in.subnet_addr, INET6_SIZE);
		sq->ecs_server_out.subnet_scope_mask = 0;
		sq->ecs_server_out.subnet_validdata = 1;
		if(sq->ecs_server_out.subnet_source_mask != 0 &&
			qstate->env->cfg->client_subnet_always_forward &&
			sq->subnet_downstream)
			/* ECS specific data required, do not look at the global
			 * cache in other modules. */
			qstate->no_cache_lookup = 1;

		/* pass request to next module */
		verbose(VERB_ALGO,
			"subnet: not found in cache. pass to next module");
		qstate->ext_state[id] = module_wait_module;
		return;
	}
	/* Query handed back by next module, we have a 'final' answer */
	if(sq && event == module_event_moddone) {
		qstate->ext_state[id] = eval_response(qstate, id, sq);
		if(qstate->ext_state[id] == module_finished) {
			ecs_opt_list_append(&sq->ecs_client_out,
				&qstate->edns_opts_front_out, qstate);
		}
		return;
	}
	if(sq && outbound) {
		return;
	}
	/* We are being revisited */
	if(event == module_event_pass || event == module_event_new) {
		/* Just pass it on, we already did the work */
		verbose(VERB_ALGO, "subnet: pass to next module");
		qstate->ext_state[id] = module_wait_module;
		return;
	}
	if(!sq && (event == module_event_moddone)) {
		/* during priming, module done but we never started */
		qstate->ext_state[id] = module_finished;
		return;
	}
	log_err("subnet: bad event %s", strmodulevent(event));
	qstate->ext_state[id] = module_error;
	return;
}

void
subnetmod_clear(struct module_qstate *ATTR_UNUSED(qstate),
	int ATTR_UNUSED(id))
{
	/* qstate has no data outside region */
}

void
subnetmod_inform_super(struct module_qstate *ATTR_UNUSED(qstate),
	int ATTR_UNUSED(id), struct module_qstate *ATTR_UNUSED(super))
{
	/* Not used */
}

size_t
subnetmod_get_mem(struct module_env *env, int id)
{
	struct subnet_env *sn_env = env->modinfo[id];
	if (!sn_env) return 0;
	return sizeof(*sn_env) +
		slabhash_get_mem(sn_env->subnet_msg_cache) +
		ecs_whitelist_get_mem(sn_env->whitelist);
}

/**
 * The module function block
 */
static struct module_func_block subnetmod_block = {
	"subnet", &subnetmod_init, &subnetmod_deinit, &subnetmod_operate,
	&subnetmod_inform_super, &subnetmod_clear, &subnetmod_get_mem
};

struct module_func_block*
subnetmod_get_funcblock(void)
{
	return &subnetmod_block;
}

/** Wrappers for static functions to unit test */
size_t
unittest_wrapper_subnetmod_sizefunc(void *elemptr)
{
	return sizefunc(elemptr);
}

#endif  /* CLIENT_SUBNET */