1 /*
2 * validator/val_neg.c - validator aggressive negative caching functions.
3 *
4 * Copyright (c) 2008, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 /**
37 * \file
38 *
39 * This file contains helper functions for the validator module.
40 * The functions help with aggressive negative caching.
41 * This creates new denials of existence, and proofs for absence of types
42 * from cached NSEC records.
43 */
44 #include "config.h"
45 #ifdef HAVE_OPENSSL_SSL_H
46 #include <openssl/ssl.h>
47 #define NSEC3_SHA_LEN SHA_DIGEST_LENGTH
48 #else
49 #define NSEC3_SHA_LEN 20
50 #endif
51 #include "validator/val_neg.h"
52 #include "validator/val_nsec.h"
53 #include "validator/val_nsec3.h"
54 #include "validator/val_utils.h"
55 #include "util/data/dname.h"
56 #include "util/data/msgreply.h"
57 #include "util/log.h"
58 #include "util/net_help.h"
59 #include "util/config_file.h"
60 #include "services/cache/rrset.h"
61 #include "services/cache/dns.h"
62 #include "sldns/rrdef.h"
63 #include "sldns/sbuffer.h"
64
val_neg_data_compare(const void * a,const void * b)65 int val_neg_data_compare(const void* a, const void* b)
66 {
67 struct val_neg_data* x = (struct val_neg_data*)a;
68 struct val_neg_data* y = (struct val_neg_data*)b;
69 int m;
70 return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
71 }
72
val_neg_zone_compare(const void * a,const void * b)73 int val_neg_zone_compare(const void* a, const void* b)
74 {
75 struct val_neg_zone* x = (struct val_neg_zone*)a;
76 struct val_neg_zone* y = (struct val_neg_zone*)b;
77 int m;
78 if(x->dclass != y->dclass) {
79 if(x->dclass < y->dclass)
80 return -1;
81 return 1;
82 }
83 return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
84 }
85
val_neg_create(struct config_file * cfg,size_t maxiter)86 struct val_neg_cache* val_neg_create(struct config_file* cfg, size_t maxiter)
87 {
88 struct val_neg_cache* neg = (struct val_neg_cache*)calloc(1,
89 sizeof(*neg));
90 if(!neg) {
91 log_err("Could not create neg cache: out of memory");
92 return NULL;
93 }
94 neg->nsec3_max_iter = maxiter;
95 neg->max = 1024*1024; /* 1 M is thousands of entries */
96 if(cfg) neg->max = cfg->neg_cache_size;
97 rbtree_init(&neg->tree, &val_neg_zone_compare);
98 lock_basic_init(&neg->lock);
99 lock_protect(&neg->lock, neg, sizeof(*neg));
100 return neg;
101 }
102
val_neg_get_mem(struct val_neg_cache * neg)103 size_t val_neg_get_mem(struct val_neg_cache* neg)
104 {
105 size_t result;
106 lock_basic_lock(&neg->lock);
107 result = sizeof(*neg) + neg->use;
108 lock_basic_unlock(&neg->lock);
109 return result;
110 }
111
112 /** clear datas on cache deletion */
113 static void
neg_clear_datas(rbnode_type * n,void * ATTR_UNUSED (arg))114 neg_clear_datas(rbnode_type* n, void* ATTR_UNUSED(arg))
115 {
116 struct val_neg_data* d = (struct val_neg_data*)n;
117 free(d->name);
118 free(d);
119 }
120
121 /** clear zones on cache deletion */
122 static void
neg_clear_zones(rbnode_type * n,void * ATTR_UNUSED (arg))123 neg_clear_zones(rbnode_type* n, void* ATTR_UNUSED(arg))
124 {
125 struct val_neg_zone* z = (struct val_neg_zone*)n;
126 /* delete all the rrset entries in the tree */
127 traverse_postorder(&z->tree, &neg_clear_datas, NULL);
128 free(z->nsec3_salt);
129 free(z->name);
130 free(z);
131 }
132
neg_cache_delete(struct val_neg_cache * neg)133 void neg_cache_delete(struct val_neg_cache* neg)
134 {
135 if(!neg) return;
136 lock_basic_destroy(&neg->lock);
137 /* delete all the zones in the tree */
138 traverse_postorder(&neg->tree, &neg_clear_zones, NULL);
139 free(neg);
140 }
141
142 /**
143 * Put data element at the front of the LRU list.
144 * @param neg: negative cache with LRU start and end.
145 * @param data: this data is fronted.
146 */
neg_lru_front(struct val_neg_cache * neg,struct val_neg_data * data)147 static void neg_lru_front(struct val_neg_cache* neg,
148 struct val_neg_data* data)
149 {
150 data->prev = NULL;
151 data->next = neg->first;
152 if(!neg->first)
153 neg->last = data;
154 else neg->first->prev = data;
155 neg->first = data;
156 }
157
158 /**
159 * Remove data element from LRU list.
160 * @param neg: negative cache with LRU start and end.
161 * @param data: this data is removed from the list.
162 */
neg_lru_remove(struct val_neg_cache * neg,struct val_neg_data * data)163 static void neg_lru_remove(struct val_neg_cache* neg,
164 struct val_neg_data* data)
165 {
166 if(data->prev)
167 data->prev->next = data->next;
168 else neg->first = data->next;
169 if(data->next)
170 data->next->prev = data->prev;
171 else neg->last = data->prev;
172 }
173
174 /**
175 * Touch LRU for data element, put it at the start of the LRU list.
176 * @param neg: negative cache with LRU start and end.
177 * @param data: this data is used.
178 */
neg_lru_touch(struct val_neg_cache * neg,struct val_neg_data * data)179 static void neg_lru_touch(struct val_neg_cache* neg,
180 struct val_neg_data* data)
181 {
182 if(data == neg->first)
183 return; /* nothing to do */
184 /* remove from current lru position */
185 neg_lru_remove(neg, data);
186 /* add at front */
187 neg_lru_front(neg, data);
188 }
189
190 /**
191 * Delete a zone element from the negative cache.
192 * May delete other zone elements to keep tree coherent, or
193 * only mark the element as 'not in use'.
194 * @param neg: negative cache.
195 * @param z: zone element to delete.
196 */
static void neg_delete_zone(struct val_neg_cache* neg, struct val_neg_zone* z)
{
	struct val_neg_zone* p, *np;
	if(!z) return;
	log_assert(z->in_use);
	log_assert(z->count > 0);
	/* mark not-in-use first; the node may stay in the tree as glue
	 * for child zones that still reference it via parent pointers */
	z->in_use = 0;

	/* go up the tree and reduce counts; each ancestor held one
	 * reference on behalf of this zone */
	p = z;
	while(p) {
		log_assert(p->count > 0);
		p->count --;
		p = p->parent;
	}

	/* remove zones with zero count, walking up from z; stop at the
	 * first ancestor that is still referenced by another zone */
	p = z;
	while(p && p->count == 0) {
		np = p->parent;
		(void)rbtree_delete(&neg->tree, &p->node);
		neg->use -= p->len + sizeof(*p);
		free(p->nsec3_salt);
		free(p->name);
		free(p);
		p = np;
	}
}
225
void neg_delete_data(struct val_neg_cache* neg, struct val_neg_data* el)
{
	struct val_neg_zone* z;
	struct val_neg_data* p, *np;
	if(!el) return;
	z = el->zone;
	log_assert(el->in_use);
	log_assert(el->count > 0);
	/* mark not-in-use; the node may remain in the tree as glue for
	 * longer names that still chain through it */
	el->in_use = 0;

	/* remove it from the lru list */
	neg_lru_remove(neg, el);
	log_assert(neg->first != el && neg->last != el);

	/* go up the tree and reduce counts; each ancestor held one
	 * reference on behalf of this element */
	p = el;
	while(p) {
		log_assert(p->count > 0);
		p->count --;
		p = p->parent;
	}

	/* delete 0 count items from tree, walking upward; stop at the
	 * first ancestor still referenced elsewhere */
	p = el;
	while(p && p->count == 0) {
		np = p->parent;
		(void)rbtree_delete(&z->tree, &p->node);
		neg->use -= p->len + sizeof(*p);
		free(p->name);
		free(p);
		p = np;
	}

	/* check if the zone is now unused; if so release it too */
	if(z->tree.count == 0) {
		neg_delete_zone(neg, z);
	}
}
264
265 /**
266 * Create more space in negative cache
267 * The oldest elements are deleted until enough space is present.
268 * Empty zones are deleted.
269 * @param neg: negative cache.
270 * @param need: how many bytes are needed.
271 */
/**
 * Create more space in negative cache.
 * Evicts least-recently-used elements until the request fits.
 * @param neg: negative cache.
 * @param need: how many bytes are needed.
 */
static void neg_make_space(struct val_neg_cache* neg, size_t need)
{
	/* evict from the LRU tail until enough room, or cache is empty */
	while(neg->use + need > neg->max && neg->last)
		neg_delete_data(neg, neg->last);
}
279
/** Exact-match lookup of a zone by name and class in the negative cache.
 * @return zone or NULL if not present. */
struct val_neg_zone* neg_find_zone(struct val_neg_cache* neg,
	uint8_t* nm, size_t len, uint16_t dclass)
{
	struct val_neg_zone key;
	/* build a stack key for the rbtree search */
	key.node.key = &key;
	key.name = nm;
	key.len = len;
	key.labs = dname_count_labels(nm);
	key.dclass = dclass;
	return (struct val_neg_zone*)rbtree_search(&neg->tree, &key);
}
295
296 /**
297 * Find the given data
298 * @param zone: negative zone
299 * @param nm: what to look for.
300 * @param len: length of nm
301 * @param labs: labels in nm
302 * @return data or NULL if not found.
303 */
/**
 * Find the given data by exact name match.
 * @param zone: negative zone
 * @param nm: what to look for.
 * @param len: length of nm
 * @param labs: labels in nm
 * @return data or NULL if not found.
 */
static struct val_neg_data* neg_find_data(struct val_neg_zone* zone,
	uint8_t* nm, size_t len, int labs)
{
	struct val_neg_data key;
	/* build a stack key for the rbtree search */
	key.node.key = &key;
	key.name = nm;
	key.len = len;
	key.labs = labs;
	return (struct val_neg_data*)rbtree_search(&zone->tree, &key);
}
318
319 /**
320 * Calculate space needed for the data and all its parents
321 * @param rep: NSEC entries.
322 * @return size.
323 */
calc_data_need(struct reply_info * rep)324 static size_t calc_data_need(struct reply_info* rep)
325 {
326 uint8_t* d;
327 size_t i, len, res = 0;
328
329 for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
330 if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
331 d = rep->rrsets[i]->rk.dname;
332 len = rep->rrsets[i]->rk.dname_len;
333 res = sizeof(struct val_neg_data) + len;
334 while(!dname_is_root(d)) {
335 log_assert(len > 1); /* not root label */
336 dname_remove_label(&d, &len);
337 res += sizeof(struct val_neg_data) + len;
338 }
339 }
340 }
341 return res;
342 }
343
344 /**
345 * Calculate space needed for zone and all its parents
346 * @param d: name of zone
347 * @param len: length of name
348 * @return size.
349 */
/**
 * Calculate space needed for zone and all its parents.
 * @param d: name of zone
 * @param len: length of name
 * @return size.
 */
static size_t calc_zone_need(uint8_t* d, size_t len)
{
	size_t need;
	/* the zone itself, then one entry for each ancestor up to root */
	for(need = sizeof(struct val_neg_zone) + len; !dname_is_root(d);
		need += sizeof(struct val_neg_zone) + len) {
		log_assert(len > 1); /* not root label */
		dname_remove_label(&d, &len);
	}
	return need;
}
360
361 /**
362 * Find closest existing parent zone of the given name.
363 * @param neg: negative cache.
364 * @param nm: name to look for
365 * @param nm_len: length of nm
366 * @param labs: labelcount of nm.
367 * @param qclass: class.
368 * @return the zone or NULL if none found.
369 */
static struct val_neg_zone* neg_closest_zone_parent(struct val_neg_cache* neg,
	uint8_t* nm, size_t nm_len, int labs, uint16_t qclass)
{
	struct val_neg_zone key;
	struct val_neg_zone* result;
	rbnode_type* res = NULL;
	key.node.key = &key;
	key.name = nm;
	key.len = nm_len;
	key.labs = labs;
	key.dclass = qclass;
	if(rbtree_find_less_equal(&neg->tree, &key, &res)) {
		/* exact match */
		result = (struct val_neg_zone*)res;
	} else {
		/* smaller element (or no element); res is set to the
		 * largest tree element that sorts before the key */
		int m;
		result = (struct val_neg_zone*)res;
		if(!result || result->dclass != qclass)
			return NULL;
		/* count number of labels matched between the smaller
		 * element and the lookup name */
		(void)dname_lab_cmp(result->name, result->labs, key.name,
			key.labs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			/* a zone with labs <= m is an ancestor of nm */
			if(result->labs <= m)
				break;
			result = result->parent;
		}
	}
	return result;
}
401
402 /**
403 * Find closest existing parent data for the given name.
404 * @param zone: to look in.
405 * @param nm: name to look for
406 * @param nm_len: length of nm
407 * @param labs: labelcount of nm.
408 * @return the data or NULL if none found.
409 */
static struct val_neg_data* neg_closest_data_parent(
	struct val_neg_zone* zone, uint8_t* nm, size_t nm_len, int labs)
{
	struct val_neg_data key;
	struct val_neg_data* result;
	rbnode_type* res = NULL;
	key.node.key = &key;
	key.name = nm;
	key.len = nm_len;
	key.labs = labs;
	if(rbtree_find_less_equal(&zone->tree, &key, &res)) {
		/* exact match */
		result = (struct val_neg_data*)res;
	} else {
		/* smaller element (or no element); res is the largest
		 * tree element sorting before the key */
		int m;
		result = (struct val_neg_data*)res;
		if(!result)
			return NULL;
		/* count number of labels matched between the smaller
		 * element and the lookup name */
		(void)dname_lab_cmp(result->name, result->labs, key.name,
			key.labs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			/* an element with labs <= m is an ancestor of nm */
			if(result->labs <= m)
				break;
			result = result->parent;
		}
	}
	return result;
}
440
441 /**
442 * Create a single zone node
443 * @param nm: name for zone (copied)
444 * @param nm_len: length of name
445 * @param labs: labels in name.
446 * @param dclass: class of zone, host order.
447 * @return new zone or NULL on failure
448 */
/**
 * Create a single zone node.
 * @param nm: name for zone (copied)
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @param dclass: class of zone, host order.
 * @return new zone or NULL on failure
 */
static struct val_neg_zone* neg_setup_zone_node(
	uint8_t* nm, size_t nm_len, int labs, uint16_t dclass)
{
	struct val_neg_zone* zone;
	zone = (struct val_neg_zone*)calloc(1, sizeof(*zone));
	if(!zone)
		return NULL;
	/* own copy of the name; freed together with the node */
	zone->name = memdup(nm, nm_len);
	if(!zone->name) {
		free(zone);
		return NULL;
	}
	zone->node.key = zone;
	zone->len = nm_len;
	zone->labs = labs;
	zone->dclass = dclass;
	rbtree_init(&zone->tree, &val_neg_data_compare);
	return zone;
}
470
471 /**
472 * Create a linked list of parent zones, starting at longname ending on
473 * the parent (can be NULL, creates to the root).
474 * @param nm: name for lowest in chain
475 * @param nm_len: length of name
476 * @param labs: labels in name.
477 * @param dclass: class of zone.
478 * @param parent: NULL for to root, else so it fits under here.
479 * @return zone; a chain of zones and their parents up to the parent.
480 * or NULL on malloc failure
481 */
static struct val_neg_zone* neg_zone_chain(
	uint8_t* nm, size_t nm_len, int labs, uint16_t dclass,
	struct val_neg_zone* parent)
{
	int i;
	/* stop creating when we reach the parent's labelcount (or 0,
	 * meaning create all the way up to the root) */
	int tolabs = parent?parent->labs:0;
	struct val_neg_zone* zone, *prev = NULL, *first = NULL;

	/* create the new subtree, i is labelcount of current creation */
	/* this creates a 'first' to z->parent=NULL list of zones */
	for(i=labs; i!=tolabs; i--) {
		/* create new item */
		zone = neg_setup_zone_node(nm, nm_len, i, dclass);
		if(!zone) {
			/* need to delete other allocations in this routine!*/
			/* walk the partially built chain via parent links
			 * and free each node and its name copy */
			struct val_neg_zone* p=first, *np;
			while(p) {
				np = p->parent;
				free(p->name);
				free(p);
				p = np;
			}
			return NULL;
		}
		if(i == labs) {
			first = zone;
		} else {
			prev->parent = zone;
		}
		/* prepare for next name: strip one leading label */
		prev = zone;
		dname_remove_label(&nm, &nm_len);
	}
	return first;
}
517
val_neg_zone_take_inuse(struct val_neg_zone * zone)518 void val_neg_zone_take_inuse(struct val_neg_zone* zone)
519 {
520 if(!zone->in_use) {
521 struct val_neg_zone* p;
522 zone->in_use = 1;
523 /* increase usage count of all parents */
524 for(p=zone; p; p = p->parent) {
525 p->count++;
526 }
527 }
528 }
529
/** Create a new zone entry (and any missing parent entries) in the
 * negative cache tree; returns existing zone if already present. */
struct val_neg_zone* neg_create_zone(struct val_neg_cache* neg,
	uint8_t* nm, size_t nm_len, uint16_t dclass)
{
	struct val_neg_zone* zone;
	struct val_neg_zone* parent;
	struct val_neg_zone* p, *np;
	int labs = dname_count_labels(nm);

	/* find closest enclosing parent zone that (still) exists */
	parent = neg_closest_zone_parent(neg, nm, nm_len, labs, dclass);
	if(parent && query_dname_compare(parent->name, nm) == 0)
		return parent; /* already exists, weird */
	/* if parent exists, it is in use */
	log_assert(!parent || parent->count > 0);
	/* build the chain of new nodes from nm up to (excluding) parent */
	zone = neg_zone_chain(nm, nm_len, labs, dclass, parent);
	if(!zone) {
		return NULL;
	}

	/* insert the list of zones into the tree */
	p = zone;
	while(p) {
		np = p->parent;
		/* mem use */
		neg->use += sizeof(struct val_neg_zone) + p->len;
		/* insert in tree */
		(void)rbtree_insert(&neg->tree, &p->node);
		/* last one needs proper parent pointer: link the top of the
		 * new chain to the preexisting parent zone */
		if(np == NULL)
			p->parent = parent;
		p = np;
	}
	return zone;
}
564
565 /** find zone name of message, returns the SOA record */
reply_find_soa(struct reply_info * rep)566 static struct ub_packed_rrset_key* reply_find_soa(struct reply_info* rep)
567 {
568 size_t i;
569 for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
570 if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_SOA)
571 return rep->rrsets[i];
572 }
573 return NULL;
574 }
575
576 /** see if the reply has NSEC records worthy of caching */
reply_has_nsec(struct reply_info * rep)577 static int reply_has_nsec(struct reply_info* rep)
578 {
579 size_t i;
580 struct packed_rrset_data* d;
581 if(rep->security != sec_status_secure)
582 return 0;
583 for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
584 if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
585 d = (struct packed_rrset_data*)rep->rrsets[i]->
586 entry.data;
587 if(d->security == sec_status_secure)
588 return 1;
589 }
590 }
591 return 0;
592 }
593
594
595 /**
596 * Create single node of data element.
597 * @param nm: name (copied)
598 * @param nm_len: length of name
599 * @param labs: labels in name.
600 * @return element with name nm, or NULL malloc failure.
601 */
/**
 * Create single node of data element.
 * @param nm: name (copied)
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @return element with name nm, or NULL malloc failure.
 */
static struct val_neg_data* neg_setup_data_node(
	uint8_t* nm, size_t nm_len, int labs)
{
	struct val_neg_data* el =
		(struct val_neg_data*)calloc(1, sizeof(*el));
	if(!el)
		return NULL;
	/* own copy of the name; freed together with the node */
	el->name = memdup(nm, nm_len);
	if(!el->name) {
		free(el);
		return NULL;
	}
	el->node.key = el;
	el->len = nm_len;
	el->labs = labs;
	return el;
}
620
621 /**
622 * Create chain of data element and parents
623 * @param nm: name
624 * @param nm_len: length of name
625 * @param labs: labels in name.
626 * @param parent: up to where to make, if NULL up to root label.
627 * @return lowest element with name nm, or NULL malloc failure.
628 */
static struct val_neg_data* neg_data_chain(
	uint8_t* nm, size_t nm_len, int labs, struct val_neg_data* parent)
{
	int i;
	/* stop creating when we reach the parent's labelcount (or 0,
	 * meaning create all the way up to the root) */
	int tolabs = parent?parent->labs:0;
	struct val_neg_data* el, *first = NULL, *prev = NULL;

	/* create the new subtree, i is labelcount of current creation */
	/* this creates a 'first' to z->parent=NULL list of zones */
	for(i=labs; i!=tolabs; i--) {
		/* create new item */
		el = neg_setup_data_node(nm, nm_len, i);
		if(!el) {
			/* need to delete other allocations in this routine!*/
			/* walk the partially built chain via parent links
			 * and free each node and its name copy */
			struct val_neg_data* p = first, *np;
			while(p) {
				np = p->parent;
				free(p->name);
				free(p);
				p = np;
			}
			return NULL;
		}
		if(i == labs) {
			first = el;
		} else {
			prev->parent = el;
		}

		/* prepare for next name: strip one leading label */
		prev = el;
		dname_remove_label(&nm, &nm_len);
	}
	return first;
}
664
665 /**
666 * Remove NSEC records between start and end points.
667 * By walking the tree, the tree is sorted canonically.
668 * @param neg: negative cache.
669 * @param zone: the zone
670 * @param el: element to start walking at.
671 * @param nsec: the nsec record with the end point
672 */
static void wipeout(struct val_neg_cache* neg, struct val_neg_zone* zone,
	struct val_neg_data* el, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)nsec->
		entry.data;
	uint8_t* end;
	size_t end_len;
	int end_labs, m;
	rbnode_type* walk, *next;
	struct val_neg_data* cur;
	uint8_t buf[257];
	/* get endpoint: the 'next owner' field of the NSEC(3) record */
	if(!d || d->count == 0 || d->rr_len[0] < 2+1)
		return;
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC) {
		/* skip the 2-byte rdlength prefix of the rdata */
		end = d->rr_data[0]+2;
		end_len = dname_valid(end, d->rr_len[0]-2);
		end_labs = dname_count_labels(end);
	} else {
		/* NSEC3: reconstruct the base32 next-owner name into buf */
		if(!nsec3_get_nextowner_b32(nsec, 0, buf, sizeof(buf)))
			return;
		end = buf;
		end_labs = dname_count_size_labels(end, &end_len);
	}

	/* sanity check, both owner and end must be below the zone apex */
	if(!dname_subdomain_c(el->name, zone->name) ||
		!dname_subdomain_c(end, zone->name))
		return;

	/* detect end of zone NSEC ; wipe until the end of zone */
	if(query_dname_compare(end, zone->name) == 0) {
		end = NULL;
	}

	/* walk tree elements after el in canonical order; they fall
	 * inside the NSEC span and must be removed */
	walk = rbtree_next(&el->node);
	while(walk && walk != RBTREE_NULL) {
		cur = (struct val_neg_data*)walk;
		/* sanity check: must be larger than start */
		if(dname_canon_lab_cmp(cur->name, cur->labs,
			el->name, el->labs, &m) <= 0) {
			/* r == 0 skip original record. */
			/* r < 0 too small! */
			walk = rbtree_next(walk);
			continue;
		}
		/* stop at endpoint, also data at empty nonterminals must be
		 * removed (no NSECs there) so everything between
		 * start and end */
		if(end && dname_canon_lab_cmp(cur->name, cur->labs,
			end, end_labs, &m) >= 0) {
			break;
		}
		/* this element has to be deleted, but we cannot do it
		 * now, because we are walking the tree still ... */
		/* get the next element: */
		next = rbtree_next(walk);
		/* now delete the original element, this may trigger
		 * rbtree rebalances, but really, the next element is
		 * the one we need.
		 * But it may trigger delete of other data and the
		 * entire zone. However, if that happens, this is done
		 * by deleting the *parents* of the element for deletion,
		 * and maybe also the entire zone if it is empty.
		 * But parents are smaller in canonical compare, thus,
		 * if a larger element exists, then it is not a parent,
		 * it cannot get deleted, the zone cannot get empty.
		 * If the next==NULL, then zone can be empty. */
		if(cur->in_use)
			neg_delete_data(neg, cur);
		walk = next;
	}
}
747
/** Insert one NSEC or NSEC3 record into the zone's negative cache tree,
 * creating chain nodes as needed, updating LRU and NSEC3 parameters,
 * and wiping cached items covered by the record's span. */
void neg_insert_data(struct val_neg_cache* neg,
	struct val_neg_zone* zone, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d;
	struct val_neg_data* parent;
	struct val_neg_data* el;
	uint8_t* nm = nsec->rk.dname;
	size_t nm_len = nsec->rk.dname_len;
	int labs = dname_count_labels(nsec->rk.dname);

	d = (struct packed_rrset_data*)nsec->entry.data;
	/* only store secure NSECs, or unchecked ones that at least carry
	 * signatures (they can validate later) */
	if( !(d->security == sec_status_secure ||
		(d->security == sec_status_unchecked && d->rrsig_count > 0)))
		return;
	log_nametypeclass(VERB_ALGO, "negcache rr",
		nsec->rk.dname, ntohs(nsec->rk.type),
		ntohs(nsec->rk.rrset_class));

	/* find closest enclosing parent data that (still) exists */
	parent = neg_closest_data_parent(zone, nm, nm_len, labs);
	if(parent && query_dname_compare(parent->name, nm) == 0) {
		/* perfect match already exists */
		log_assert(parent->count > 0);
		el = parent;
	} else {
		struct val_neg_data* p, *np;

		/* create subtree for perfect match */
		/* if parent exists, it is in use */
		log_assert(!parent || parent->count > 0);

		el = neg_data_chain(nm, nm_len, labs, parent);
		if(!el) {
			log_err("out of memory inserting NSEC negative cache");
			return;
		}
		el->in_use = 0; /* set on below */

		/* insert the list of zones into the tree */
		p = el;
		while(p) {
			np = p->parent;
			/* mem use */
			neg->use += sizeof(struct val_neg_data) + p->len;
			/* insert in tree */
			p->zone = zone;
			(void)rbtree_insert(&zone->tree, &p->node);
			/* last one needs proper parent pointer: link the
			 * top of the new chain to the existing parent */
			if(np == NULL)
				p->parent = parent;
			p = np;
		}
	}

	if(!el->in_use) {
		struct val_neg_data* p;

		el->in_use = 1;
		/* increase usage count of all parents */
		for(p=el; p; p = p->parent) {
			p->count++;
		}

		neg_lru_front(neg, el);
	} else {
		/* in use, bring to front, lru */
		neg_lru_touch(neg, el);
	}

	/* if nsec3 store last used parameters; only when they changed
	 * and the iteration count is within the allowed maximum */
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC3) {
		int h;
		uint8_t* s;
		size_t slen, it;
		if(nsec3_get_params(nsec, 0, &h, &it, &s, &slen) &&
			it <= neg->nsec3_max_iter &&
			(h != zone->nsec3_hash || it != zone->nsec3_iter ||
			slen != zone->nsec3_saltlen ||
			(slen != 0 && zone->nsec3_salt && s
			  && memcmp(zone->nsec3_salt, s, slen) != 0))) {

			if(slen > 0) {
				/* copy first; keep old salt on malloc fail */
				uint8_t* sa = memdup(s, slen);
				if(sa) {
					free(zone->nsec3_salt);
					zone->nsec3_salt = sa;
					zone->nsec3_saltlen = slen;
					zone->nsec3_iter = it;
					zone->nsec3_hash = h;
				}
			} else {
				free(zone->nsec3_salt);
				zone->nsec3_salt = NULL;
				zone->nsec3_saltlen = 0;
				zone->nsec3_iter = it;
				zone->nsec3_hash = h;
			}
		}
	}

	/* wipe out the cache items between NSEC start and end */
	wipeout(neg, zone, el, nsec);
}
851
852 /** see if the reply has signed NSEC records and return the signer */
reply_nsec_signer(struct reply_info * rep,size_t * signer_len,uint16_t * dclass)853 static uint8_t* reply_nsec_signer(struct reply_info* rep, size_t* signer_len,
854 uint16_t* dclass)
855 {
856 size_t i;
857 struct packed_rrset_data* d;
858 uint8_t* s;
859 for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
860 if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC ||
861 ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC3) {
862 d = (struct packed_rrset_data*)rep->rrsets[i]->
863 entry.data;
864 /* return first signer name of first NSEC */
865 if(d->rrsig_count != 0) {
866 val_find_rrset_signer(rep->rrsets[i],
867 &s, signer_len);
868 if(s && *signer_len) {
869 *dclass = ntohs(rep->rrsets[i]->
870 rk.rrset_class);
871 return s;
872 }
873 }
874 }
875 }
876 return 0;
877 }
878
/** Add the NSEC records of a validated negative reply to the cache. */
void val_neg_addreply(struct val_neg_cache* neg, struct reply_info* rep)
{
	size_t i, need;
	struct ub_packed_rrset_key* soa;
	uint8_t* dname = NULL;
	size_t dname_len;
	uint16_t rrset_class;
	struct val_neg_zone* zone;
	/* see if secure nsecs inside */
	if(!reply_has_nsec(rep))
		return;
	/* find the zone name in message */
	if((soa = reply_find_soa(rep))) {
		dname = soa->rk.dname;
		dname_len = soa->rk.dname_len;
		rrset_class = ntohs(soa->rk.rrset_class);
	}
	else {
		/* No SOA in positive (wildcard) answer. Use signer from the
		 * validated answer RRsets' signature. */
		if(!(dname = reply_nsec_signer(rep, &dname_len, &rrset_class)))
			return;
	}

	log_nametypeclass(VERB_ALGO, "negcache insert for zone",
		dname, LDNS_RR_TYPE_SOA, rrset_class);

	/* ask for enough space to store all of it; must be done before
	 * taking the lock-protected cache space */
	need = calc_data_need(rep) +
		calc_zone_need(dname, dname_len);
	lock_basic_lock(&neg->lock);
	neg_make_space(neg, need);

	/* find or create the zone entry */
	zone = neg_find_zone(neg, dname, dname_len, rrset_class);
	if(!zone) {
		if(!(zone = neg_create_zone(neg, dname, dname_len,
			rrset_class))) {
			lock_basic_unlock(&neg->lock);
			log_err("out of memory adding negative zone");
			return;
		}
	}
	val_neg_zone_take_inuse(zone);

	/* insert the NSECs from the authority section */
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC)
			continue;
		/* ignore NSECs not within the zone's bailiwick */
		if(!dname_subdomain_c(rep->rrsets[i]->rk.dname,
			zone->name)) continue;
		/* insert NSEC into this zone's tree */
		neg_insert_data(neg, zone, rep->rrsets[i]);
	}
	if(zone->tree.count == 0) {
		/* remove empty zone if inserts failed */
		neg_delete_zone(neg, zone);
	}
	lock_basic_unlock(&neg->lock);
}
939
940 /**
941 * Lookup closest data record. For NSEC denial.
942 * @param zone: zone to look in
943 * @param qname: name to look for.
944 * @param len: length of name
945 * @param labs: labels in name
946 * @param data: data element, exact or smaller or NULL
947 * @return true if exact match.
948 */
/**
 * Lookup closest data record. For NSEC denial.
 * @param zone: zone to look in
 * @param qname: name to look for.
 * @param len: length of name
 * @param labs: labels in name
 * @param data: data element, exact or smaller or NULL
 * @return true if exact match.
 */
static int neg_closest_data(struct val_neg_zone* zone,
	uint8_t* qname, size_t len, int labs, struct val_neg_data** data)
{
	struct val_neg_data key;
	rbnode_type* r;
	int exact;
	key.node.key = &key;
	key.name = qname;
	key.len = len;
	key.labs = labs;
	/* r is filled with the exact or next-smaller element (or NULL) */
	exact = rbtree_find_less_equal(&zone->tree, &key, &r);
	*data = (struct val_neg_data*)r;
	return exact;
}
968
/** Add the NSEC/NSEC3 records of a referral reply to the negative cache;
 * zone_name bounds the bailiwick of acceptable signer names. */
void val_neg_addreferral(struct val_neg_cache* neg, struct reply_info* rep,
	uint8_t* zone_name)
{
	size_t i, need;
	uint8_t* signer;
	size_t signer_len;
	uint16_t dclass;
	struct val_neg_zone* zone;
	/* no SOA in this message, find RRSIG over NSEC's signer name.
	 * note the NSEC records are maybe not validated yet */
	signer = reply_nsec_signer(rep, &signer_len, &dclass);
	if(!signer)
		return;
	if(!dname_subdomain_c(signer, zone_name)) {
		/* the signer is not in the bailiwick, throw it out */
		return;
	}

	log_nametypeclass(VERB_ALGO, "negcache insert referral ",
		signer, LDNS_RR_TYPE_NS, dclass);

	/* ask for enough space to store all of it; computed before
	 * taking the cache lock */
	need = calc_data_need(rep) + calc_zone_need(signer, signer_len);
	lock_basic_lock(&neg->lock);
	neg_make_space(neg, need);

	/* find or create the zone entry */
	zone = neg_find_zone(neg, signer, signer_len, dclass);
	if(!zone) {
		if(!(zone = neg_create_zone(neg, signer, signer_len,
			dclass))) {
			lock_basic_unlock(&neg->lock);
			log_err("out of memory adding negative zone");
			return;
		}
	}
	val_neg_zone_take_inuse(zone);

	/* insert the NSECs (and NSEC3s, unlike val_neg_addreply) */
	for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC &&
			ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC3)
			continue;
		/* ignore records not within the zone's bailiwick */
		if(!dname_subdomain_c(rep->rrsets[i]->rk.dname,
			zone->name)) continue;
		/* insert NSEC into this zone's tree */
		neg_insert_data(neg, zone, rep->rrsets[i]);
	}
	if(zone->tree.count == 0) {
		/* remove empty zone if inserts failed */
		neg_delete_zone(neg, zone);
	}
	lock_basic_unlock(&neg->lock);
}
1023
1024 /**
1025 * Check that an NSEC3 rrset does not have a type set.
1026 * None of the nsec3s in a hash-collision are allowed to have the type.
1027 * (since we do not know which one is the nsec3 looked at, flags, ..., we
1028 * ignore the cached item and let it bypass negative caching).
1029 * @param k: the nsec3 rrset to check.
1030 * @param t: type to check
1031 * @return true if no RRs have the type.
1032 */
nsec3_no_type(struct ub_packed_rrset_key * k,uint16_t t)1033 static int nsec3_no_type(struct ub_packed_rrset_key* k, uint16_t t)
1034 {
1035 int count = (int)((struct packed_rrset_data*)k->entry.data)->count;
1036 int i;
1037 for(i=0; i<count; i++)
1038 if(nsec3_has_type(k, i, t))
1039 return 0;
1040 return 1;
1041 }
1042
1043 /**
1044 * See if rrset exists in rrset cache.
1045 * If it does, the bit is checked, and if not expired, it is returned
1046 * allocated in region.
1047 * @param rrset_cache: rrset cache
1048 * @param qname: to lookup rrset name
1049 * @param qname_len: length of qname.
1050 * @param qtype: type of rrset to lookup, host order
1051 * @param qclass: class of rrset to lookup, host order
1052 * @param flags: flags for rrset to lookup
1053 * @param region: where to alloc result
1054 * @param checkbit: if true, a bit in the nsec typemap is checked for absence.
1055 * @param checktype: which bit to check
1056 * @param now: to check ttl against
1057 * @return rrset or NULL
1058 */
static struct ub_packed_rrset_key*
grab_nsec(struct rrset_cache* rrset_cache, uint8_t* qname, size_t qname_len,
	uint16_t qtype, uint16_t qclass, uint32_t flags,
	struct regional* region, int checkbit, uint16_t checktype,
	time_t now)
{
	struct ub_packed_rrset_key* r, *k = rrset_cache_lookup(rrset_cache,
		qname, qname_len, qtype, qclass, flags, now, 0);
	struct packed_rrset_data* d;
	if(!k) return NULL;
	d = (struct packed_rrset_data*)k->entry.data;
	/* the cache entry is returned read-locked; every early exit
	 * below must release that lock */
	if(d->ttl < now) {
		/* expired entry, do not use it */
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* only secure or unchecked records that have signatures. */
	if( ! ( d->security == sec_status_secure ||
		(d->security == sec_status_unchecked &&
		d->rrsig_count > 0) ) ) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* check if checktype is absent (in the NSEC type bitmap, or in
	 * every NSEC3 of a possible hash collision) */
	if(checkbit && (
		(qtype == LDNS_RR_TYPE_NSEC && nsec_has_type(k, checktype)) ||
		(qtype == LDNS_RR_TYPE_NSEC3 && !nsec3_no_type(k, checktype))
		)) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* looks OK! copy to region and return it */
	r = packed_rrset_copy_region(k, region, now);
	/* if it failed, we return the NULL */
	lock_rw_unlock(&k->entry.lock);
	return r;
}
1095
1096 /**
1097 * Get best NSEC record for qname. Might be matching, covering or totally
1098 * useless.
1099 * @param neg_cache: neg cache
1100 * @param qname: to lookup rrset name
1101 * @param qname_len: length of qname.
1102 * @param qclass: class of rrset to lookup, host order
1103 * @param rrset_cache: rrset cache
1104 * @param now: to check ttl against
1105 * @param region: where to alloc result
1106 * @return rrset or NULL
1107 */
static struct ub_packed_rrset_key*
neg_find_nsec(struct val_neg_cache* neg_cache, uint8_t* qname, size_t qname_len,
	uint16_t qclass, struct rrset_cache* rrset_cache, time_t now,
	struct regional* region)
{
	int labs;
	uint32_t flags;
	struct val_neg_zone* zone;
	struct val_neg_data* data;
	struct ub_packed_rrset_key* nsec;

	labs = dname_count_labels(qname);
	lock_basic_lock(&neg_cache->lock);
	zone = neg_closest_zone_parent(neg_cache, qname, qname_len, labs,
		qclass);
	/* walk up until we reach a zone entry that is actually in use */
	while(zone && !zone->in_use)
		zone = zone->parent;
	if(!zone) {
		lock_basic_unlock(&neg_cache->lock);
		return NULL;
	}

	/* NSEC only for now */
	if(zone->nsec3_hash) {
		lock_basic_unlock(&neg_cache->lock);
		return NULL;
	}

	/* ignore return value, don't care if it is an exact or smaller match */
	(void)neg_closest_data(zone, qname, qname_len, labs, &data);
	if(!data) {
		lock_basic_unlock(&neg_cache->lock);
		return NULL;
	}

	/* ENT nodes are not in use, try the previous node. If the previous node
	 * is not in use, we don't have an useful NSEC and give up. */
	if(!data->in_use) {
		data = (struct val_neg_data*)rbtree_previous((rbnode_type*)data);
		if((rbnode_type*)data == RBTREE_NULL || !data->in_use) {
			lock_basic_unlock(&neg_cache->lock);
			return NULL;
		}
	}

	flags = 0;
	/* an NSEC at the zone apex carries a special cache flag */
	if(query_dname_compare(data->name, zone->name) == 0)
		flags = PACKED_RRSET_NSEC_AT_APEX;

	nsec = grab_nsec(rrset_cache, data->name, data->len, LDNS_RR_TYPE_NSEC,
		zone->dclass, flags, region, 0, 0, now);
	lock_basic_unlock(&neg_cache->lock);
	return nsec;
}
1162
1163 /** find nsec3 closest encloser in neg cache */
static struct val_neg_data*
neg_find_nsec3_ce(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
	int qlabs, sldns_buffer* buf, uint8_t* hashnc, size_t* nclen)
{
	struct val_neg_data* data;
	uint8_t hashce[NSEC3_SHA_LEN];
	uint8_t b32[257];
	size_t celen, b32len;

	*nclen = 0;
	/* strip labels off qname until an exact nsec3 hash match exists
	 * in the zone tree; the hash of the previously tried (one label
	 * longer) name is kept in hashnc as the next-closer candidate */
	while(qlabs > 0) {
		/* hash */
		if(!(celen=nsec3_get_hashed(buf, qname, qname_len,
			zone->nsec3_hash, zone->nsec3_iter, zone->nsec3_salt,
			zone->nsec3_saltlen, hashce, sizeof(hashce))))
			return NULL;
		if(!(b32len=nsec3_hash_to_b32(hashce, celen, zone->name,
			zone->len, b32, sizeof(b32))))
			return NULL;

		/* lookup (exact match only) */
		data = neg_find_data(zone, b32, b32len, zone->labs+1);
		if(data && data->in_use) {
			/* found ce match! */
			return data;
		}

		/* no match at this length; remember this hash as the
		 * next closer for the next (shorter) iteration */
		*nclen = celen;
		memmove(hashnc, hashce, celen);
		dname_remove_label(&qname, &qname_len);
		qlabs --;
	}
	return NULL;
}
1198
1199 /** check nsec3 parameters on nsec3 rrset with current zone values */
1200 static int
neg_params_ok(struct val_neg_zone * zone,struct ub_packed_rrset_key * rrset)1201 neg_params_ok(struct val_neg_zone* zone, struct ub_packed_rrset_key* rrset)
1202 {
1203 int h;
1204 uint8_t* s;
1205 size_t slen, it;
1206 if(!nsec3_get_params(rrset, 0, &h, &it, &s, &slen))
1207 return 0;
1208 return (h == zone->nsec3_hash && it == zone->nsec3_iter &&
1209 slen == zone->nsec3_saltlen &&
1210 (slen != 0 && zone->nsec3_salt && s
1211 && memcmp(zone->nsec3_salt, s, slen) == 0));
1212 }
1213
1214 /** get next closer for nsec3 proof */
static struct ub_packed_rrset_key*
neg_nsec3_getnc(struct val_neg_zone* zone, uint8_t* hashnc, size_t nclen,
	struct rrset_cache* rrset_cache, struct regional* region,
	time_t now, uint8_t* b32, size_t maxb32)
{
	struct ub_packed_rrset_key* nc_rrset;
	struct val_neg_data* data;
	size_t b32len;

	if(!(b32len=nsec3_hash_to_b32(hashnc, nclen, zone->name,
		zone->len, b32, maxb32)))
		return NULL;
	/* find the smaller-or-equal element; its NSEC3 may cover the
	 * hashed next-closer name */
	(void)neg_closest_data(zone, b32, b32len, zone->labs+1, &data);
	if(!data && zone->tree.count != 0) {
		/* could be before the first entry ; return the last
		 * entry (possibly the rollover nsec3 at end) */
		data = (struct val_neg_data*)rbtree_last(&zone->tree);
	}
	/* ascend to an element that is in use, if this one is not */
	while(data && !data->in_use)
		data = data->parent;
	if(!data)
		return NULL;
	/* got a data element in tree, grab it */
	nc_rrset = grab_nsec(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 0, 0, now);
	if(!nc_rrset)
		return NULL;
	/* its parameters must match the zone's current NSEC3 params */
	if(!neg_params_ok(zone, nc_rrset))
		return NULL;
	return nc_rrset;
}
1246
1247 /** neg cache nsec3 proof procedure*/
static struct dns_msg*
neg_nsec3_proof_ds(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
	int qlabs, sldns_buffer* buf, struct rrset_cache* rrset_cache,
	struct regional* region, time_t now, uint8_t* topname)
{
	struct dns_msg* msg;
	struct val_neg_data* data;
	uint8_t hashnc[NSEC3_SHA_LEN];
	size_t nclen;
	struct ub_packed_rrset_key* ce_rrset, *nc_rrset;
	struct nsec3_cached_hash c;
	uint8_t nc_b32[257];

	/* for NSEC3 ; determine the closest encloser for which we
	 * can find an exact match. Remember the hashed lower name,
	 * since that is the one we need a closest match for.
	 * If we find a match straight away, then it becomes NODATA.
	 * Otherwise, NXDOMAIN or if OPTOUT, an insecure delegation.
	 * Also check that parameters are the same on closest encloser
	 * and on closest match.
	 */
	if(!zone->nsec3_hash)
		return NULL; /* not nsec3 zone */

	if(!(data=neg_find_nsec3_ce(zone, qname, qname_len, qlabs, buf,
		hashnc, &nclen))) {
		return NULL;
	}

	/* grab the ce rrset; checkbit=1 verifies the DS type is absent */
	ce_rrset = grab_nsec(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 1,
		LDNS_RR_TYPE_DS, now);
	if(!ce_rrset)
		return NULL;
	if(!neg_params_ok(zone, ce_rrset))
		return NULL;

	if(nclen == 0) {
		/* exact match, just check the type bits */
		/* need: -SOA, -DS, +NS */
		if(nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_SOA) ||
			nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_DS) ||
			!nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_NS))
			return NULL;
		if(!(msg = dns_msg_create(qname, qname_len,
			LDNS_RR_TYPE_DS, zone->dclass, region, 1)))
			return NULL;
		/* TTL reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, ce_rrset, 0))
			return NULL;
		return msg;
	}

	/* optout is not allowed without knowing the trust-anchor in use,
	 * otherwise the optout could spoof away that anchor */
	if(!topname)
		return NULL;

	/* if there is no exact match, it must be in an optout span
	 * (an existing DS implies an NSEC3 must exist) */
	nc_rrset = neg_nsec3_getnc(zone, hashnc, nclen, rrset_cache,
		region, now, nc_b32, sizeof(nc_b32));
	if(!nc_rrset)
		return NULL;
	if(!neg_params_ok(zone, nc_rrset))
		return NULL;
	if(!nsec3_has_optout(nc_rrset, 0))
		return NULL;
	/* nc_b32 is in dname form: byte 0 is the label length, the
	 * base32 characters follow it */
	c.hash = hashnc;
	c.hash_len = nclen;
	c.b32 = nc_b32+1;
	c.b32_len = (size_t)nc_b32[0];
	if(nsec3_covers(zone->name, &c, nc_rrset, 0, buf)) {
		/* nc_rrset covers the next closer name.
		 * ce_rrset equals a closer encloser.
		 * nc_rrset is optout.
		 * No need to check wildcard for type DS */
		/* capacity=3: ce + nc + soa(if needed) */
		if(!(msg = dns_msg_create(qname, qname_len,
			LDNS_RR_TYPE_DS, zone->dclass, region, 3)))
			return NULL;
		/* now=0 because TTL was reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, ce_rrset, 0))
			return NULL;
		if(!dns_msg_authadd(msg, region, nc_rrset, 0))
			return NULL;
		return msg;
	}
	return NULL;
}
1339
1340 /**
1341 * Add SOA record for external responses.
1342 * @param rrset_cache: to look into.
1343 * @param now: current time.
1344 * @param region: where to perform the allocation
1345 * @param msg: current msg with NSEC.
1346 * @param zone: val_neg_zone if we have one.
1347 * @return false on lookup or alloc failure.
1348 */
add_soa(struct rrset_cache * rrset_cache,time_t now,struct regional * region,struct dns_msg * msg,struct val_neg_zone * zone)1349 static int add_soa(struct rrset_cache* rrset_cache, time_t now,
1350 struct regional* region, struct dns_msg* msg, struct val_neg_zone* zone)
1351 {
1352 struct ub_packed_rrset_key* soa;
1353 uint8_t* nm;
1354 size_t nmlen;
1355 uint16_t dclass;
1356 if(zone) {
1357 nm = zone->name;
1358 nmlen = zone->len;
1359 dclass = zone->dclass;
1360 } else {
1361 /* Assumes the signer is the zone SOA to add */
1362 nm = reply_nsec_signer(msg->rep, &nmlen, &dclass);
1363 if(!nm)
1364 return 0;
1365 }
1366 soa = rrset_cache_lookup(rrset_cache, nm, nmlen, LDNS_RR_TYPE_SOA,
1367 dclass, PACKED_RRSET_SOA_NEG, now, 0);
1368 if(!soa)
1369 return 0;
1370 if(!dns_msg_authadd(msg, region, soa, now)) {
1371 lock_rw_unlock(&soa->entry.lock);
1372 return 0;
1373 }
1374 lock_rw_unlock(&soa->entry.lock);
1375 return 1;
1376 }
1377
struct dns_msg*
val_neg_getmsg(struct val_neg_cache* neg, struct query_info* qinfo,
	struct regional* region, struct rrset_cache* rrset_cache,
	sldns_buffer* buf, time_t now, int addsoa, uint8_t* topname,
	struct config_file* cfg)
{
	struct dns_msg* msg;
	struct ub_packed_rrset_key* nsec; /* qname matching/covering nsec */
	struct ub_packed_rrset_key* wcrr; /* wildcard record or nsec */
	uint8_t* nodata_wc = NULL;
	uint8_t* ce = NULL;
	size_t ce_len;
	uint8_t wc_ce[LDNS_MAX_DOMAINLEN+3];
	struct query_info wc_qinfo;
	struct ub_packed_rrset_key* cache_wc;
	struct packed_rrset_data* wcrr_data;
	int rcode = LDNS_RCODE_NOERROR;
	uint8_t* zname;
	size_t zname_len;
	int zname_labs;
	struct val_neg_zone* zone;

	/* only for DS queries when aggressive use of NSEC is disabled */
	if(qinfo->qtype != LDNS_RR_TYPE_DS && !cfg->aggressive_nsec)
		return NULL;
	log_assert(!topname || dname_subdomain_c(qinfo->qname, topname));

	/* Get best available NSEC for qname */
	nsec = neg_find_nsec(neg, qinfo->qname, qinfo->qname_len, qinfo->qclass,
		rrset_cache, now, region);

	/* Matching NSEC, use to generate No Data answer. Not creating answers
	 * yet for No Data proven using wildcard. */
	if(nsec && nsec_proves_nodata(nsec, qinfo, &nodata_wc) && !nodata_wc) {
		/* do not create nodata answers for qtype ANY, it is a query
		 * type, not an rrtype to disprove. Nameerrors are useful for
		 * qtype ANY, in the else branch. */
		if(qinfo->qtype == LDNS_RR_TYPE_ANY)
			return NULL;
		if(!(msg = dns_msg_create(qinfo->qname, qinfo->qname_len,
			qinfo->qtype, qinfo->qclass, region, 2)))
			return NULL;
		if(!dns_msg_authadd(msg, region, nsec, 0))
			return NULL;
		if(addsoa && !add_soa(rrset_cache, now, region, msg, NULL))
			return NULL;

		/* statistics counter update under the cache lock */
		lock_basic_lock(&neg->lock);
		neg->num_neg_cache_noerror++;
		lock_basic_unlock(&neg->lock);
		return msg;
	} else if(nsec && val_nsec_proves_name_error(nsec, qinfo->qname)) {
		if(!(msg = dns_msg_create(qinfo->qname, qinfo->qname_len,
			qinfo->qtype, qinfo->qclass, region, 3)))
			return NULL;
		if(!(ce = nsec_closest_encloser(qinfo->qname, nsec)))
			return NULL;
		dname_count_size_labels(ce, &ce_len);

		/* No extra extra NSEC required if both nameerror qname and
		 * nodata *.ce. are proven already. */
		if(!nodata_wc || query_dname_compare(nodata_wc, ce) != 0) {
			/* Qname proven non existing, get wildcard record for
			 * QTYPE or NSEC covering or matching wildcard. */

			/* Num labels in ce is always smaller than in qname,
			 * therefore adding the wildcard label cannot overflow
			 * buffer. */
			wc_ce[0] = 1;
			wc_ce[1] = (uint8_t)'*';
			memmove(wc_ce+2, ce, ce_len);
			wc_qinfo.qname = wc_ce;
			wc_qinfo.qname_len = ce_len + 2;
			wc_qinfo.qtype = qinfo->qtype;


			if((cache_wc = rrset_cache_lookup(rrset_cache, wc_qinfo.qname,
				wc_qinfo.qname_len, wc_qinfo.qtype,
				qinfo->qclass, 0/*flags*/, now, 0/*read only*/))) {
				/* Synthesize wildcard answer */
				wcrr_data = (struct packed_rrset_data*)cache_wc->entry.data;
				/* only secure, or unchecked-with-signature
				 * records may be used (same rule as
				 * grab_nsec applies) */
				if(!(wcrr_data->security == sec_status_secure ||
					(wcrr_data->security == sec_status_unchecked &&
					wcrr_data->rrsig_count > 0))) {
					lock_rw_unlock(&cache_wc->entry.lock);
					return NULL;
				}
				if(!(wcrr = packed_rrset_copy_region(cache_wc,
					region, now))) {
					lock_rw_unlock(&cache_wc->entry.lock);
					return NULL;
				};
				lock_rw_unlock(&cache_wc->entry.lock);
				/* relabel the copied wildcard rrset to the
				 * query name (wildcard expansion) */
				wcrr->rk.dname = qinfo->qname;
				wcrr->rk.dname_len = qinfo->qname_len;
				if(!dns_msg_ansadd(msg, region, wcrr, 0))
					return NULL;
				/* No SOA needed for wildcard synthesised
				 * answer. */
				addsoa = 0;
			} else {
				/* Get wildcard NSEC for possible non existence
				 * proof */
				if(!(wcrr = neg_find_nsec(neg, wc_qinfo.qname,
					wc_qinfo.qname_len, qinfo->qclass,
					rrset_cache, now, region)))
					return NULL;

				nodata_wc = NULL;
				if(val_nsec_proves_name_error(wcrr, wc_ce))
					rcode = LDNS_RCODE_NXDOMAIN;
				else if(!nsec_proves_nodata(wcrr, &wc_qinfo,
					&nodata_wc) || nodata_wc)
					/* &nodata_wc shouldn't be set, wc_qinfo
					 * already contains wildcard domain. */
					/* NSEC doesn't prove anything for
					 * wildcard. */
					return NULL;
				/* only add the wildcard NSEC if it is not the
				 * same record as the qname NSEC */
				if(query_dname_compare(wcrr->rk.dname,
					nsec->rk.dname) != 0)
					if(!dns_msg_authadd(msg, region, wcrr, 0))
						return NULL;
			}
		}

		if(!dns_msg_authadd(msg, region, nsec, 0))
			return NULL;
		if(addsoa && !add_soa(rrset_cache, now, region, msg, NULL))
			return NULL;

		/* Increment statistic counters */
		lock_basic_lock(&neg->lock);
		if(rcode == LDNS_RCODE_NOERROR)
			neg->num_neg_cache_noerror++;
		else if(rcode == LDNS_RCODE_NXDOMAIN)
			neg->num_neg_cache_nxdomain++;
		lock_basic_unlock(&neg->lock);

		FLAGS_SET_RCODE(msg->rep->flags, rcode);
		return msg;
	}

	/* No aggressive use of NSEC3 for now, only proceed for DS types. */
	if(qinfo->qtype != LDNS_RR_TYPE_DS){
		return NULL;
	}
	/* check NSEC3 neg cache for type DS */
	/* need to look one zone higher for DS type */
	zname = qinfo->qname;
	zname_len = qinfo->qname_len;
	dname_remove_label(&zname, &zname_len);
	zname_labs = dname_count_labels(zname);

	/* lookup closest zone */
	lock_basic_lock(&neg->lock);
	zone = neg_closest_zone_parent(neg, zname, zname_len, zname_labs,
		qinfo->qclass);
	while(zone && !zone->in_use)
		zone = zone->parent;
	/* check that the zone is not too high up so that we do not pick data
	 * out of a zone that is above the last-seen key (or trust-anchor). */
	if(zone && topname) {
		if(!dname_subdomain_c(zone->name, topname))
			zone = NULL;
	}
	if(!zone) {
		lock_basic_unlock(&neg->lock);
		return NULL;
	}

	/* the neg cache lock stays held over the proof construction */
	msg = neg_nsec3_proof_ds(zone, qinfo->qname, qinfo->qname_len,
		zname_labs+1, buf, rrset_cache, region, now, topname);
	if(msg && addsoa && !add_soa(rrset_cache, now, region, msg, zone)) {
		lock_basic_unlock(&neg->lock);
		return NULL;
	}
	lock_basic_unlock(&neg->lock);
	return msg;
}
1557