/*
 * validator/val_neg.c - validator aggressive negative caching functions.
 *
 * Copyright (c) 2008, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * \file
 *
 * This file contains helper functions for the validator module.
 * The functions help with aggressive negative caching.
 * This creates new denials of existence, and proofs for absence of types,
 * from cached NSEC records.
 */
#ifdef HAVE_OPENSSL_SSL_H
#include "openssl/ssl.h"
#define NSEC3_SHA_LEN SHA_DIGEST_LENGTH
#else
#define NSEC3_SHA_LEN 20
#endif
#include "validator/val_neg.h"
#include "validator/val_nsec.h"
#include "validator/val_nsec3.h"
#include "validator/val_utils.h"
#include "util/data/dname.h"
#include "util/data/msgreply.h"
#include "util/net_help.h"
#include "util/config_file.h"
#include "services/cache/rrset.h"
#include "services/cache/dns.h"
#include "ldns/rrdef.h"
#include "ldns/sbuffer.h"
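
/*
 * Rough usage sketch of this negative cache (illustrative only; the calls
 * below are the public functions defined in this file, the surrounding
 * validator code that drives them is assumed):
 *
 *	struct val_neg_cache* neg = val_neg_create(cfg, max_nsec3_iterations);
 *	...
 *	val_neg_addreply(neg, rep);              / * validated negative answer * /
 *	val_neg_addreferral(neg, rep, zonename); / * referral with NSEC/NSEC3 * /
 *	...
 *	msg = val_neg_getmsg(neg, qinfo, region, rrset_cache, buf, now,
 *		addsoa, topname);                / * synthesize DS denial * /
 *	...
 *	neg_cache_delete(neg);
 */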
int val_neg_data_compare(const void* a, const void* b)
{
	struct val_neg_data* x = (struct val_neg_data*)a;
	struct val_neg_data* y = (struct val_neg_data*)b;
	int m;
	return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
}
int val_neg_zone_compare(const void* a, const void* b)
{
	int m;
	struct val_neg_zone* x = (struct val_neg_zone*)a;
	struct val_neg_zone* y = (struct val_neg_zone*)b;
	if(x->dclass != y->dclass) {
		if(x->dclass < y->dclass)
			return -1;
		return 1;
	}
	return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
}
struct val_neg_cache* val_neg_create(struct config_file* cfg, size_t maxiter)
{
	struct val_neg_cache* neg = (struct val_neg_cache*)calloc(1,
		sizeof(*neg));
	if(!neg) {
		log_err("Could not create neg cache: out of memory");
		return NULL;
	}
	neg->nsec3_max_iter = maxiter;
	neg->max = 1024*1024; /* 1 M is thousands of entries */
	if(cfg) neg->max = cfg->neg_cache_size;
	rbtree_init(&neg->tree, &val_neg_zone_compare);
	lock_basic_init(&neg->lock);
	lock_protect(&neg->lock, neg, sizeof(*neg));
	return neg;
}
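
/*
 * Example (values are illustrative): val_neg_create(cfg, 150) creates a
 * cache limited to cfg->neg_cache_size bytes whose neg_insert_data()
 * ignores NSEC3 parameter sets with more than 150 iterations.
 */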
size_t val_neg_get_mem(struct val_neg_cache* neg)
{
	size_t result;
	lock_basic_lock(&neg->lock);
	result = sizeof(*neg) + neg->use;
	lock_basic_unlock(&neg->lock);
	return result;
}
/** clear datas on cache deletion */
static void
neg_clear_datas(rbnode_t* n, void* ATTR_UNUSED(arg))
{
	struct val_neg_data* d = (struct val_neg_data*)n;
	free(d->name);
	free(d);
}
/** clear zones on cache deletion */
static void
neg_clear_zones(rbnode_t* n, void* ATTR_UNUSED(arg))
{
	struct val_neg_zone* z = (struct val_neg_zone*)n;
	/* delete all the rrset entries in the tree */
	traverse_postorder(&z->tree, &neg_clear_datas, NULL);
	free(z->nsec3_salt);
	free(z->name);
	free(z);
}
void neg_cache_delete(struct val_neg_cache* neg)
{
	if(!neg) return;
	lock_basic_destroy(&neg->lock);
	/* delete all the zones in the tree */
	traverse_postorder(&neg->tree, &neg_clear_zones, NULL);
	free(neg);
}
/**
 * Put data element at the front of the LRU list.
 * @param neg: negative cache with LRU start and end.
 * @param data: this data is fronted.
 */
static void neg_lru_front(struct val_neg_cache* neg,
	struct val_neg_data* data)
{
	data->prev = NULL;
	data->next = neg->first;
	if(!neg->first)
		neg->last = data;
	else	neg->first->prev = data;
	neg->first = data;
}
/**
 * Remove data element from LRU list.
 * @param neg: negative cache with LRU start and end.
 * @param data: this data is removed from the list.
 */
static void neg_lru_remove(struct val_neg_cache* neg,
	struct val_neg_data* data)
{
	if(data->prev)
		data->prev->next = data->next;
	else	neg->first = data->next;
	if(data->next)
		data->next->prev = data->prev;
	else	neg->last = data->prev;
	data->prev = NULL;
	data->next = NULL;
}
/**
 * Touch LRU for data element, put it at the start of the LRU list.
 * @param neg: negative cache with LRU start and end.
 * @param data: this data is used.
 */
static void neg_lru_touch(struct val_neg_cache* neg,
	struct val_neg_data* data)
{
	if(data == neg->first)
		return; /* nothing to do */
	/* remove from current lru position */
	neg_lru_remove(neg, data);
	/* add at front */
	neg_lru_front(neg, data);
}
/**
 * Delete a zone element from the negative cache.
 * May delete other zone elements to keep the tree coherent, or
 * only mark the element as 'not in use'.
 * @param neg: negative cache.
 * @param z: zone element to delete.
 */
static void neg_delete_zone(struct val_neg_cache* neg, struct val_neg_zone* z)
{
	struct val_neg_zone* p, *np;
	if(!z) return;
	log_assert(z->in_use);
	log_assert(z->count > 0);
	z->in_use = 0;

	/* go up the tree and reduce counts */
	p = z;
	while(p) {
		log_assert(p->count > 0);
		p->count--;
		p = p->parent;
	}

	/* remove zones with zero count */
	p = z;
	while(p && p->count == 0) {
		np = p->parent;
		(void)rbtree_delete(&neg->tree, &p->node);
		neg->use -= p->len + sizeof(*p);
		free(p->nsec3_salt);
		free(p->name);
		free(p);
		p = np;
	}
}
void neg_delete_data(struct val_neg_cache* neg, struct val_neg_data* el)
{
	struct val_neg_zone* z;
	struct val_neg_data* p, *np;
	if(!el) return;
	z = el->zone;
	log_assert(el->in_use);
	log_assert(el->count > 0);
	el->in_use = 0;

	/* remove it from the lru list */
	neg_lru_remove(neg, el);

	/* go up the tree and reduce counts */
	p = el;
	while(p) {
		log_assert(p->count > 0);
		p->count--;
		p = p->parent;
	}

	/* delete 0 count items from tree */
	p = el;
	while(p && p->count == 0) {
		np = p->parent;
		(void)rbtree_delete(&z->tree, &p->node);
		neg->use -= p->len + sizeof(*p);
		free(p->name);
		free(p);
		p = np;
	}

	/* check if the zone is now unused */
	if(z->tree.count == 0) {
		neg_delete_zone(neg, z);
	}
}
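
/*
 * Note on the counting scheme used above: an in-use element contributes
 * one to its own count and to the count of every ancestor in the tree, so
 * a node is only removed once no in-use descendants remain; the same idea
 * is applied to zones in neg_delete_zone().
 */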
/**
 * Create more space in the negative cache.
 * The oldest elements are deleted until enough space is present.
 * Empty zones are deleted.
 * @param neg: negative cache.
 * @param need: how many bytes are needed.
 */
static void neg_make_space(struct val_neg_cache* neg, size_t need)
{
	/* delete elements until enough space or it is empty */
	while(neg->last && neg->max < neg->use + need) {
		neg_delete_data(neg, neg->last);
	}
}
struct val_neg_zone* neg_find_zone(struct val_neg_cache* neg,
	uint8_t* nm, size_t len, uint16_t dclass)
{
	struct val_neg_zone lookfor;
	struct val_neg_zone* result;
	lookfor.node.key = &lookfor;
	lookfor.name = nm;
	lookfor.len = len;
	lookfor.labs = dname_count_labels(lookfor.name);
	lookfor.dclass = dclass;

	result = (struct val_neg_zone*)
		rbtree_search(&neg->tree, lookfor.node.key);
	return result;
}
/**
 * Find the given data.
 * @param zone: negative zone
 * @param nm: what to look for.
 * @param len: length of nm
 * @param labs: labels in nm
 * @return data or NULL if not found.
 */
static struct val_neg_data* neg_find_data(struct val_neg_zone* zone,
	uint8_t* nm, size_t len, int labs)
{
	struct val_neg_data lookfor;
	struct val_neg_data* result;
	lookfor.node.key = &lookfor;
	lookfor.name = nm;
	lookfor.len = len;
	lookfor.labs = labs;

	result = (struct val_neg_data*)
		rbtree_search(&zone->tree, lookfor.node.key);
	return result;
}
/**
 * Calculate space needed for the data and all its parents.
 * @param rep: reply with NSEC entries.
 * @return size in bytes.
 */
static size_t calc_data_need(struct reply_info* rep)
{
	uint8_t* d;
	size_t i, len, res = 0;

	for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
			d = rep->rrsets[i]->rk.dname;
			len = rep->rrsets[i]->rk.dname_len;
			res = sizeof(struct val_neg_data) + len;
			while(!dname_is_root(d)) {
				log_assert(len > 1); /* not root label */
				dname_remove_label(&d, &len);
				res += sizeof(struct val_neg_data) + len;
			}
		}
	}
	return res;
}
/**
 * Calculate space needed for a zone and all its parents.
 * @param d: name of the zone.
 * @param len: length of the name.
 * @return size in bytes.
 */
static size_t calc_zone_need(uint8_t* d, size_t len)
{
	size_t res = sizeof(struct val_neg_zone) + len;
	while(!dname_is_root(d)) {
		log_assert(len > 1); /* not root label */
		dname_remove_label(&d, &len);
		res += sizeof(struct val_neg_zone) + len;
	}
	return res;
}
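
/*
 * Worked example (the struct size is whatever the platform gives): for
 * d = "www.example.com." (wire length 17) the loop also accounts for
 * "example.com." (13), "com." (5) and the root "." (1), giving
 * 4*sizeof(struct val_neg_zone) + 17 + 13 + 5 + 1 bytes, one entry per
 * zone node that neg_zone_chain() may have to create.
 */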
/**
 * Find closest existing parent zone of the given name.
 * @param neg: negative cache.
 * @param nm: name to look for
 * @param nm_len: length of nm
 * @param labs: labelcount of nm.
 * @param qclass: class.
 * @return the zone or NULL if none found.
 */
static struct val_neg_zone* neg_closest_zone_parent(struct val_neg_cache* neg,
	uint8_t* nm, size_t nm_len, int labs, uint16_t qclass)
{
	struct val_neg_zone key;
	struct val_neg_zone* result;
	rbnode_t* res = NULL;
	key.node.key = &key;
	key.name = nm;
	key.len = nm_len;
	key.labs = labs;
	key.dclass = qclass;
	if(rbtree_find_less_equal(&neg->tree, &key, &res)) {
		/* exact match */
		result = (struct val_neg_zone*)res;
	} else {
		/* smaller element (or no element) */
		int m;
		result = (struct val_neg_zone*)res;
		if(!result || result->dclass != qclass)
			return NULL;
		/* count number of labels matched */
		(void)dname_lab_cmp(result->name, result->labs, key.name,
			key.labs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			if(result->labs <= m)
				break;
			result = result->parent;
		}
	}
	return result;
}
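
/*
 * Example: with "com." and "example.com." zones in the tree (same class),
 * a lookup for nm = "www.example.com." returns the "example.com." zone,
 * the longest existing ancestor; an exact name such as "example.com."
 * returns that zone itself.
 */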
/**
 * Find closest existing parent data for the given name.
 * @param zone: to look in.
 * @param nm: name to look for
 * @param nm_len: length of nm
 * @param labs: labelcount of nm.
 * @return the data or NULL if none found.
 */
static struct val_neg_data* neg_closest_data_parent(
	struct val_neg_zone* zone, uint8_t* nm, size_t nm_len, int labs)
{
	struct val_neg_data key;
	struct val_neg_data* result;
	rbnode_t* res = NULL;
	key.node.key = &key;
	key.name = nm;
	key.len = nm_len;
	key.labs = labs;
	if(rbtree_find_less_equal(&zone->tree, &key, &res)) {
		/* exact match */
		result = (struct val_neg_data*)res;
	} else {
		/* smaller element (or no element) */
		int m;
		result = (struct val_neg_data*)res;
		if(!result)
			return NULL;
		/* count number of labels matched */
		(void)dname_lab_cmp(result->name, result->labs, key.name,
			key.labs, &m);
		while(result) { /* go up until qname is subdomain of stub */
			if(result->labs <= m)
				break;
			result = result->parent;
		}
	}
	return result;
}
/**
 * Create a single zone node.
 * @param nm: name for zone (copied)
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @param dclass: class of zone, host order.
 * @return new zone or NULL on failure
 */
static struct val_neg_zone* neg_setup_zone_node(
	uint8_t* nm, size_t nm_len, int labs, uint16_t dclass)
{
	struct val_neg_zone* zone =
		(struct val_neg_zone*)calloc(1, sizeof(*zone));
	if(!zone) {
		return NULL;
	}
	zone->node.key = zone;
	zone->name = memdup(nm, nm_len);
	if(!zone->name) {
		free(zone);
		return NULL;
	}
	zone->len = nm_len;
	zone->labs = labs;
	zone->dclass = dclass;

	rbtree_init(&zone->tree, &val_neg_data_compare);
	return zone;
}
/**
 * Create a linked list of parent zones, starting at the long name and
 * ending on the parent (can be NULL, then it creates up to the root).
 * @param nm: name for lowest in chain
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @param dclass: class of zone.
 * @param parent: NULL for to root, else so it fits under here.
 * @return zone; a chain of zones and their parents up to the parent,
 *	or NULL on malloc failure.
 */
static struct val_neg_zone* neg_zone_chain(
	uint8_t* nm, size_t nm_len, int labs, uint16_t dclass,
	struct val_neg_zone* parent)
{
	int i;
	int tolabs = parent?parent->labs:0;
	struct val_neg_zone* zone, *prev = NULL, *first = NULL;

	/* create the new subtree, i is labelcount of current creation */
	/* this creates a 'first' to z->parent=NULL list of zones */
	for(i=labs; i!=tolabs; i--) {
		/* create new item */
		zone = neg_setup_zone_node(nm, nm_len, i, dclass);
		if(!zone) {
			/* need to delete other allocations in this routine!*/
			struct val_neg_zone* p=first, *np;
			while(p) {
				np = p->parent;
				free(p->name);
				free(p);
				p = np;
			}
			return NULL;
		}
		if(i == labs) {
			first = zone;
		} else {
			prev->parent = zone;
		}
		/* prepare for next name */
		prev = zone;
		dname_remove_label(&nm, &nm_len);
	}
	return first;
}
void val_neg_zone_take_inuse(struct val_neg_zone* zone)
{
	if(!zone->in_use) {
		struct val_neg_zone* p;
		zone->in_use = 1;
		/* increase usage count of all parents */
		for(p=zone; p; p = p->parent) {
			p->count++;
		}
	}
}
struct val_neg_zone* neg_create_zone(struct val_neg_cache* neg,
	uint8_t* nm, size_t nm_len, uint16_t dclass)
{
	struct val_neg_zone* zone;
	struct val_neg_zone* parent;
	struct val_neg_zone* p, *np;
	int labs = dname_count_labels(nm);

	/* find closest enclosing parent zone that (still) exists */
	parent = neg_closest_zone_parent(neg, nm, nm_len, labs, dclass);
	if(parent && query_dname_compare(parent->name, nm) == 0)
		return parent; /* already exists, weird */
	/* if parent exists, it is in use */
	log_assert(!parent || parent->count > 0);
	zone = neg_zone_chain(nm, nm_len, labs, dclass, parent);
	if(!zone) {
		return NULL;
	}

	/* insert the list of zones into the tree */
	p = zone;
	while(p) {
		np = p->parent;
		/* mem use */
		neg->use += sizeof(struct val_neg_zone) + p->len;
		/* insert in tree */
		(void)rbtree_insert(&neg->tree, &p->node);
		/* last one needs proper parent pointer */
		if(np == NULL)
			p->parent = parent;
		p = np;
	}
	return zone;
}
/** find zone name of message, returns the SOA record */
static struct ub_packed_rrset_key* reply_find_soa(struct reply_info* rep)
{
	size_t i;
	for(i=rep->an_numrrsets; i < rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_SOA)
			return rep->rrsets[i];
	}
	return NULL;
}
/** see if the reply has NSEC records worthy of caching */
static int reply_has_nsec(struct reply_info* rep)
{
	size_t i;
	struct packed_rrset_data* d;
	if(rep->security != sec_status_secure)
		return 0;
	for(i=rep->an_numrrsets; i < rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
			d = (struct packed_rrset_data*)rep->rrsets[i]->
				entry.data;
			if(d->security == sec_status_secure)
				return 1;
		}
	}
	return 0;
}
/**
 * Create single node of data element.
 * @param nm: name (copied)
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @return element with name nm, or NULL on malloc failure.
 */
static struct val_neg_data* neg_setup_data_node(
	uint8_t* nm, size_t nm_len, int labs)
{
	struct val_neg_data* el;
	el = (struct val_neg_data*)calloc(1, sizeof(*el));
	if(!el) {
		return NULL;
	}
	el->node.key = el;
	el->name = memdup(nm, nm_len);
	if(!el->name) {
		free(el);
		return NULL;
	}
	el->len = nm_len;
	el->labs = labs;
	return el;
}
/**
 * Create chain of data element and parents.
 * @param nm: name for lowest in chain.
 * @param nm_len: length of name
 * @param labs: labels in name.
 * @param parent: up to where to make, if NULL up to root label.
 * @return lowest element with name nm, or NULL on malloc failure.
 */
static struct val_neg_data* neg_data_chain(
	uint8_t* nm, size_t nm_len, int labs, struct val_neg_data* parent)
{
	int i;
	int tolabs = parent?parent->labs:0;
	struct val_neg_data* el, *first = NULL, *prev = NULL;

	/* create the new subtree, i is labelcount of current creation */
	/* this creates a 'first' to z->parent=NULL list of zones */
	for(i=labs; i!=tolabs; i--) {
		/* create new item */
		el = neg_setup_data_node(nm, nm_len, i);
		if(!el) {
			/* need to delete other allocations in this routine!*/
			struct val_neg_data* p = first, *np;
			while(p) {
				np = p->parent;
				free(p->name);
				free(p);
				p = np;
			}
			return NULL;
		}
		if(i == labs) {
			first = el;
		} else {
			prev->parent = el;
		}

		/* prepare for next name */
		prev = el;
		dname_remove_label(&nm, &nm_len);
	}
	return first;
}
/**
 * Remove NSEC records between start and end points.
 * By walking the tree, the tree is sorted canonically.
 * @param neg: negative cache.
 * @param zone: the zone
 * @param el: element to start walking at.
 * @param nsec: the nsec record with the end point
 */
static void wipeout(struct val_neg_cache* neg, struct val_neg_zone* zone,
	struct val_neg_data* el, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)nsec->
		entry.data;
	uint8_t* end = NULL;
	size_t end_len = 0;
	int end_labs = 0, m;
	rbnode_t* walk, *next;
	struct val_neg_data* cur;
	uint8_t buf[257];
	/* get endpoint */
	if(!d || d->count == 0 || d->rr_len[0] < 2+1)
		return;
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC) {
		end = d->rr_data[0]+2;
		end_len = dname_valid(end, d->rr_len[0]-2);
		end_labs = dname_count_labels(end);
	} else {
		/* NSEC3 */
		if(!nsec3_get_nextowner_b32(nsec, 0, buf, sizeof(buf)))
			return;
		end = buf;
		end_labs = dname_count_size_labels(end, &end_len);
	}

	/* sanity check, both owner and end must be below the zone apex */
	if(!dname_subdomain_c(el->name, zone->name) ||
		!dname_subdomain_c(end, zone->name))
		return;

	/* detect end of zone NSEC; wipe until the end of zone */
	if(query_dname_compare(end, zone->name) == 0) {
		end = NULL;
	}

	walk = rbtree_next(&el->node);
	while(walk && walk != RBTREE_NULL) {
		cur = (struct val_neg_data*)walk;
		/* sanity check: must be larger than start */
		if(dname_canon_lab_cmp(cur->name, cur->labs,
			el->name, el->labs, &m) <= 0) {
			/* r == 0 skip original record. */
			/* r < 0 too small! */
			walk = rbtree_next(walk);
			continue;
		}
		/* stop at endpoint, also data at empty nonterminals must be
		 * removed (no NSECs there) so everything between
		 * start and end */
		if(end && dname_canon_lab_cmp(cur->name, cur->labs,
			end, end_labs, &m) >= 0) {
			break;
		}
		/* this element has to be deleted, but we cannot do it
		 * now, because we are walking the tree still ... */
		/* get the next element: */
		next = rbtree_next(walk);
		/* now delete the original element, this may trigger
		 * rbtree rebalances, but really, the next element is
		 * the one we need.
		 * But it may trigger delete of other data and the
		 * entire zone. However, if that happens, this is done
		 * by deleting the *parents* of the element for deletion,
		 * and maybe also the entire zone if it is empty.
		 * But parents are smaller in canonical compare, thus,
		 * if a larger element exists, then it is not a parent,
		 * it cannot get deleted, the zone cannot get empty.
		 * If the next==NULL, then zone can be empty. */
		if(cur->in_use)
			neg_delete_data(neg, cur);
		walk = next;
	}
}
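
/*
 * Example of the wipeout effect: caching an NSEC that runs from
 * a.example.com. to d.example.com. removes cached entries that sort
 * canonically between those two names, such as a previously cached
 * b.example.com. entry and any empty non-terminal nodes in that span,
 * because the NSEC proves the span is empty.
 */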
void neg_insert_data(struct val_neg_cache* neg,
	struct val_neg_zone* zone, struct ub_packed_rrset_key* nsec)
{
	struct packed_rrset_data* d;
	struct val_neg_data* parent;
	struct val_neg_data* el;
	uint8_t* nm = nsec->rk.dname;
	size_t nm_len = nsec->rk.dname_len;
	int labs = dname_count_labels(nsec->rk.dname);

	d = (struct packed_rrset_data*)nsec->entry.data;
	if( !(d->security == sec_status_secure ||
		(d->security == sec_status_unchecked && d->rrsig_count > 0)))
		return;
	log_nametypeclass(VERB_ALGO, "negcache rr",
		nsec->rk.dname, ntohs(nsec->rk.type),
		ntohs(nsec->rk.rrset_class));

	/* find closest enclosing parent data that (still) exists */
	parent = neg_closest_data_parent(zone, nm, nm_len, labs);
	if(parent && query_dname_compare(parent->name, nm) == 0) {
		/* perfect match already exists */
		log_assert(parent->count > 0);
		el = parent;
	} else {
		struct val_neg_data* p, *np;

		/* create subtree for perfect match */
		/* if parent exists, it is in use */
		log_assert(!parent || parent->count > 0);

		el = neg_data_chain(nm, nm_len, labs, parent);
		if(!el) {
			log_err("out of memory inserting NSEC negative cache");
			return;
		}
		el->in_use = 0; /* set on below */

		/* insert the list of data elements into the tree */
		p = el;
		while(p) {
			np = p->parent;
			/* mem use */
			neg->use += sizeof(struct val_neg_data) + p->len;
			/* insert in tree */
			p->zone = zone;
			(void)rbtree_insert(&zone->tree, &p->node);
			/* last one needs proper parent pointer */
			if(np == NULL)
				p->parent = parent;
			p = np;
		}
	}

	if(!el->in_use) {
		struct val_neg_data* p;

		el->in_use = 1;
		/* increase usage count of all parents */
		for(p=el; p; p = p->parent) {
			p->count++;
		}

		neg_lru_front(neg, el);
	} else {
		/* in use, bring to front, lru */
		neg_lru_touch(neg, el);
	}

	/* if nsec3, store last used parameters */
	if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC3) {
		int h;
		uint8_t* s;
		size_t slen, it;
		if(nsec3_get_params(nsec, 0, &h, &it, &s, &slen) &&
			it <= neg->nsec3_max_iter &&
			(h != zone->nsec3_hash || it != zone->nsec3_iter ||
			slen != zone->nsec3_saltlen ||
			memcmp(zone->nsec3_salt, s, slen) != 0)) {
			uint8_t* sa = memdup(s, slen);
			if(sa) {
				free(zone->nsec3_salt);
				zone->nsec3_salt = sa;
				zone->nsec3_saltlen = slen;
				zone->nsec3_hash = h;
				zone->nsec3_iter = it;
			}
		}
	}

	/* wipe out the cache items between NSEC start and end */
	wipeout(neg, zone, el, nsec);
}
void val_neg_addreply(struct val_neg_cache* neg, struct reply_info* rep)
{
	size_t i, need;
	struct ub_packed_rrset_key* soa;
	struct val_neg_zone* zone;
	/* see if secure nsecs inside */
	if(!reply_has_nsec(rep))
		return;
	/* find the zone name in message */
	soa = reply_find_soa(rep);
	if(!soa)
		return;

	log_nametypeclass(VERB_ALGO, "negcache insert for zone",
		soa->rk.dname, LDNS_RR_TYPE_SOA, ntohs(soa->rk.rrset_class));

	/* ask for enough space to store all of it */
	need = calc_data_need(rep) +
		calc_zone_need(soa->rk.dname, soa->rk.dname_len);
	lock_basic_lock(&neg->lock);
	neg_make_space(neg, need);

	/* find or create the zone entry */
	zone = neg_find_zone(neg, soa->rk.dname, soa->rk.dname_len,
		ntohs(soa->rk.rrset_class));
	if(!zone) {
		if(!(zone = neg_create_zone(neg, soa->rk.dname,
			soa->rk.dname_len, ntohs(soa->rk.rrset_class)))) {
			lock_basic_unlock(&neg->lock);
			log_err("out of memory adding negative zone");
			return;
		}
	}
	val_neg_zone_take_inuse(zone);

	/* insert the NSECs */
	for(i=rep->an_numrrsets; i < rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC)
			continue;
		if(!dname_subdomain_c(rep->rrsets[i]->rk.dname,
			zone->name)) continue;
		/* insert NSEC into this zone's tree */
		neg_insert_data(neg, zone, rep->rrsets[i]);
	}
	if(zone->tree.count == 0) {
		/* remove empty zone if inserts failed */
		neg_delete_zone(neg, zone);
	}
	lock_basic_unlock(&neg->lock);
}
/**
 * Lookup closest data record. For NSEC denial.
 * @param zone: zone to look in
 * @param qname: name to look for.
 * @param len: length of name
 * @param labs: labels in name
 * @param data: data element, exact or smaller or NULL
 * @return true if exact match.
 */
static int neg_closest_data(struct val_neg_zone* zone,
	uint8_t* qname, size_t len, int labs, struct val_neg_data** data)
{
	struct val_neg_data key;
	rbnode_t* r;
	key.node.key = &key;
	key.name = qname;
	key.len = len;
	key.labs = labs;
	if(rbtree_find_less_equal(&zone->tree, &key, &r)) {
		/* exact match */
		*data = (struct val_neg_data*)r;
		return 1;
	} else {
		/* smaller element or none */
		*data = (struct val_neg_data*)r;
		return 0;
	}
}
int val_neg_dlvlookup(struct val_neg_cache* neg, uint8_t* qname, size_t len,
	uint16_t qclass, struct rrset_cache* rrset_cache, time_t now)
{
	/* lookup closest zone */
	struct val_neg_zone* zone;
	struct val_neg_data* data;
	int labs;
	struct ub_packed_rrset_key* nsec;
	struct packed_rrset_data* d;
	uint32_t flags;
	uint8_t* wc;
	struct query_info qinfo;

	log_nametypeclass(VERB_ALGO, "negcache dlvlookup", qname,
		LDNS_RR_TYPE_DLV, qclass);

	labs = dname_count_labels(qname);
	lock_basic_lock(&neg->lock);
	zone = neg_closest_zone_parent(neg, qname, len, labs, qclass);
	while(zone && !zone->in_use)
		zone = zone->parent;
	if(!zone) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	log_nametypeclass(VERB_ALGO, "negcache zone", zone->name, 0,
		zone->dclass);

	/* DLV is defined to use NSEC only */
	if(zone->nsec3_hash) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}

	/* lookup closest data record */
	(void)neg_closest_data(zone, qname, len, labs, &data);
	while(data && !data->in_use)
		data = data->parent;
	if(!data) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	log_nametypeclass(VERB_ALGO, "negcache rr", data->name,
		LDNS_RR_TYPE_NSEC, zone->dclass);

	/* lookup rrset in rrset cache */
	flags = 0;
	if(query_dname_compare(data->name, zone->name) == 0)
		flags = PACKED_RRSET_NSEC_AT_APEX;
	nsec = rrset_cache_lookup(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC, zone->dclass, flags, now, 0);

	/* check if secure and TTL ok */
	if(!nsec) {
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	d = (struct packed_rrset_data*)nsec->entry.data;
	if(!d || now > d->ttl) {
		lock_rw_unlock(&nsec->entry.lock);
		/* delete data record if expired */
		neg_delete_data(neg, data);
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	if(d->security != sec_status_secure) {
		lock_rw_unlock(&nsec->entry.lock);
		neg_delete_data(neg, data);
		lock_basic_unlock(&neg->lock);
		return 0;
	}
	verbose(VERB_ALGO, "negcache got secure rrset");

	/* check NSEC security */
	/* check if NSEC proves no DLV type exists */
	/* check if NSEC proves NXDOMAIN for qname */
	qinfo.qname = qname;
	qinfo.qtype = LDNS_RR_TYPE_DLV;
	qinfo.qclass = qclass;
	if(!nsec_proves_nodata(nsec, &qinfo, &wc) &&
		!val_nsec_proves_name_error(nsec, qname)) {
		/* the NSEC is not a denial for the DLV */
		lock_rw_unlock(&nsec->entry.lock);
		lock_basic_unlock(&neg->lock);
		verbose(VERB_ALGO, "negcache not proven");
		return 0;
	}
	/* so the NSEC was a NODATA proof, or NXDOMAIN proof. */

	/* no need to check for wildcard NSEC; no wildcards in DLV repos */
	/* no need to lookup SOA record for client; no response message */

	lock_rw_unlock(&nsec->entry.lock);
	/* if OK touch the LRU for neg_data element */
	neg_lru_touch(neg, data);
	lock_basic_unlock(&neg->lock);
	verbose(VERB_ALGO, "negcache DLV denial proven");
	return 1;
}
/** see if the reply has signed NSEC records and return the signer */
static uint8_t* reply_nsec_signer(struct reply_info* rep, size_t* signer_len,
	uint16_t* dclass)
{
	size_t i;
	struct packed_rrset_data* d;
	uint8_t* s;
	for(i=rep->an_numrrsets; i < rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC ||
			ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC3) {
			d = (struct packed_rrset_data*)rep->rrsets[i]->
				entry.data;
			/* return first signer name of first NSEC */
			if(d->rrsig_count != 0) {
				val_find_rrset_signer(rep->rrsets[i],
					&s, signer_len);
				if(s && *signer_len) {
					*dclass = ntohs(rep->rrsets[i]->
						rk.rrset_class);
					return s;
				}
			}
		}
	}
	return NULL;
}
void val_neg_addreferral(struct val_neg_cache* neg, struct reply_info* rep,
	uint8_t* zone_name)
{
	size_t i, need;
	uint8_t* signer;
	size_t signer_len;
	uint16_t dclass;
	struct val_neg_zone* zone;
	/* no SOA in this message, find RRSIG over NSEC's signer name.
	 * note the NSEC records are maybe not validated yet */
	signer = reply_nsec_signer(rep, &signer_len, &dclass);
	if(!signer)
		return;
	if(!dname_subdomain_c(signer, zone_name)) {
		/* the signer is not in the bailiwick, throw it out */
		return;
	}

	log_nametypeclass(VERB_ALGO, "negcache insert referral ",
		signer, LDNS_RR_TYPE_NS, dclass);

	/* ask for enough space to store all of it */
	need = calc_data_need(rep) + calc_zone_need(signer, signer_len);
	lock_basic_lock(&neg->lock);
	neg_make_space(neg, need);

	/* find or create the zone entry */
	zone = neg_find_zone(neg, signer, signer_len, dclass);
	if(!zone) {
		if(!(zone = neg_create_zone(neg, signer, signer_len,
			dclass))) {
			lock_basic_unlock(&neg->lock);
			log_err("out of memory adding negative zone");
			return;
		}
	}
	val_neg_zone_take_inuse(zone);

	/* insert the NSECs */
	for(i=rep->an_numrrsets; i < rep->an_numrrsets+rep->ns_numrrsets; i++){
		if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC &&
			ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC3)
			continue;
		if(!dname_subdomain_c(rep->rrsets[i]->rk.dname,
			zone->name)) continue;
		/* insert NSEC into this zone's tree */
		neg_insert_data(neg, zone, rep->rrsets[i]);
	}
	if(zone->tree.count == 0) {
		/* remove empty zone if inserts failed */
		neg_delete_zone(neg, zone);
	}
	lock_basic_unlock(&neg->lock);
}
/**
 * Check that an NSEC3 rrset does not have a type set.
 * None of the nsec3s in a hash-collision are allowed to have the type
 * (since we do not know which one is the nsec3 looked at, flags, ..., we
 * ignore the cached item and let it bypass negative caching).
 * @param k: the nsec3 rrset to check.
 * @param t: type to check
 * @return true if no RRs have the type.
 */
static int nsec3_no_type(struct ub_packed_rrset_key* k, uint16_t t)
{
	int count = (int)((struct packed_rrset_data*)k->entry.data)->count;
	int i;
	for(i=0; i<count; i++)
		if(nsec3_has_type(k, i, t))
			return 0;
	return 1;
}
/**
 * See if an rrset exists in the rrset cache.
 * If it does, the bit is checked, and if not expired, it is returned
 * allocated in region.
 * @param rrset_cache: rrset cache
 * @param qname: to lookup rrset name
 * @param qname_len: length of qname.
 * @param qtype: type of rrset to lookup, host order
 * @param qclass: class of rrset to lookup, host order
 * @param flags: flags for rrset to lookup
 * @param region: where to alloc result
 * @param checkbit: if true, a bit in the nsec typemap is checked for absence.
 * @param checktype: which bit to check
 * @param now: to check ttl against
 * @return rrset or NULL
 */
static struct ub_packed_rrset_key*
grab_nsec(struct rrset_cache* rrset_cache, uint8_t* qname, size_t qname_len,
	uint16_t qtype, uint16_t qclass, uint32_t flags,
	struct regional* region, int checkbit, uint16_t checktype,
	time_t now)
{
	struct ub_packed_rrset_key* r, *k = rrset_cache_lookup(rrset_cache,
		qname, qname_len, qtype, qclass, flags, now, 0);
	struct packed_rrset_data* d;
	if(!k) return NULL;
	d = (struct packed_rrset_data*)k->entry.data;
	if(d->ttl < now) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* only secure or unchecked records that have signatures. */
	if( ! ( d->security == sec_status_secure ||
		(d->security == sec_status_unchecked &&
		d->rrsig_count > 0) ) ) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* check if checktype is absent */
	if(checkbit && (
		(qtype == LDNS_RR_TYPE_NSEC && nsec_has_type(k, checktype)) ||
		(qtype == LDNS_RR_TYPE_NSEC3 && !nsec3_no_type(k, checktype))
		)) {
		lock_rw_unlock(&k->entry.lock);
		return NULL;
	}
	/* looks OK! copy to region and return it */
	r = packed_rrset_copy_region(k, region, now);
	/* if it failed, we return the NULL */
	lock_rw_unlock(&k->entry.lock);
	return r;
}
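
/*
 * Illustrative call (mirrors the use in val_neg_getmsg below):
 * grab_nsec(rrset_cache, qname, qname_len, LDNS_RR_TYPE_NSEC, qclass, 0,
 * region, 1, LDNS_RR_TYPE_DS, now) only returns a copy if the cached NSEC
 * is secure (or unchecked with signatures), unexpired, and its type bitmap
 * does not contain DS.
 */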
/** find nsec3 closest encloser in neg cache */
static struct val_neg_data*
neg_find_nsec3_ce(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
	int qlabs, sldns_buffer* buf, uint8_t* hashnc, size_t* nclen)
{
	struct val_neg_data* data;
	uint8_t hashce[NSEC3_SHA_LEN];
	uint8_t b32[257];
	size_t celen, b32len;

	*nclen = 0;
	while(qlabs > 0) {
		/* hash the name */
		if(!(celen=nsec3_get_hashed(buf, qname, qname_len,
			zone->nsec3_hash, zone->nsec3_iter, zone->nsec3_salt,
			zone->nsec3_saltlen, hashce, sizeof(hashce))))
			return NULL;
		if(!(b32len=nsec3_hash_to_b32(hashce, celen, zone->name,
			zone->len, b32, sizeof(b32))))
			return NULL;

		/* lookup (exact match only) */
		data = neg_find_data(zone, b32, b32len, zone->labs+1);
		if(data && data->in_use) {
			/* found ce match! */
			return data;
		}

		*nclen = celen;
		memmove(hashnc, hashce, celen);
		dname_remove_label(&qname, &qname_len);
		qlabs--;
	}
	return NULL;
}
/** check nsec3 parameters on nsec3 rrset with current zone values */
static int
neg_params_ok(struct val_neg_zone* zone, struct ub_packed_rrset_key* rrset)
{
	int h;
	uint8_t* s;
	size_t slen, it;
	if(!nsec3_get_params(rrset, 0, &h, &it, &s, &slen))
		return 0;
	return (h == zone->nsec3_hash && it == zone->nsec3_iter &&
		slen == zone->nsec3_saltlen &&
		memcmp(zone->nsec3_salt, s, slen) == 0);
}
/** get next closer for nsec3 proof */
static struct ub_packed_rrset_key*
neg_nsec3_getnc(struct val_neg_zone* zone, uint8_t* hashnc, size_t nclen,
	struct rrset_cache* rrset_cache, struct regional* region,
	time_t now, uint8_t* b32, size_t maxb32)
{
	struct ub_packed_rrset_key* nc_rrset;
	struct val_neg_data* data;
	size_t b32len;

	if(!(b32len=nsec3_hash_to_b32(hashnc, nclen, zone->name,
		zone->len, b32, maxb32)))
		return NULL;
	(void)neg_closest_data(zone, b32, b32len, zone->labs+1, &data);
	if(!data && zone->tree.count != 0) {
		/* could be before the first entry; return the last
		 * entry (possibly the rollover nsec3 at end) */
		data = (struct val_neg_data*)rbtree_last(&zone->tree);
	}
	while(data && !data->in_use)
		data = data->parent;
	if(!data)
		return NULL;
	/* got a data element in tree, grab it */
	nc_rrset = grab_nsec(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 0, 0, now);
	if(!nc_rrset)
		return NULL;
	if(!neg_params_ok(zone, nc_rrset))
		return NULL;
	return nc_rrset;
}
/** neg cache nsec3 proof procedure */
static struct dns_msg*
neg_nsec3_proof_ds(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
	int qlabs, sldns_buffer* buf, struct rrset_cache* rrset_cache,
	struct regional* region, time_t now, uint8_t* topname)
{
	struct dns_msg* msg;
	struct val_neg_data* data;
	uint8_t hashnc[NSEC3_SHA_LEN];
	size_t nclen;
	struct ub_packed_rrset_key* ce_rrset, *nc_rrset;
	struct nsec3_cached_hash c;
	uint8_t nc_b32[257];

	/* for NSEC3 ; determine the closest encloser for which we
	 * can find an exact match. Remember the hashed lower name,
	 * since that is the one we need a closest match for.
	 * If we find a match straight away, then it becomes NODATA.
	 * Otherwise, NXDOMAIN or if OPTOUT, an insecure delegation.
	 * Also check that parameters are the same on closest encloser
	 * and on closest match.
	 */
	if(!zone->nsec3_hash)
		return NULL; /* not nsec3 zone */

	if(!(data=neg_find_nsec3_ce(zone, qname, qname_len, qlabs, buf,
		hashnc, &nclen))) {
		return NULL;
	}

	/* grab the ce rrset */
	ce_rrset = grab_nsec(rrset_cache, data->name, data->len,
		LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 1,
		LDNS_RR_TYPE_DS, now);
	if(!ce_rrset)
		return NULL;
	if(!neg_params_ok(zone, ce_rrset))
		return NULL;

	if(nclen == 0) {
		/* exact match, just check the type bits */
		/* need: -SOA, -DS, +NS */
		if(nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_SOA) ||
			nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_DS) ||
			!nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_NS))
			return NULL;
		if(!(msg = dns_msg_create(qname, qname_len,
			LDNS_RR_TYPE_DS, zone->dclass, region, 1)))
			return NULL;
		/* TTL reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, ce_rrset, 0))
			return NULL;
		return msg;
	}

	/* optout is not allowed without knowing the trust-anchor in use,
	 * otherwise the optout could spoof away that anchor */
	if(!topname)
		return NULL;

	/* if there is no exact match, it must be in an optout span
	 * (an existing DS implies an NSEC3 must exist) */
	nc_rrset = neg_nsec3_getnc(zone, hashnc, nclen, rrset_cache,
		region, now, nc_b32, sizeof(nc_b32));
	if(!nc_rrset)
		return NULL;
	if(!neg_params_ok(zone, nc_rrset))
		return NULL;
	if(!nsec3_has_optout(nc_rrset, 0))
		return NULL;
	c.hash = hashnc;
	c.hash_len = nclen;
	c.b32 = nc_b32+1;
	c.b32_len = (size_t)nc_b32[0];
	if(nsec3_covers(zone->name, &c, nc_rrset, 0, buf)) {
		/* nc_rrset covers the next closer name.
		 * ce_rrset equals a closer encloser.
		 * nc_rrset is optout.
		 * No need to check wildcard for type DS */
		/* capacity=3: ce + nc + soa(if needed) */
		if(!(msg = dns_msg_create(qname, qname_len,
			LDNS_RR_TYPE_DS, zone->dclass, region, 3)))
			return NULL;
		/* now=0 because TTL was reduced in grab_nsec */
		if(!dns_msg_authadd(msg, region, ce_rrset, 0))
			return NULL;
		if(!dns_msg_authadd(msg, region, nc_rrset, 0))
			return NULL;
		return msg;
	}

	return NULL;
}
/**
 * Add SOA record for external responses.
 * @param rrset_cache: to look into.
 * @param now: current time.
 * @param region: where to perform the allocation
 * @param msg: current msg with NSEC.
 * @param zone: val_neg_zone if we have one.
 * @return false on lookup or alloc failure.
 */
static int add_soa(struct rrset_cache* rrset_cache, time_t now,
	struct regional* region, struct dns_msg* msg, struct val_neg_zone* zone)
{
	struct ub_packed_rrset_key* soa;
	uint8_t* nm;
	size_t nmlen;
	uint16_t dclass;
	if(zone) {
		nm = zone->name;
		nmlen = zone->len;
		dclass = zone->dclass;
	} else {
		/* Assumes the signer is the zone SOA to add */
		nm = reply_nsec_signer(msg->rep, &nmlen, &dclass);
		if(!nm)
			return 0;
	}
	soa = rrset_cache_lookup(rrset_cache, nm, nmlen, LDNS_RR_TYPE_SOA,
		dclass, PACKED_RRSET_SOA_NEG, now, 0);
	if(!soa)
		return 0;
	if(!dns_msg_authadd(msg, region, soa, now)) {
		lock_rw_unlock(&soa->entry.lock);
		return 0;
	}
	lock_rw_unlock(&soa->entry.lock);
	return 1;
}
struct dns_msg*
val_neg_getmsg(struct val_neg_cache* neg, struct query_info* qinfo,
	struct regional* region, struct rrset_cache* rrset_cache,
	sldns_buffer* buf, time_t now, int addsoa, uint8_t* topname)
{
	struct dns_msg* msg;
	struct ub_packed_rrset_key* rrset;
	uint8_t* zname;
	size_t zname_len;
	int zname_labs;
	struct val_neg_zone* zone;

	/* only for DS queries */
	if(qinfo->qtype != LDNS_RR_TYPE_DS)
		return NULL;
	log_assert(!topname || dname_subdomain_c(qinfo->qname, topname));

	/* see if info from neg cache is available.
	 * For NSECs, because there is no optout, a DS next to a delegation
	 * always has exactly an NSEC for it itself; check its DS bit.
	 * flags=0 (not the zone apex).
	 */
	rrset = grab_nsec(rrset_cache, qinfo->qname, qinfo->qname_len,
		LDNS_RR_TYPE_NSEC, qinfo->qclass, 0, region, 1,
		qinfo->qtype, now);
	if(rrset) {
		/* return msg with that rrset */
		if(!(msg = dns_msg_create(qinfo->qname, qinfo->qname_len,
			qinfo->qtype, qinfo->qclass, region, 2)))
			return NULL;
		/* TTL already subtracted in grab_nsec */
		if(!dns_msg_authadd(msg, region, rrset, 0))
			return NULL;
		if(addsoa && !add_soa(rrset_cache, now, region, msg, NULL))
			return NULL;
		return msg;
	}

	/* check NSEC3 neg cache for type DS */
	/* need to look one zone higher for DS type */
	zname = qinfo->qname;
	zname_len = qinfo->qname_len;
	dname_remove_label(&zname, &zname_len);
	zname_labs = dname_count_labels(zname);

	/* lookup closest zone */
	lock_basic_lock(&neg->lock);
	zone = neg_closest_zone_parent(neg, zname, zname_len, zname_labs,
		qinfo->qclass);
	while(zone && !zone->in_use)
		zone = zone->parent;
	/* check that the zone is not too high up so that we do not pick data
	 * out of a zone that is above the last-seen key (or trust-anchor). */
	if(zone && topname) {
		if(!dname_subdomain_c(zone->name, topname))
			zone = NULL;
	}
	if(!zone) {
		lock_basic_unlock(&neg->lock);
		return NULL;
	}

	msg = neg_nsec3_proof_ds(zone, qinfo->qname, qinfo->qname_len,
		zname_labs+1, buf, rrset_cache, region, now, topname);
	if(msg && addsoa && !add_soa(rrset_cache, now, region, msg, zone)) {
		lock_basic_unlock(&neg->lock);
		return NULL;
	}
	lock_basic_unlock(&neg->lock);
	return msg;
}