/*
 * services/cache/rrset.c - Resource record set cache.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the rrset cache.
 */
#include "config.h"
#include "services/cache/rrset.h"
#include "ldns/rrdef.h"
#include "util/storage/slabhash.h"
#include "util/config_file.h"
#include "util/data/packed_rrset.h"
#include "util/data/msgreply.h"
#include "util/regional.h"
#include "util/alloc.h"
void
rrset_markdel(void* key)
{
	struct ub_packed_rrset_key* r = (struct ub_packed_rrset_key*)key;
	r->id = 0;
}
struct rrset_cache* rrset_cache_create(struct config_file* cfg,
	struct alloc_cache* alloc)
{
	size_t slabs = (cfg?cfg->rrset_cache_slabs:HASH_DEFAULT_SLABS);
	size_t startarray = HASH_DEFAULT_STARTARRAY;
	size_t maxmem = (cfg?cfg->rrset_cache_size:HASH_DEFAULT_MAXMEM);

	struct rrset_cache *r = (struct rrset_cache*)slabhash_create(slabs,
		startarray, maxmem, ub_rrset_sizefunc, ub_rrset_compare,
		ub_rrset_key_delete, rrset_data_delete, alloc);
	slabhash_setmarkdel(&r->table, &rrset_markdel);
	return r;
}
void rrset_cache_delete(struct rrset_cache* r)
{
	if(!r)
		return;
	slabhash_delete(&r->table);
	/* slabhash delete also does free(r), since table is first in struct*/
}
struct rrset_cache* rrset_cache_adjust(struct rrset_cache *r,
	struct config_file* cfg, struct alloc_cache* alloc)
{
	if(!r || !cfg || cfg->rrset_cache_slabs != r->table.size ||
		cfg->rrset_cache_size != slabhash_get_size(&r->table))
	{
		rrset_cache_delete(r);
		r = rrset_cache_create(cfg, alloc);
	}
	return r;
}
void
rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key,
	hashvalue_t hash, rrset_id_t id)
{
	struct lruhash* table = slabhash_gettable(&r->table, hash);
	/*
	 * This leads to locking problems, deadlocks, if the caller is
	 * holding any other rrset lock.
	 * Because a lookup through the hashtable does:
	 *	tablelock -> entrylock	(for that entry caller holds)
	 * And this would do
	 *	entrylock(already held) -> tablelock
	 * And if two threads do this, it results in deadlock.
	 * So, the caller must not hold entrylock.
	 */
	lock_quick_lock(&table->lock);
	/* we have locked the hash table, the item can still be deleted.
	 * because it could already have been reclaimed, but not yet set id=0.
	 * This is because some lruhash routines have lazy deletion.
	 * so, we must acquire a lock on the item to verify the id != 0.
	 * also, with hash not changed, we are using the right slab.
	 */
	lock_rw_rdlock(&key->entry.lock);
	if(key->id == id && key->entry.hash == hash) {
		lru_touch(table, &key->entry);
	}
	lock_rw_unlock(&key->entry.lock);
	lock_quick_unlock(&table->lock);
}
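
/*
 * Added commentary (not in the original file), spelling out the lock
 * order rule from the comment above.  A hashtable lookup takes the
 * tablelock and then an entrylock; rrset_cache_touch also starts with
 * the tablelock.  A caller that did
 *
 *	lock_rw_rdlock(&key->entry.lock);
 *	rrset_cache_touch(r, key, hash, id);
 *
 * would wait for the tablelock while holding the entrylock, and can
 * deadlock against a concurrent lookup that holds the tablelock and
 * waits for that same entrylock.  Release the entry lock first.
 */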
/** see if rrset needs to be updated in the cache */
static int
need_to_update_rrset(void* nd, void* cd, time_t timenow, int equal, int ns)
{
	struct packed_rrset_data* newd = (struct packed_rrset_data*)nd;
	struct packed_rrset_data* cached = (struct packed_rrset_data*)cd;
	/*	o store if rrset has been validated
	 *		everything better than bogus data
	 *		secure is preferred */
	if( newd->security == sec_status_secure &&
		cached->security != sec_status_secure)
		return 1;
	if( cached->security == sec_status_bogus &&
		newd->security != sec_status_bogus && !equal)
		return 1;
	/*	o if current RRset is more trustworthy - insert it */
	if( newd->trust > cached->trust ) {
		/* if the cached rrset is bogus, and this one equal,
		 * do not update the TTL - let it expire. */
		if(equal && cached->ttl >= timenow &&
			cached->security == sec_status_bogus)
			return 0;
		return 1;
	}
	/*	o item in cache has expired */
	if( cached->ttl < timenow )
		return 1;
	/*	o same trust, but different in data - insert it */
	if( newd->trust == cached->trust && !equal ) {
		/* if this is type NS, do not 'stick' to owner that changes
		 * the NS RRset, but use the old TTL for the new data, and
		 * update to fetch the latest data. ttl is not expired, because
		 * that check was before this one. */
		if(ns) {
			size_t i;
			newd->ttl = cached->ttl;
			for(i=0; i<(newd->count+newd->rrsig_count); i++)
				if(newd->rr_ttl[i] > newd->ttl)
					newd->rr_ttl[i] = newd->ttl;
		}
		return 1;
	}
	return 0;
}
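
/*
 * Added summary (not in the original file) of the decision ladder
 * above; the first matching rule wins:
 *	new is secure, cached is not           -> update
 *	cached bogus, new not bogus, differs   -> update
 *	new is more trusted                    -> update, except equal data
 *	                                          on unexpired bogus cache
 *	cached TTL expired                     -> update
 *	equal trust, different data            -> update (NS keeps old TTL)
 *	otherwise                              -> keep the cached rrset
 */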
/** Update RRSet special key ID */
static void
rrset_update_id(struct rrset_ref* ref, struct alloc_cache* alloc)
{
	/* this may clear the cache and invalidate lock below */
	uint64_t newid = alloc_get_id(alloc);
	/* obtain writelock */
	lock_rw_wrlock(&ref->key->entry.lock);
	/* check if it was deleted in the meantime, if so, skip update */
	if(ref->key->id == ref->id) {
		ref->key->id = newid;
		ref->id = newid;
	}
	lock_rw_unlock(&ref->key->entry.lock);
}
int
rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
	struct alloc_cache* alloc, time_t timenow)
{
	struct lruhash_entry* e;
	struct ub_packed_rrset_key* k = ref->key;
	hashvalue_t h = k->entry.hash;
	uint16_t rrset_type = ntohs(k->rk.type);
	int equal = 0;
	log_assert(ref->id != 0 && k->id != 0);
	/* looks up item with a readlock - no editing! */
	if((e=slabhash_lookup(&r->table, h, k, 0)) != 0) {
		/* return id and key as they will be used in the cache
		 * since the lruhash_insert, if item already exists, deallocs
		 * the passed key in favor of the already stored key.
		 * because of the small gap (see below) this key ptr and id
		 * may prove later to be already deleted, which is no problem
		 * as it only makes a cache miss.
		 */
		ref->key = (struct ub_packed_rrset_key*)e->key;
		ref->id = ref->key->id;
		equal = rrsetdata_equal((struct packed_rrset_data*)k->entry.
			data, (struct packed_rrset_data*)e->data);
		if(!need_to_update_rrset(k->entry.data, e->data, timenow,
			equal, (rrset_type==LDNS_RR_TYPE_NS))) {
			/* cache is superior, return that value */
			lock_rw_unlock(&e->lock);
			ub_packed_rrset_parsedelete(k, alloc);
			if(equal) return 2;
			return 1;
		}
		lock_rw_unlock(&e->lock);
		/* Go on and insert the passed item.
		 * small gap here, where entry is not locked.
		 * possibly entry is updated with something else.
		 * we then overwrite that with our data.
		 * this is just too bad, its cache anyway. */
		/* use insert to update entry to manage lruhash
		 * cache size values nicely. */
	}
	log_assert(ref->key->id != 0);
	slabhash_insert(&r->table, h, &k->entry, k->entry.data, alloc);
	if(e) {
		/* For NSEC, NSEC3, DNAME, when rdata is updated, update
		 * the ID number so that proofs in message cache are
		 * invalidated */
		if((rrset_type == LDNS_RR_TYPE_NSEC
			|| rrset_type == LDNS_RR_TYPE_NSEC3
			|| rrset_type == LDNS_RR_TYPE_DNAME) && !equal) {
			rrset_update_id(ref, alloc);
		}
		return 1;
	}
	return 0;
}
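
/*
 * Illustrative caller sketch (added commentary, not in the original
 * file): inserting a freshly parsed rrset "k" whose id and entry.hash
 * have already been set by the allocator and rrset_key_hash:
 *
 *	struct rrset_ref ref;
 *	ref.key = k;
 *	ref.id = k->id;
 *	(void)rrset_cache_update(rc, &ref, alloc, now);
 *
 * Use ref.key and ref.id afterwards; k itself may have been
 * deallocated in favor of the key already stored in the cache.
 */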
struct ub_packed_rrset_key*
rrset_cache_lookup(struct rrset_cache* r, uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, uint32_t flags, time_t timenow,
	int wr)
{
	struct lruhash_entry* e;
	struct ub_packed_rrset_key key;

	key.entry.key = &key;
	key.entry.data = NULL;
	key.rk.dname = qname;
	key.rk.dname_len = qnamelen;
	key.rk.type = htons(qtype);
	key.rk.rrset_class = htons(qclass);
	key.rk.flags = flags;

	key.entry.hash = rrset_key_hash(&key.rk);

	if((e = slabhash_lookup(&r->table, key.entry.hash, &key, wr))) {
		/* check TTL */
		struct packed_rrset_data* data =
			(struct packed_rrset_data*)e->data;
		if(timenow > data->ttl) {
			lock_rw_unlock(&e->lock);
			return NULL;
		}
		/* we're done */
		return (struct ub_packed_rrset_key*)e->key;
	}
	return NULL;
}
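
/*
 * Illustrative usage (added, not in the original file): read-lookup of
 * an A rrset; on a hit the entry lock is held and the caller must
 * release it.  LDNS_RR_TYPE_A and LDNS_RR_CLASS_IN are from
 * ldns/rrdef.h.
 *
 *	struct ub_packed_rrset_key* rrset = rrset_cache_lookup(rc, dname,
 *		dnamelen, LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, 0, now, 0);
 *	if(rrset) {
 *		struct packed_rrset_data* d = (struct packed_rrset_data*)
 *			rrset->entry.data;
 *		... read d ...
 *		lock_rw_unlock(&rrset->entry.lock);
 *	}
 */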
int
rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow)
{
	size_t i;
	for(i=0; i<count; i++) {
		if(i>0 && ref[i].key == ref[i-1].key)
			continue; /* only lock items once */
		lock_rw_rdlock(&ref[i].key->entry.lock);
		if(ref[i].id != ref[i].key->id || timenow >
			((struct packed_rrset_data*)(ref[i].key->entry.data))
			->ttl) {
			/* failure! rollback our readlocks */
			rrset_array_unlock(ref, i+1);
			return 0;
		}
	}
	return 1;
}
void
rrset_array_unlock(struct rrset_ref* ref, size_t count)
{
	size_t i;
	for(i=0; i<count; i++) {
		if(i>0 && ref[i].key == ref[i-1].key)
			continue; /* only unlock items once */
		lock_rw_unlock(&ref[i].key->entry.lock);
	}
}
void
rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch,
	struct rrset_ref* ref, size_t count)
{
	hashvalue_t* h;
	size_t i;
	if(!(h = (hashvalue_t*)regional_alloc(scratch,
		sizeof(hashvalue_t)*count)))
		log_warn("rrset LRU: memory allocation failed");
	else	/* store hash values */
		for(i=0; i<count; i++)
			h[i] = ref[i].key->entry.hash;
	/* unlock */
	for(i=0; i<count; i++) {
		if(i>0 && ref[i].key == ref[i-1].key)
			continue; /* only unlock items once */
		lock_rw_unlock(&ref[i].key->entry.lock);
	}
	if(h) {
		/* LRU touch, with no rrset locks held */
		for(i=0; i<count; i++) {
			if(i>0 && ref[i].key == ref[i-1].key)
				continue; /* only touch items once */
			rrset_cache_touch(r, ref[i].key, h[i], ref[i].id);
		}
	}
}
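
/*
 * Illustrative pattern (added, not in the original file): check that
 * all rrsets referenced from a cached message are still current, read
 * them, then unlock and LRU-touch with no entry locks held.  "rep" is
 * assumed to be a struct reply_info from the message cache (see
 * util/data/msgreply.h).
 *
 *	if(rrset_array_lock(rep->ref, rep->rrset_count, now)) {
 *		... read the locked rrsets ...
 *		rrset_array_unlock_touch(rc, scratch, rep->ref,
 *			rep->rrset_count);
 *	}
 */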
void
rrset_update_sec_status(struct rrset_cache* r,
	struct ub_packed_rrset_key* rrset, time_t now)
{
	struct packed_rrset_data* updata =
		(struct packed_rrset_data*)rrset->entry.data;
	struct lruhash_entry* e;
	struct packed_rrset_data* cachedata;

	/* hash it again to make sure it has a hash */
	rrset->entry.hash = rrset_key_hash(&rrset->rk);

	e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 1);
	if(!e)
		return; /* not in the cache anymore */
	cachedata = (struct packed_rrset_data*)e->data;
	if(!rrsetdata_equal(updata, cachedata)) {
		lock_rw_unlock(&e->lock);
		return; /* rrset has changed in the meantime */
	}
	/* update the cached rrset */
	if(updata->security > cachedata->security) {
		size_t i;
		if(updata->trust > cachedata->trust)
			cachedata->trust = updata->trust;
		cachedata->security = updata->security;
		/* for NS records only shorter TTLs, other types: update it */
		if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_NS ||
			updata->ttl+now < cachedata->ttl ||
			cachedata->ttl < now ||
			updata->security == sec_status_bogus) {
			cachedata->ttl = updata->ttl + now;
			for(i=0; i<cachedata->count+cachedata->rrsig_count; i++)
				cachedata->rr_ttl[i] = updata->rr_ttl[i]+now;
		}
	}
	lock_rw_unlock(&e->lock);
}
void
rrset_check_sec_status(struct rrset_cache* r,
	struct ub_packed_rrset_key* rrset, time_t now)
{
	struct packed_rrset_data* updata =
		(struct packed_rrset_data*)rrset->entry.data;
	struct lruhash_entry* e;
	struct packed_rrset_data* cachedata;

	/* hash it again to make sure it has a hash */
	rrset->entry.hash = rrset_key_hash(&rrset->rk);
	e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 0);
	if(!e)
		return; /* not in the cache anymore */
	cachedata = (struct packed_rrset_data*)e->data;
	if(now > cachedata->ttl || !rrsetdata_equal(updata, cachedata)) {
		lock_rw_unlock(&e->lock);
		return; /* expired, or rrset has changed in the meantime */
	}
	if(cachedata->security > updata->security) {
		updata->security = cachedata->security;
		if(cachedata->security == sec_status_bogus) {
			size_t i;
			updata->ttl = cachedata->ttl - now;
			for(i=0; i<cachedata->count+cachedata->rrsig_count; i++)
				if(cachedata->rr_ttl[i] < now)
					updata->rr_ttl[i] = 0;
				else	updata->rr_ttl[i] =
					cachedata->rr_ttl[i]-now;
		}
		if(cachedata->trust > updata->trust)
			updata->trust = cachedata->trust;
	}
	lock_rw_unlock(&e->lock);
}
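
/*
 * Added note (not in the original file): rrset_update_sec_status and
 * rrset_check_sec_status work as a pair.  The first pushes a fresh
 * validation result into the cached copy; the second pulls a cached
 * security status (and, for bogus data, the remaining TTL) back onto a
 * just-fetched rrset so the same data is not validated twice.
 */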
void rrset_cache_remove(struct rrset_cache* r, uint8_t* nm, size_t nmlen,
	uint16_t type, uint16_t dclass, uint32_t flags)
{
	struct ub_packed_rrset_key key;
	key.entry.key = &key;
	key.rk.dname = nm;
	key.rk.dname_len = nmlen;
	key.rk.rrset_class = htons(dclass);
	key.rk.type = htons(type);
	key.rk.flags = flags;
	key.entry.hash = rrset_key_hash(&key.rk);
	slabhash_remove(&r->table, key.entry.hash, &key);
}
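
/*
 * Illustrative usage (added, not in the original file): drop a cached
 * NS rrset for the zone name "nm" (uncompressed wireformat, "nmlen"
 * bytes long):
 *
 *	rrset_cache_remove(rc, nm, nmlen, LDNS_RR_TYPE_NS,
 *		LDNS_RR_CLASS_IN, 0);
 */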