/*
 * services/cache/rrset.c - Resource record set cache.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains the rrset cache.
 */
#include "config.h"
#include "services/cache/rrset.h"
#include "sldns/rrdef.h"
#include "util/storage/slabhash.h"
#include "util/config_file.h"
#include "util/data/packed_rrset.h"
#include "util/data/msgreply.h"
#include "util/regional.h"
#include "util/alloc.h"

/** mark an rrset entry as deleted; an id of 0 means the entry is invalid.
 * This is the markdel routine installed on the slabhash below. */
void
rrset_markdel(void* key)
{
	struct ub_packed_rrset_key* r = (struct ub_packed_rrset_key*)key;
	r->id = 0;
}

struct rrset_cache* rrset_cache_create(struct config_file* cfg,
	struct alloc_cache* alloc)
{
	size_t slabs = (cfg?cfg->rrset_cache_slabs:HASH_DEFAULT_SLABS);
	size_t startarray = HASH_DEFAULT_STARTARRAY;
	size_t maxmem = (cfg?cfg->rrset_cache_size:HASH_DEFAULT_MAXMEM);

	struct rrset_cache *r = (struct rrset_cache*)slabhash_create(slabs,
		startarray, maxmem, ub_rrset_sizefunc, ub_rrset_compare,
		ub_rrset_key_delete, rrset_data_delete, alloc);
	if(!r)
		return NULL; /* slabhash_create returns NULL on malloc failure */
	slabhash_setmarkdel(&r->table, &rrset_markdel);
	return r;
}
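
/*
 * Lifecycle sketch (illustrative only, not part of the original file;
 * error handling is up to the caller):
 *
 *	struct rrset_cache* rc = rrset_cache_create(cfg, alloc);
 *	if(!rc)
 *		fatal_exit("malloc failure");
 *	... serve queries ...
 *	rc = rrset_cache_adjust(rc, cfg, alloc);  (e.g. after a reload)
 *	...
 *	rrset_cache_delete(rc);
 */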

void rrset_cache_delete(struct rrset_cache* r)
{
	if(!r)
		return;
	slabhash_delete(&r->table);
	/* slabhash_delete also frees r itself, since table is the first
	 * member of the struct */
}

struct rrset_cache* rrset_cache_adjust(struct rrset_cache *r,
	struct config_file* cfg, struct alloc_cache* alloc)
{
	if(!r || !cfg || cfg->rrset_cache_slabs != r->table.size ||
		cfg->rrset_cache_size != slabhash_get_size(&r->table))
	{
		rrset_cache_delete(r);
		r = rrset_cache_create(cfg, alloc);
	}
	return r;
}

void
rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key,
	hashvalue_t hash, rrset_id_t id)
{
	struct lruhash* table = slabhash_gettable(&r->table, hash);
	/*
	 * This can deadlock if the caller holds any other rrset lock.
	 * A lookup through the hashtable locks in the order
	 *	tablelock -> entrylock   (for the entry the caller holds),
	 * while this function would lock
	 *	entrylock (already held) -> tablelock.
	 * If two threads do this concurrently, they deadlock.
	 * Therefore, the caller must not hold an entrylock.
	 */
	lock_quick_lock(&table->lock);
	/* Even with the hash table locked, the item can still be deleted:
	 * it could already have been reclaimed but not yet have id set to 0,
	 * because some lruhash routines use lazy deletion. So we must lock
	 * the item itself and verify that id != 0. Also, since the hash is
	 * unchanged, we are looking in the right slab. */
	lock_rw_rdlock(&key->entry.lock);
	if(key->id == id && key->entry.hash == hash) {
		lru_touch(table, &key->entry);
	}
	lock_rw_unlock(&key->entry.lock);
	lock_quick_unlock(&table->lock);
}
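
/*
 * Correct call order, as a minimal sketch (not from the original code):
 * read what is needed, drop the entry lock, then touch. The batched
 * variant of this pattern is rrset_array_unlock_touch() below.
 *
 *	hashvalue_t h = k->entry.hash;
 *	rrset_id_t id = k->id;
 *	lock_rw_unlock(&k->entry.lock);
 *	rrset_cache_touch(r, k, h, id);
 */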

/** see if rrset needs to be updated in the cache */
static int
need_to_update_rrset(void* nd, void* cd, time_t timenow, int equal, int ns)
{
	struct packed_rrset_data* newd = (struct packed_rrset_data*)nd;
	struct packed_rrset_data* cached = (struct packed_rrset_data*)cd;
	/*	o store if the rrset has been validated:
	 *		anything is better than bogus data,
	 *		and secure is preferred */
	if( newd->security == sec_status_secure &&
		cached->security != sec_status_secure)
		return 1;
	if( cached->security == sec_status_bogus &&
		newd->security != sec_status_bogus && !equal)
		return 1;
	/*	o if the new RRset is more trustworthy - insert it */
	if( newd->trust > cached->trust ) {
		/* if the cached rrset is bogus and this one is equal,
		 * do not update the TTL - let it expire. */
		if(equal && cached->ttl >= timenow &&
			cached->security == sec_status_bogus)
			return 0;
		return 1;
	}
	/*	o item in cache has expired */
	if( cached->ttl < timenow )
		return 1;
	/*	o same trust, but different data - insert it */
	if( newd->trust == cached->trust && !equal ) {
		/* If this is type NS, do not 'stick' to an owner that keeps
		 * changing the NS RRset: keep the old TTL for the new data,
		 * and update so that the latest data is fetched. The TTL is
		 * not expired, because that was checked above. */
		if(ns) {
			size_t i;
			newd->ttl = cached->ttl;
			for(i=0; i<(newd->count+newd->rrsig_count); i++)
				if(newd->rr_ttl[i] > newd->ttl)
					newd->rr_ttl[i] = newd->ttl;
		}
		return 1;
	}
	return 0;
}

/** Update RRSet special key ID */
static void
rrset_update_id(struct rrset_ref* ref, struct alloc_cache* alloc)
{
	/* this may clear the cache and invalidate the lock below */
	uint64_t newid = alloc_get_id(alloc);
	/* obtain writelock */
	lock_rw_wrlock(&ref->key->entry.lock);
	/* check if it was deleted in the meantime; if so, skip the update */
	if(ref->key->id == ref->id) {
		ref->key->id = newid;
		ref->id = newid;
	}
	lock_rw_unlock(&ref->key->entry.lock);
}

int
rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
	struct alloc_cache* alloc, time_t timenow)
{
	struct lruhash_entry* e;
	struct ub_packed_rrset_key* k = ref->key;
	hashvalue_t h = k->entry.hash;
	uint16_t rrset_type = ntohs(k->rk.type);
	int equal = 0;
	log_assert(ref->id != 0 && k->id != 0);
	/* looks up the item with a readlock - no editing! */
	if((e=slabhash_lookup(&r->table, h, k, 0)) != 0) {
		/* Return the id and key that will be used in the cache,
		 * since lruhash_insert, if the item already exists,
		 * deallocates the passed key in favor of the stored key.
		 * Because of the small gap (see below), this key ptr and id
		 * may later prove to be already deleted, which is no
		 * problem, as it only causes a cache miss.
		 */
		ref->key = (struct ub_packed_rrset_key*)e->key;
		ref->id = ref->key->id;
		equal = rrsetdata_equal((struct packed_rrset_data*)
			k->entry.data, (struct packed_rrset_data*)e->data);
		if(!need_to_update_rrset(k->entry.data, e->data, timenow,
			equal, (rrset_type==LDNS_RR_TYPE_NS))) {
			/* cache is superior, return that value */
			lock_rw_unlock(&e->lock);
			ub_packed_rrset_parsedelete(k, alloc);
			if(equal) return 2;
			return 1;
		}
		lock_rw_unlock(&e->lock);
		/* Go on and insert the passed item.
		 * There is a small gap here where the entry is not locked,
		 * so the entry could be updated with something else, which
		 * we then overwrite with our data. That is just too bad;
		 * it's a cache anyway. */
		/* use insert to update the entry, so that lruhash manages
		 * the cache size values nicely. */
	}
	log_assert(ref->key->id != 0);
	slabhash_insert(&r->table, h, &k->entry, k->entry.data, alloc);
	if(e) {
		/* For NSEC, NSEC3 and DNAME, when the rdata is updated,
		 * update the ID number so that proofs in the message cache
		 * are invalidated */
		if((rrset_type == LDNS_RR_TYPE_NSEC
			|| rrset_type == LDNS_RR_TYPE_NSEC3
			|| rrset_type == LDNS_RR_TYPE_DNAME) && !equal) {
			rrset_update_id(ref, alloc);
		}
		return 1;
	}
	return 0;
}
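
/*
 * Usage sketch (hypothetical caller): k is a freshly allocated rrset with
 * k->entry.hash and k->id set. After the call, ref refers to the rrset
 * that now lives in the cache; k itself may have been deallocated in
 * favor of an existing entry.
 *
 *	struct rrset_ref ref;
 *	ref.key = k;
 *	ref.id = k->id;
 *	(void)rrset_cache_update(r, &ref, alloc, timenow);
 *	... use ref.key and ref.id, not k ...
 */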

struct ub_packed_rrset_key*
rrset_cache_lookup(struct rrset_cache* r, uint8_t* qname, size_t qnamelen,
	uint16_t qtype, uint16_t qclass, uint32_t flags, time_t timenow,
	int wr)
{
	struct lruhash_entry* e;
	struct ub_packed_rrset_key key;

	key.entry.key = &key;
	key.entry.data = NULL;
	key.rk.dname = qname;
	key.rk.dname_len = qnamelen;
	key.rk.type = htons(qtype);
	key.rk.rrset_class = htons(qclass);
	key.rk.flags = flags;

	key.entry.hash = rrset_key_hash(&key.rk);

	if((e = slabhash_lookup(&r->table, key.entry.hash, &key, wr))) {
		/* check TTL */
		struct packed_rrset_data* data =
			(struct packed_rrset_data*)e->data;
		if(timenow > data->ttl) {
			lock_rw_unlock(&e->lock);
			return NULL;
		}
		/* we're done */
		return (struct ub_packed_rrset_key*)e->key;
	}
	return NULL;
}
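
/*
 * Usage sketch (illustrative): on a hit the entry lock is held, and the
 * caller must release it when done reading.
 *
 *	struct ub_packed_rrset_key* rr = rrset_cache_lookup(r, qname,
 *		qnamelen, LDNS_RR_TYPE_A, LDNS_RR_CLASS_IN, 0, timenow, 0);
 *	if(rr) {
 *		... read rr->entry.data under the read lock ...
 *		lock_rw_unlock(&rr->entry.lock);
 *	}
 */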

int
rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow)
{
	size_t i;
	for(i=0; i<count; i++) {
		if(i>0 && ref[i].key == ref[i-1].key)
			continue; /* only lock items once */
		lock_rw_rdlock(&ref[i].key->entry.lock);
		if(ref[i].id != ref[i].key->id || timenow >
			((struct packed_rrset_data*)(ref[i].key->entry.data))
			->ttl) {
			/* failure! roll back our readlocks */
			rrset_array_unlock(ref, i+1);
			return 0;
		}
	}
	return 1;
}

void
rrset_array_unlock(struct rrset_ref* ref, size_t count)
{
	size_t i;
	for(i=0; i<count; i++) {
		if(i>0 && ref[i].key == ref[i-1].key)
			continue; /* only unlock items once */
		lock_rw_unlock(&ref[i].key->entry.lock);
	}
}
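
/*
 * Typical pattern, sketched with the ref array and count from a cached
 * reply (struct reply_info in util/data/msgreply.h):
 *
 *	if(rrset_array_lock(rep->ref, rep->rrset_count, timenow)) {
 *		... all rrsets are locked, current and unexpired ...
 *		rrset_array_unlock(rep->ref, rep->rrset_count);
 *	}
 */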

void
rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch,
	struct rrset_ref* ref, size_t count)
{
	hashvalue_t* h;
	size_t i;
	if(count > RR_COUNT_MAX || !(h = (hashvalue_t*)regional_alloc(scratch,
		sizeof(hashvalue_t)*count))) {
		log_warn("rrset LRU: memory allocation failed");
		h = NULL;
	} else {
		/* store the hash values before the locks are released */
		for(i=0; i<count; i++)
			h[i] = ref[i].key->entry.hash;
	}
	/* unlock */
	for(i=0; i<count; i++) {
		if(i>0 && ref[i].key == ref[i-1].key)
			continue; /* only unlock items once */
		lock_rw_unlock(&ref[i].key->entry.lock);
	}
	if(h) {
		/* LRU touch, with no rrset locks held */
		for(i=0; i<count; i++) {
			if(i>0 && ref[i].key == ref[i-1].key)
				continue; /* only touch items once */
			rrset_cache_touch(r, ref[i].key, h[i], ref[i].id);
		}
	}
}

void
rrset_update_sec_status(struct rrset_cache* r,
	struct ub_packed_rrset_key* rrset, time_t now)
{
	struct packed_rrset_data* updata =
		(struct packed_rrset_data*)rrset->entry.data;
	struct lruhash_entry* e;
	struct packed_rrset_data* cachedata;

	/* hash it again to make sure it has a hash */
	rrset->entry.hash = rrset_key_hash(&rrset->rk);

	e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 1);
	if(!e)
		return; /* not in the cache anymore */
	cachedata = (struct packed_rrset_data*)e->data;
	if(!rrsetdata_equal(updata, cachedata)) {
		lock_rw_unlock(&e->lock);
		return; /* rrset has changed in the meantime */
	}
	/* update the cached rrset */
	if(updata->security > cachedata->security) {
		size_t i;
		if(updata->trust > cachedata->trust)
			cachedata->trust = updata->trust;
		cachedata->security = updata->security;
		/* NS records may only get shorter TTLs; other types are
		 * always updated */
		if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_NS ||
			updata->ttl+now < cachedata->ttl ||
			cachedata->ttl < now ||
			updata->security == sec_status_bogus) {
			cachedata->ttl = updata->ttl + now;
			for(i=0; i<cachedata->count+cachedata->rrsig_count; i++)
				cachedata->rr_ttl[i] = updata->rr_ttl[i]+now;
		}
	}
	lock_rw_unlock(&e->lock);
}

void
rrset_check_sec_status(struct rrset_cache* r,
	struct ub_packed_rrset_key* rrset, time_t now)
{
	struct packed_rrset_data* updata =
		(struct packed_rrset_data*)rrset->entry.data;
	struct lruhash_entry* e;
	struct packed_rrset_data* cachedata;

	/* hash it again to make sure it has a hash */
	rrset->entry.hash = rrset_key_hash(&rrset->rk);

	e = slabhash_lookup(&r->table, rrset->entry.hash, rrset, 0);
	if(!e)
		return; /* not in the cache anymore */
	cachedata = (struct packed_rrset_data*)e->data;
	if(now > cachedata->ttl || !rrsetdata_equal(updata, cachedata)) {
		lock_rw_unlock(&e->lock);
		return; /* expired, or the rrset has changed in the meantime */
	}
	if(cachedata->security > updata->security) {
		updata->security = cachedata->security;
		if(cachedata->security == sec_status_bogus) {
			size_t i;
			updata->ttl = cachedata->ttl - now;
			for(i=0; i<cachedata->count+cachedata->rrsig_count; i++)
				if(cachedata->rr_ttl[i] < now)
					updata->rr_ttl[i] = 0;
				else updata->rr_ttl[i] =
					cachedata->rr_ttl[i]-now;
		}
		if(cachedata->trust > updata->trust)
			updata->trust = cachedata->trust;
	}
	lock_rw_unlock(&e->lock);
}

void rrset_cache_remove(struct rrset_cache* r, uint8_t* nm, size_t nmlen,
	uint16_t type, uint16_t dclass, uint32_t flags)
{
	struct ub_packed_rrset_key key;
	key.entry.key = &key;
	key.rk.dname = nm;
	key.rk.dname_len = nmlen;
	key.rk.rrset_class = htons(dclass);
	key.rk.type = htons(type);
	key.rk.flags = flags;
	key.entry.hash = rrset_key_hash(&key.rk);
	slabhash_remove(&r->table, key.entry.hash, &key);
}
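
/*
 * Example (illustrative only): flush a cached NS rrset for a zone name,
 * given in wire format as nm of length nmlen:
 *
 *	rrset_cache_remove(r, nm, nmlen, LDNS_RR_TYPE_NS,
 *		LDNS_RR_CLASS_IN, 0);
 */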