/*
 * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains a hashtable with LRU keeping of entries.
 *
 */

#include "config.h"
#include "util/storage/lruhash.h"
#include "util/fptr_wlist.h"

void
bin_init(struct lruhash_bin* array, size_t size)
{
	size_t i;
#ifdef THREADS_DISABLED
	(void)array;
#endif
	for(i=0; i<size; i++) {
		lock_quick_init(&array[i].lock);
		lock_protect(&array[i].lock, &array[i],
			sizeof(struct lruhash_bin));
	}
}

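/* Note: start_size must be a power of 2, since the table is indexed
 * with hash & size_mask, and size_mask is set to start_size-1. */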
struct lruhash*
lruhash_create(size_t start_size, size_t maxmem, lruhash_sizefunc_t sizefunc,
	lruhash_compfunc_t compfunc, lruhash_delkeyfunc_t delkeyfunc,
	lruhash_deldatafunc_t deldatafunc, void* arg)
{
	struct lruhash* table = (struct lruhash*)calloc(1,
		sizeof(struct lruhash));
	if(!table)
		return NULL;
	lock_quick_init(&table->lock);
	table->sizefunc = sizefunc;
	table->compfunc = compfunc;
	table->delkeyfunc = delkeyfunc;
	table->deldatafunc = deldatafunc;
	table->cb_arg = arg;
	table->size = start_size;
	table->size_mask = (int)(start_size-1);
	table->lru_start = NULL;
	table->lru_end = NULL;
	table->num = 0;
	table->space_used = 0;
	table->space_max = maxmem;
	table->array = calloc(table->size, sizeof(struct lruhash_bin));
	if(!table->array) {
		lock_quick_destroy(&table->lock);
		free(table);
		return NULL;
	}
	bin_init(table->array, table->size);
	lock_protect(&table->lock, table, sizeof(*table));
	lock_protect(&table->lock, table->array,
		table->size*sizeof(struct lruhash_bin));
	return table;
}

void
bin_delete(struct lruhash* table, struct lruhash_bin* bin)
{
	struct lruhash_entry* p, *np;
	void *d;
	if(!bin)
		return;
	lock_quick_destroy(&bin->lock);
	p = bin->overflow_list;
	bin->overflow_list = NULL;
	while(p) {
		np = p->overflow_next;
		d = p->data;
		(*table->delkeyfunc)(p->key, table->cb_arg);
		(*table->deldatafunc)(d, table->cb_arg);
		p = np;
	}
}

void
bin_split(struct lruhash* table, struct lruhash_bin* newa,
	int newmask)
{
	size_t i;
	struct lruhash_entry *p, *np;
	struct lruhash_bin* newbin;
	/* move entries to the new table. Notice that since hash x is
	 * mapped to bin x & mask, and the new mask uses one more bit,
	 * all entries of one bin go into bin i or bin i|newbit of the
	 * new array. */
#ifndef THREADS_DISABLED
	int newbit = newmask - table->size_mask;
#endif
	/* so, really, this task could also be threaded, per bin. */
	/* LRU list is not changed */
	for(i=0; i<table->size; i++)
	{
		lock_quick_lock(&table->array[i].lock);
		p = table->array[i].overflow_list;
		/* lock both destination bins */
		lock_quick_lock(&newa[i].lock);
		lock_quick_lock(&newa[newbit|i].lock);
		while(p) {
			np = p->overflow_next;
			/* link into correct new bin */
			newbin = &newa[p->hash & newmask];
			p->overflow_next = newbin->overflow_list;
			newbin->overflow_list = p;
			p=np;
		}
		lock_quick_unlock(&newa[i].lock);
		lock_quick_unlock(&newa[newbit|i].lock);
		lock_quick_unlock(&table->array[i].lock);
	}
}

void
lruhash_delete(struct lruhash* table)
{
	size_t i;
	if(!table)
		return;
	/* destroy the lock on the hashtable first, to force a check that it is not in use */
	lock_quick_destroy(&table->lock);
	for(i=0; i<table->size; i++)
		bin_delete(table, &table->array[i]);
	free(table->array);
	free(table);
}

void
bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
{
	struct lruhash_entry* p = bin->overflow_list;
	struct lruhash_entry** prevp = &bin->overflow_list;
	while(p) {
		if(p == entry) {
			*prevp = p->overflow_next;
			return;
		}
		prevp = &p->overflow_next;
		p = p->overflow_next;
	}
}

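/* Reclaims entries from the LRU end while the table is over its space
 * limit. The caller holds the table lock; removed entries are chained
 * onto *list (via overflow_next) so the caller can delete their keys
 * and data after the locks have been released. */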
void
reclaim_space(struct lruhash* table, struct lruhash_entry** list)
{
	struct lruhash_entry* d;
	struct lruhash_bin* bin;
	log_assert(table);
	/* does not delete MRU entry, so table will not be empty. */
	while(table->num > 1 && table->space_used > table->space_max) {
		/* notice that since we hold the hashtable lock, nobody
		   can change the lru chain, so it cannot be deleted
		   underneath us. We still need the hashbin and entry
		   write lock to make sure we flush all users away from
		   the entry; that is unlikely, since with LRU a reader
		   that got a rdlock would have moved it to the front,
		   but we do it to be sure. */
		d = table->lru_end;
		/* specialised delete from the end of the doubly linked
		   list; we know num>1, so there is a previous lru entry. */
		log_assert(d && d->lru_prev);
		table->lru_end = d->lru_prev;
		d->lru_prev->lru_next = NULL;
		/* schedule entry for deletion */
		bin = &table->array[d->hash & table->size_mask];
		table->num --;
		lock_quick_lock(&bin->lock);
		bin_overflow_remove(bin, d);
		d->overflow_next = *list;
		*list = d;
		lock_rw_wrlock(&d->lock);
		table->space_used -= table->sizefunc(d->key, d->data);
		if(table->markdelfunc)
			(*table->markdelfunc)(d->key);
		lock_rw_unlock(&d->lock);
		lock_quick_unlock(&bin->lock);
	}
}

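/* The caller must hold the lock on the bin that is searched. */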
struct lruhash_entry*
bin_find_entry(struct lruhash* table,
	struct lruhash_bin* bin, hashvalue_t hash, void* key)
{
	struct lruhash_entry* p = bin->overflow_list;
	while(p) {
		if(p->hash == hash && table->compfunc(p->key, key) == 0)
			return p;
		p = p->overflow_next;
	}
	return NULL;
}

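/* Doubles the number of bins in the table. The caller holds the table
 * lock; on allocation failure the old, smaller array is kept. */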
void
table_grow(struct lruhash* table)
{
	struct lruhash_bin* newa;
	int newmask;
	size_t i;
	if(table->size_mask == (int)(((size_t)-1)>>1)) {
		log_err("hash array malloc: size_t too small");
		return;
	}
	/* try to allocate the new array; if that fails, keep the old one */
	newa = calloc(table->size*2, sizeof(struct lruhash_bin));
	if(!newa) {
		log_err("hash grow: malloc failed");
		/* continue with the smaller array, though it is slower */
		return;
	}
	bin_init(newa, table->size*2);
	newmask = (table->size_mask << 1) | 1;
	bin_split(table, newa, newmask);
	/* delete the old bins */
	lock_unprotect(&table->lock, table->array);
	for(i=0; i<table->size; i++) {
		lock_quick_destroy(&table->array[i].lock);
	}
	free(table->array);

	table->size *= 2;
	table->size_mask = newmask;
	table->array = newa;
	lock_protect(&table->lock, table->array,
		table->size*sizeof(struct lruhash_bin));
	return;
}

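/* The lru_front, lru_remove and lru_touch routines adjust the LRU
 * doubly linked list and are called with the table lock held. */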
void
lru_front(struct lruhash* table, struct lruhash_entry* entry)
{
	entry->lru_prev = NULL;
	entry->lru_next = table->lru_start;
	if(!table->lru_start)
		table->lru_end = entry;
	else	table->lru_start->lru_prev = entry;
	table->lru_start = entry;
}

void
lru_remove(struct lruhash* table, struct lruhash_entry* entry)
{
	if(entry->lru_prev)
		entry->lru_prev->lru_next = entry->lru_next;
	else	table->lru_start = entry->lru_next;
	if(entry->lru_next)
		entry->lru_next->lru_prev = entry->lru_prev;
	else	table->lru_end = entry->lru_prev;
}

void
lru_touch(struct lruhash* table, struct lruhash_entry* entry)
{
	log_assert(table && entry);
	if(entry == table->lru_start)
		return; /* nothing to do */
	/* remove from current lru position */
	lru_remove(table, entry);
	/* add at front */
	lru_front(table, entry);
}

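/* Insert takes the table lock and then the bin lock. If the key is
 * already present, the existing entry's data is replaced (under its
 * write lock) and the caller's key is deleted. Space reclaiming and
 * table growth happen while the table lock is held, but reclaimed keys
 * and data are deleted only after the lock has been released. */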
void
lruhash_insert(struct lruhash* table, hashvalue_t hash,
	struct lruhash_entry* entry, void* data, void* cb_arg)
{
	struct lruhash_bin* bin;
	struct lruhash_entry* found, *reclaimlist=NULL;
	size_t need_size;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	need_size = table->sizefunc(entry->key, data);
	if(cb_arg == NULL) cb_arg = table->cb_arg;

	/* find bin */
	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);

	/* see if entry exists already */
	if(!(found=bin_find_entry(table, bin, hash, entry->key))) {
		/* if not: add to bin */
		entry->overflow_next = bin->overflow_list;
		bin->overflow_list = entry;
		lru_front(table, entry);
		table->num++;
		table->space_used += need_size;
	} else {
		/* if so: update data - needs a writelock */
		table->space_used += need_size -
			(*table->sizefunc)(found->key, found->data);
		(*table->delkeyfunc)(entry->key, cb_arg);
		lru_touch(table, found);
		lock_rw_wrlock(&found->lock);
		(*table->deldatafunc)(found->data, cb_arg);
		found->data = data;
		lock_rw_unlock(&found->lock);
	}
	lock_quick_unlock(&bin->lock);
	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);
	if(table->num >= table->size)
		table_grow(table);
	lock_quick_unlock(&table->lock);

	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}
}

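/* The returned entry is locked (read or write, as requested) and the
 * caller must unlock it. The entry lock is acquired while the bin lock
 * is still held, so the entry cannot be removed in between. */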
struct lruhash_entry*
lruhash_lookup(struct lruhash* table, hashvalue_t hash, void* key, int wr)
{
	struct lruhash_entry* entry;
	struct lruhash_bin* bin;
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));

	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);
	if((entry=bin_find_entry(table, bin, hash, key)))
		lru_touch(table, entry);
	lock_quick_unlock(&table->lock);

	if(entry) {
		if(wr)	{ lock_rw_wrlock(&entry->lock); }
		else	{ lock_rw_rdlock(&entry->lock); }
	}
	lock_quick_unlock(&bin->lock);
	return entry;
}

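/* The entry is unlinked from the bin and the LRU list under the table
 * and bin locks. The markdel function, if set, is called under the
 * entry write lock; the key and data are deleted only after all locks
 * have been released. */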
void
lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key)
{
	struct lruhash_entry* entry;
	struct lruhash_bin* bin;
	void *d;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));

	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);
	if((entry=bin_find_entry(table, bin, hash, key))) {
		bin_overflow_remove(bin, entry);
		lru_remove(table, entry);
	} else {
		lock_quick_unlock(&table->lock);
		lock_quick_unlock(&bin->lock);
		return;
	}
	table->num--;
	table->space_used -= (*table->sizefunc)(entry->key, entry->data);
	lock_quick_unlock(&table->lock);
	lock_rw_wrlock(&entry->lock);
	if(table->markdelfunc)
		(*table->markdelfunc)(entry->key);
	lock_rw_unlock(&entry->lock);
	lock_quick_unlock(&bin->lock);
	/* finish removal */
	d = entry->data;
	(*table->delkeyfunc)(entry->key, table->cb_arg);
	(*table->deldatafunc)(d, table->cb_arg);
}

/** clear a bin, respecting locks; does not update space accounting or LRU */
static void
bin_clear(struct lruhash* table, struct lruhash_bin* bin)
{
	struct lruhash_entry* p, *np;
	void *d;
	lock_quick_lock(&bin->lock);
	p = bin->overflow_list;
	while(p) {
		lock_rw_wrlock(&p->lock);
		np = p->overflow_next;
		d = p->data;
		if(table->markdelfunc)
			(*table->markdelfunc)(p->key);
		lock_rw_unlock(&p->lock);
		(*table->delkeyfunc)(p->key, table->cb_arg);
		(*table->deldatafunc)(d, table->cb_arg);
		p = np;
	}
	bin->overflow_list = NULL;
	lock_quick_unlock(&bin->lock);
}

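/* Deletes all entries but keeps the allocated bin array; the counters
 * and the LRU list are reset under the table lock. */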
void
lruhash_clear(struct lruhash* table)
{
	size_t i;
	if(!table)
		return;
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));

	lock_quick_lock(&table->lock);
	for(i=0; i<table->size; i++) {
		bin_clear(table, &table->array[i]);
	}
	table->lru_start = NULL;
	table->lru_end = NULL;
	table->num = 0;
	table->space_used = 0;
	lock_quick_unlock(&table->lock);
}

void
lruhash_status(struct lruhash* table, const char* id, int extended)
{
	lock_quick_lock(&table->lock);
	log_info("%s: %u entries, memory %u / %u",
		id, (unsigned)table->num, (unsigned)table->space_used,
		(unsigned)table->space_max);
	log_info("  itemsize %u, array %u, mask %d",
		(unsigned)(table->num? table->space_used/table->num : 0),
		(unsigned)table->size, table->size_mask);
	if(extended) {
		size_t i;
		int min=(int)table->size*2, max=-2;
		for(i=0; i<table->size; i++) {
			int here = 0;
			struct lruhash_entry *en;
			lock_quick_lock(&table->array[i].lock);
			en = table->array[i].overflow_list;
			while(en) {
				here ++;
				en = en->overflow_next;
			}
			lock_quick_unlock(&table->array[i].lock);
			if(extended >= 2)
				log_info("bin[%d] %d", (int)i, here);
			if(here > max) max = here;
			if(here < min) min = here;
		}
		log_info("  bin min %d, avg %.2lf, max %d", min,
			(double)table->num/(double)table->size, max);
	}
	lock_quick_unlock(&table->lock);
}

size_t
lruhash_get_mem(struct lruhash* table)
{
	size_t s;
	lock_quick_lock(&table->lock);
	s = sizeof(struct lruhash) + table->space_used;
#ifdef USE_THREAD_DEBUG
	if(table->size != 0) {
		size_t i;
		for(i=0; i<table->size; i++)
			s += sizeof(struct lruhash_bin) +
				lock_get_mem(&table->array[i].lock);
	}
#else /* no THREAD_DEBUG */
	if(table->size != 0)
		s += (table->size)*(sizeof(struct lruhash_bin) +
			lock_get_mem(&table->array[0].lock));
#endif
	lock_quick_unlock(&table->lock);
	s += lock_get_mem(&table->lock);
	return s;
}

void
lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_t md)
{
	lock_quick_lock(&table->lock);
	table->markdelfunc = md;
	lock_quick_unlock(&table->lock);
}

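/* The table lock and the bin lock are held while func is called, so
 * func must not try to lock or modify this hash table itself. */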
void
lruhash_traverse(struct lruhash* h, int wr,
	void (*func)(struct lruhash_entry*, void*), void* arg)
{
	size_t i;
	struct lruhash_entry* e;

	lock_quick_lock(&h->lock);
	for(i=0; i<h->size; i++) {
		lock_quick_lock(&h->array[i].lock);
		for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
			if(wr) {
				lock_rw_wrlock(&e->lock);
			} else {
				lock_rw_rdlock(&e->lock);
			}
			(*func)(e, arg);
			lock_rw_unlock(&e->lock);
		}
		lock_quick_unlock(&h->array[i].lock);
	}
	lock_quick_unlock(&h->lock);
}