/*
Copyright (c) 2003-2013, Troy D. Hanson  http://troydhanson.github.com/uthash/
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef UTHASH_H
#define UTHASH_H

#include <string.h>   /* memcmp,strlen */
#include <stddef.h>   /* ptrdiff_t */
#include <stdlib.h>   /* exit() */
#include <stdio.h>    /* fprintf (used by HASH_OOPS when HASH_DEBUG is defined) */
#include "mum.h"

/* These macros use decltype or the earlier __typeof GNU extension.
   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
   when compiling C++ source), this code uses whichever method is available
   or, for VS2008 where neither is available, uses casting workarounds. */
#ifdef _MSC_VER         /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus)  /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else                   /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#define DECLTYPE(x)
#endif
#else                   /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif

#ifdef NO_DECLTYPE
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while(0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while(0)
#endif

/* a number of the hash functions use uint32_t, which isn't defined on win32 */
#ifdef _MSC_VER
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#else
#include <inttypes.h>   /* uint32_t */
#endif

#define UTHASH_VERSION 1.9.8

#ifndef uthash_fatal
#define uthash_fatal(msg) exit(-1)        /* fatal error (out of memory,etc) */
#endif
#ifndef uthash_malloc
#define uthash_malloc(sz) malloc(sz)      /* malloc fcn                      */
#endif
#ifndef uthash_free
#define uthash_free(ptr,sz) free(ptr)     /* free fcn                        */
#endif

#ifndef uthash_noexpand_fyi
#define uthash_noexpand_fyi(tbl)          /* can be defined to log noexpand  */
#endif
#ifndef uthash_expand_fyi
#define uthash_expand_fyi(tbl)            /* can be defined to log expands   */
#endif
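
/* Usage sketch (illustrative, not part of this header): because the hooks
 * above are only defined when not already present, an application can route
 * uthash's allocations and fatal-error handling through its own functions by
 * defining these macros before including uthash.h. The names my_alloc,
 * my_free and my_abort below are hypothetical application functions.
 *
 *   #define uthash_malloc(sz)   my_alloc(sz)
 *   #define uthash_free(ptr,sz) my_free(ptr)
 *   #define uthash_fatal(msg)   my_abort(msg)
 *   #include "uthash.h"
 */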

/* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS 32      /* initial number of buckets        */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5  /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10      /* expand when bucket count reaches */

/* calculate the element whose hash handle address is hhp */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))

#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  unsigned _hf_bkt,_hf_hashv; \
  out=NULL; \
  if (head) { \
     HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
     if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \
       HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
                        keyptr,keylen,out); \
     } \
  } \
} while (0)

#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 1:0))
#define HASH_BLOOM_MAKE(tbl) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
  memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8)))

#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#else
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#define HASH_BLOOM_BYTELEN 0
#endif
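
/* The Bloom filter above is off by default; it is enabled by defining
 * HASH_BLOOM, before this header is included, to the log2 of the desired
 * filter size in bits (bits = 1 << HASH_BLOOM), e.g. -DHASH_BLOOM=16 on the
 * compile line. A quick size check (illustrative arithmetic): HASH_BLOOM=16
 * gives a bit vector of 1<<16 = 65536 bits, so HASH_BLOOM_BYTELEN is
 * 65536/8 = 8192 bytes per hash table.
 */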

#define HASH_MAKE_TABLE(hh,head) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
                   sizeof(UT_hash_table)); \
  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
  (head)->hh.tbl->tail = &((head)->hh); \
  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl->buckets, 0, \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  HASH_BLOOM_MAKE((head)->hh.tbl); \
  (head)->hh.tbl->signature = HASH_SIGNATURE; \
} while(0)

#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
        HASH_ADD_KEYPTR(hh,head,&((add)->fieldname),keylen_in,add)

#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
do { \
  replaced=NULL; \
  HASH_FIND(hh,head,&((add)->fieldname),keylen_in,replaced); \
  if (replaced!=NULL) { \
     HASH_DELETE(hh,head,replaced); \
  } \
  HASH_ADD(hh,head,fieldname,keylen_in,add); \
} while(0)

#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
  unsigned _ha_bkt; \
  (add)->hh.next = NULL; \
  (add)->hh.key = (const char*)keyptr; \
  (add)->hh.keylen = (unsigned)keylen_in; \
  if (!(head)) { \
     head = (add); \
     (head)->hh.prev = NULL; \
     HASH_MAKE_TABLE(hh,head); \
  } else { \
     (head)->hh.tbl->tail->next = (add); \
     (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
     (head)->hh.tbl->tail = &((add)->hh); \
  } \
  (head)->hh.tbl->num_items++; \
  (add)->hh.tbl = (head)->hh.tbl; \
  HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
          (add)->hh.hashv, _ha_bkt); \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
  HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \
  HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
  HASH_FSCK(hh,head); \
} while(0)

#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
do { \
  bkt = ((hashv) & ((num_bkts) - 1)); \
} while(0)
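
/* Usage sketch (illustrative only): a structure becomes hashable by embedding
 * a UT_hash_handle member, conventionally named "hh". The struct, field and
 * variable names below are assumptions for the example, not part of this
 * header.
 *
 *   typedef struct {
 *       char name[32];          // key: a string field
 *       int score;              // payload
 *       UT_hash_handle hh;      // makes this structure hashable
 *   } entry_t;
 *
 *   entry_t *table = NULL;      // an empty hash is simply a NULL head pointer
 *
 *   entry_t *e = (entry_t*)malloc(sizeof(entry_t));
 *   strcpy(e->name, "alice");
 *   e->score = 10;
 *   HASH_ADD(hh, table, name, strlen(e->name), e);           // add, keyed by "name"
 *
 *   entry_t *found = NULL;
 *   HASH_FIND(hh, table, "alice", strlen("alice"), found);   // lookup by key bytes
 */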

/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 *  HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
#define HASH_DELETE(hh,head,delptr) \
do { \
    unsigned _hd_bkt; \
    struct UT_hash_handle *_hd_hh_del; \
    if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
        uthash_free((head)->hh.tbl->buckets, \
                    (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
        HASH_BLOOM_FREE((head)->hh.tbl); \
        uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
        head = NULL; \
    } else { \
        _hd_hh_del = &((delptr)->hh); \
        if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
            (head)->hh.tbl->tail = \
                (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \
                (head)->hh.tbl->hho); \
        } \
        if ((delptr)->hh.prev) { \
            ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \
                    (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
        } else { \
            DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
        } \
        if (_hd_hh_del->next) { \
            ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next + \
                    (head)->hh.tbl->hho))->prev = \
                    _hd_hh_del->prev; \
        } \
        HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
        HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
        (head)->hh.tbl->num_items--; \
    } \
    HASH_FSCK(hh,head); \
} while (0)


/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
#define HASH_FIND_STR(head,findstr,out) \
    HASH_FIND(hh,head,findstr,strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
    HASH_ADD(hh,head,strfield,strlen(add->strfield),add)
#define HASH_REPLACE_STR(head,strfield,add,replaced) \
    HASH_REPLACE(hh,head,strfield,strlen(add->strfield),add,replaced)
#define HASH_FIND_INT(head,findint,out) \
    HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
    HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_REPLACE_INT(head,intfield,add,replaced) \
    HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
#define HASH_FIND_PTR(head,findptr,out) \
    HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
    HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
    HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
#define HASH_DEL(head,delptr) \
    HASH_DELETE(hh,head,delptr)
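
/* Usage sketch for the convenience forms (illustrative only; "user_t",
 * "users" and the field names are hypothetical, not part of this header):
 *
 *   typedef struct {
 *       int id;                 // key: an int field
 *       char name[16];
 *       UT_hash_handle hh;
 *   } user_t;
 *
 *   user_t *users = NULL;
 *
 *   user_t *u = (user_t*)malloc(sizeof(user_t));
 *   u->id = 42;
 *   strcpy(u->name, "bob");
 *   HASH_ADD_INT(users, id, u);              // key is the int field "id"
 *
 *   int wanted = 42;
 *   user_t *found = NULL;
 *   HASH_FIND_INT(users, &wanted, found);    // note: pass a pointer to the key
 *
 *   if (found) { HASH_DEL(users, found); free(found); }
 */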

/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developers only; it compiles away if HASH_DEBUG isn't defined.
 */
#ifdef HASH_DEBUG
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head) \
do { \
    unsigned _bkt_i; \
    unsigned _count, _bkt_count; \
    char *_prev; \
    struct UT_hash_handle *_thh; \
    if (head) { \
        _count = 0; \
        for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
            _bkt_count = 0; \
            _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
            _prev = NULL; \
            while (_thh) { \
               if (_prev != (char*)(_thh->hh_prev)) { \
                   HASH_OOPS("invalid hh_prev %p, actual %p\n", \
                    _thh->hh_prev, _prev ); \
               } \
               _bkt_count++; \
               _prev = (char*)(_thh); \
               _thh = _thh->hh_next; \
            } \
            _count += _bkt_count; \
            if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
               HASH_OOPS("invalid bucket count %d, actual %d\n", \
                (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
            } \
        } \
        if (_count != (head)->hh.tbl->num_items) { \
            HASH_OOPS("invalid hh item count %d, actual %d\n", \
                (head)->hh.tbl->num_items, _count ); \
        } \
        /* traverse hh in app order; check next/prev integrity, count */ \
        _count = 0; \
        _prev = NULL; \
        _thh = &(head)->hh; \
        while (_thh) { \
           _count++; \
           if (_prev !=(char*)(_thh->prev)) { \
              HASH_OOPS("invalid prev %p, actual %p\n", \
                    _thh->prev, _prev ); \
           } \
           _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
           _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
                                 (head)->hh.tbl->hho) : NULL ); \
        } \
        if (_count != (head)->hh.tbl->num_items) { \
            HASH_OOPS("invalid app item count %d, actual %d\n", \
                (head)->hh.tbl->num_items, _count ); \
        } \
    } \
} while (0)
#else
#define HASH_FSCK(hh,head)
#endif

/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the file descriptor given by HASH_EMIT_KEYS; this is used for tuning the
 * hash function. The app can #include <unistd.h> to get the prototype for
 * write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
    unsigned _klen = fieldlen; \
    write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
    write(HASH_EMIT_KEYS, keyptr, fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif
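
/* For example (illustrative), building with -DHASH_EMIT_KEYS=3 and running
 * the program with descriptor 3 redirected to a file captures every added
 * key as a record consisting of an unsigned length followed by the key bytes:
 *
 *   cc -DHASH_EMIT_KEYS=3 -o app app.c
 *   ./app 3>keys.bin
 */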

/* default to the mum_hash-based HASH_XX below unless overridden, e.g.
 * -DHASH_FUNCTION=HASH_SAX (the replacement must take the same parameters
 * as HASH_XX) */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_XX
#endif

#define XX_HASH_PRIME 2654435761U

#define HASH_XX(key,keylen,num_bkts,hashv,bkt) \
do { \
  hashv = mum_hash (key, keylen, XX_HASH_PRIME); \
  bkt = (hashv) & (num_bkts-1); \
} while (0)


/* key comparison function; return 0 if keys equal */
#define HASH_KEYCMP(a,b,len) memcmp(a,b,len)

/* iterate over items in a known bucket to find desired item */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
do { \
  if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \
  else out=NULL; \
  while (out) { \
     if ((out)->hh.keylen == keylen_in) { \
        if ((HASH_KEYCMP((out)->hh.key,keyptr,keylen_in)) == 0) break; \
     } \
     if ((out)->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,(out)->hh.hh_next)); \
     else out = NULL; \
  } \
} while(0)

/* add an item to a bucket */
#define HASH_ADD_TO_BKT(head,addhh) \
do { \
 head.count++; \
 (addhh)->hh_next = head.hh_head; \
 (addhh)->hh_prev = NULL; \
 if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \
 (head).hh_head=addhh; \
 if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
     && (addhh)->tbl->noexpand != 1) { \
       HASH_EXPAND_BUCKETS((addhh)->tbl); \
 } \
} while(0)

/* remove an item from a given bucket */
#define HASH_DEL_IN_BKT(hh,head,hh_del) \
    (head).count--; \
    if ((head).hh_head == hh_del) { \
      (head).hh_head = hh_del->hh_next; \
    } \
    if (hh_del->hh_prev) { \
        hh_del->hh_prev->hh_next = hh_del->hh_next; \
    } \
    if (hh_del->hh_next) { \
        hh_del->hh_next->hh_prev = hh_del->hh_prev; \
    }

/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 *      ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 *      ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 */
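/* A quick worked example of that formula (illustrative numbers): with
 * n = 1000 items and an expansion to b = 64 buckets (so lb = 6),
 * (1000 >> 6) = 15 and (1000 & 63) = 40, which is nonzero, so
 * ideal_chain_maxlen = 15 + 1 = 16 -- the same result as ceil(1000/64).
 */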
#define HASH_EXPAND_BUCKETS(tbl) \
do { \
    unsigned _he_bkt; \
    unsigned _he_bkt_i; \
    struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
    UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
    _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
             2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
    memset(_he_new_buckets, 0, \
            2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    tbl->ideal_chain_maxlen = \
       (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
       ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
    tbl->nonideal_items = 0; \
    for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
    { \
        _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
        while (_he_thh) { \
           _he_hh_nxt = _he_thh->hh_next; \
           HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
           _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
           if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
             tbl->nonideal_items++; \
             _he_newbkt->expand_mult = _he_newbkt->count / \
                                        tbl->ideal_chain_maxlen; \
           } \
           _he_thh->hh_prev = NULL; \
           _he_thh->hh_next = _he_newbkt->hh_head; \
           if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
                _he_thh; \
           _he_newbkt->hh_head = _he_thh; \
           _he_thh = _he_hh_nxt; \
        } \
    } \
    uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
    tbl->num_buckets *= 2; \
    tbl->log2_num_buckets++; \
    tbl->buckets = _he_new_buckets; \
    tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
        (tbl->ineff_expands+1) : 0; \
    if (tbl->ineff_expands > 1) { \
        tbl->noexpand=1; \
        uthash_noexpand_fyi(tbl); \
    } \
    uthash_expand_fyi(tbl); \
} while(0)


/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
  unsigned _hs_i; \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
  if (head) { \
      _hs_insize = 1; \
      _hs_looping = 1; \
      _hs_list = &((head)->hh); \
      while (_hs_looping) { \
          _hs_p = _hs_list; \
          _hs_list = NULL; \
          _hs_tail = NULL; \
          _hs_nmerges = 0; \
          while (_hs_p) { \
              _hs_nmerges++; \
              _hs_q = _hs_p; \
              _hs_psize = 0; \
              for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
                  _hs_psize++; \
                  _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                          ((void*)((char*)(_hs_q->next) + \
                          (head)->hh.tbl->hho)) : NULL); \
                  if (! (_hs_q) ) break; \
              } \
              _hs_qsize = _hs_insize; \
              while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
                  if (_hs_psize == 0) { \
                      _hs_e = _hs_q; \
                      _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                              ((void*)((char*)(_hs_q->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_qsize--; \
                  } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
                      _hs_e = _hs_p; \
                      if (_hs_p){ \
                        _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                                ((void*)((char*)(_hs_p->next) + \
                                (head)->hh.tbl->hho)) : NULL); \
                      } \
                      _hs_psize--; \
                  } else if (( \
                      cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
                             DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
                             ) <= 0) { \
                      _hs_e = _hs_p; \
                      if (_hs_p){ \
                        _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                                ((void*)((char*)(_hs_p->next) + \
                                (head)->hh.tbl->hho)) : NULL); \
                      } \
                      _hs_psize--; \
                  } else { \
                      _hs_e = _hs_q; \
                      _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                              ((void*)((char*)(_hs_q->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_qsize--; \
                  } \
                  if ( _hs_tail ) { \
                      _hs_tail->next = ((_hs_e) ? \
                            ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
                  } else { \
                      _hs_list = _hs_e; \
                  } \
                  if (_hs_e) { \
                      _hs_e->prev = ((_hs_tail) ? \
                         ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
                  } \
                  _hs_tail = _hs_e; \
              } \
              _hs_p = _hs_q; \
          } \
          if (_hs_tail){ \
            _hs_tail->next = NULL; \
          } \
          if ( _hs_nmerges <= 1 ) { \
              _hs_looping=0; \
              (head)->hh.tbl->tail = _hs_tail; \
              DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
          } \
          _hs_insize *= 2; \
      } \
      HASH_FSCK(hh,head); \
  } \
} while (0)
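
/* Usage sketch for HASH_SORT (illustrative; "user_t", "users" and "by_id" are
 * hypothetical, not part of this header). The comparison callback receives
 * two element pointers and returns <0, 0 or >0, memcmp/strcmp style:
 *
 *   int by_id(user_t *a, user_t *b) {
 *       return (a->id - b->id);       // ascending by id
 *   }
 *   ...
 *   HASH_SORT(users, by_id);          // reorders the app-order list in place
 */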

/* This macro selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
  unsigned _src_bkt, _dst_bkt; \
  void *_last_elt=NULL, *_elt; \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
  if (src) { \
    for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
      for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
          _src_hh; \
          _src_hh = _src_hh->hh_next) { \
          _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
          if (cond(_elt)) { \
            _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
            _dst_hh->key = _src_hh->key; \
            _dst_hh->keylen = _src_hh->keylen; \
            _dst_hh->hashv = _src_hh->hashv; \
            _dst_hh->prev = _last_elt; \
            _dst_hh->next = NULL; \
            if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
            if (!dst) { \
              DECLTYPE_ASSIGN(dst,_elt); \
              HASH_MAKE_TABLE(hh_dst,dst); \
            } else { \
              _dst_hh->tbl = (dst)->hh_dst.tbl; \
            } \
            HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
            HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
            (dst)->hh_dst.tbl->num_items++; \
            _last_elt = _elt; \
            _last_elt_hh = _dst_hh; \
          } \
      } \
    } \
  } \
  HASH_FSCK(hh_dst,dst); \
} while (0)
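
/* Usage sketch for HASH_SELECT (illustrative; the struct, the "ah" handle and
 * the "is_admin" predicate are hypothetical). The structure needs a second
 * UT_hash_handle for its presence in the second hash:
 *
 *   typedef struct {
 *       int id;
 *       int admin;
 *       UT_hash_handle hh;       // handle for the "users" hash
 *       UT_hash_handle ah;       // secondary handle for the "admins" hash
 *   } user_t;
 *
 *   int is_admin(void *elt) { return ((user_t*)elt)->admin; }
 *
 *   user_t *users = NULL;        // assume populated via HASH_ADD_INT(users, id, u)
 *   user_t *admins = NULL;
 *   HASH_SELECT(ah, admins, hh, users, is_admin);   // admins now shares items
 */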

#define HASH_CLEAR(hh,head) \
do { \
  if (head) { \
    uthash_free((head)->hh.tbl->buckets, \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head)=NULL; \
  } \
} while(0)

#define HASH_OVERHEAD(hh,head) \
 (size_t)((((head)->hh.tbl->num_items   * sizeof(UT_hash_handle)) + \
           ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \
            (sizeof(UT_hash_table)) + \
            (HASH_BLOOM_BYTELEN)))

#ifdef NO_DECLTYPE
#define HASH_ITER(hh,head,el,tmp) \
for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL); \
  el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL))
#else
#define HASH_ITER(hh,head,el,tmp) \
for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL); \
  el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL))
#endif

/* obtain a count of items in the hash */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0)
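
/* Usage sketch (illustrative; "user_t", "users", "el" and "tmp" are
 * hypothetical names): HASH_ITER is safe even when the current element is
 * deleted inside the loop body, because "tmp" already holds the next element.
 *
 *   user_t *el, *tmp;
 *   printf("%u users\n", HASH_COUNT(users));
 *   HASH_ITER(hh, users, el, tmp) {
 *       HASH_DEL(users, el);      // remove from the hash...
 *       free(el);                 // ...then release the element itself
 *   }
 */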

typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;
   unsigned count;

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;

/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1
#define HASH_BLOOM_SIGNATURE 0xb12220f2

typedef struct UT_hash_table {
   UT_hash_bucket *buckets;
   unsigned num_buckets, log2_num_buckets;
   unsigned num_items;
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append     */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;
   char bloom_nbits;
#endif

} UT_hash_table;

typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;
   void *prev;                       /* prev element in app order      */
   void *next;                       /* next element in app order      */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order    */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order        */
   const void *key;                  /* ptr to enclosing struct's key  */
   unsigned keylen;                  /* enclosing struct's key len     */
   unsigned hashv;                   /* result of hash-fcn(key)        */
} UT_hash_handle;

#endif /* UTHASH_H */