libcoap  4.3.0beta
uthash.h
Go to the documentation of this file.
1 /*
2 Copyright (c) 2003-2017, Troy D. Hanson http://troydhanson.github.com/uthash/
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7 
8  * Redistributions of source code must retain the above copyright
9  notice, this list of conditions and the following disclaimer.
10 
11 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
13 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
14 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
15 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
16 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
17 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
18 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
19 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 */
23 
24 #ifndef UTHASH_H
25 #define UTHASH_H
26 
27 #define UTHASH_VERSION 2.0.2
28 
29 #include <string.h> /* memcmp, memset, strlen */
30 #include <stddef.h> /* ptrdiff_t */
31 #include <stdlib.h> /* exit */
32 
/* These macros use decltype or the earlier __typeof GNU extension.
   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
   when compiling c++ source) this code uses whatever method is needed
   or, for VS2008 where neither is available, uses casting workarounds. */
#if !defined(DECLTYPE) && !defined(NO_DECLTYPE)
#if defined(_MSC_VER) /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#endif
#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
#define NO_DECLTYPE
#else /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif
#endif

#ifdef NO_DECLTYPE
#define DECLTYPE(x)
/* Fallback assignment for compilers with no way to name "the type of dst":
 * write the pointer value through a char** alias instead of casting src to
 * dst's type.  NOTE(review): this assumes dst is a data pointer object whose
 * representation matches char* -- true for the struct pointers uthash stores. */
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while (0)
#else
/* Normal case: cast src to the deduced type of dst before assigning. */
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while (0)
#endif
64 
/* a number of the hash function use uint32_t which isn't defined on Pre VS2010 */
#if defined(_WIN32)
#if defined(_MSC_VER) && _MSC_VER >= 1600
#include <stdint.h>
#elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__)
#include <stdint.h>
#else
/* NOTE(review): these fallbacks assume 'unsigned int' is 32 bits and
 * 'unsigned char' is 8 bits on the pre-VS2010 toolchains this branch covers
 * -- confirm if ever built with an exotic compiler reaching this branch. */
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#endif
#elif defined(__GNUC__) && !defined(__VXWORKS__)
#include <stdint.h>
#else
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#endif
81 
/* Customization hooks: a file including uthash.h may predefine any of these
 * to substitute its own allocator, comparator, or fatal-error policy. */
#ifndef uthash_fatal
#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
#endif
#ifndef uthash_malloc
#define uthash_malloc(sz) malloc(sz) /* malloc fcn */
#endif
#ifndef uthash_free
#define uthash_free(ptr,sz) free(ptr) /* free fcn */
#endif
#ifndef uthash_bzero
#define uthash_bzero(a,n) memset(a,'\0',n)
#endif
#ifndef uthash_memcmp
#define uthash_memcmp(a,b,n) memcmp(a,b,n)
#endif
#ifndef uthash_strlen
#define uthash_strlen(s) strlen(s)
#endif

#ifndef uthash_noexpand_fyi
#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
#endif
#ifndef uthash_expand_fyi
#define uthash_expand_fyi(tbl) /* can be defined to log expands */
#endif

/* initial number of buckets; must stay a power of two because HASH_TO_BKT
 * masks with (num_buckets - 1) rather than taking a modulo */
#define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */

/* calculate the element whose hash handle address is hhp; tbl->hho is the
 * byte offset of the hash handle within the user's struct */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
/* calculate the hash handle from element address elp */
#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho)))
117 
/* Compute the hash of a key with the configured hash function (HASH_FCN),
 * leaving the result in hashv. */
#define HASH_VALUE(keyptr,keylen,hashv) \
do { \
  HASH_FCN(keyptr, keylen, hashv); \
} while (0)

/* Look up a key whose hash value is already known.  The bloom filter (if
 * enabled) is consulted first so definitely-absent keys skip the bucket scan.
 * Sets out to the element or NULL. */
#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \
do { \
  (out) = NULL; \
  if (head) { \
    unsigned _hf_bkt; \
    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
    if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
    } \
  } \
} while (0)

/* Look up a key: hash it, then delegate to HASH_FIND_BYHASHVALUE. */
#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  unsigned _hf_hashv; \
  HASH_VALUE(keyptr, keylen, _hf_hashv); \
  HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \
} while (0)
141 
#ifdef HASH_BLOOM
/* Optional Bloom filter, enabled with -DHASH_BLOOM=n where n is the log2 of
 * the filter's bit count.  It lets lookups of absent keys skip bucket scans. */
#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
/* FIX: the whole expansion is now parenthesized.  The previous form
 * (a/8UL) + (b ? 1UL : 0UL) miscomputed inside any surrounding expression,
 * e.g. 2*HASH_BLOOM_BYTELEN or HASH_BLOOM_BYTELEN*n. */
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL))
/* Allocate and zero the bloom bit-vector for a new table. */
#define HASH_BLOOM_MAKE(tbl) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!(tbl)->bloom_bv) { \
    uthash_fatal("out of memory"); \
  } \
  uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

/* Set / test one bit of the bit-vector ((bv) parenthesized for macro hygiene). */
#define HASH_BLOOM_BITSET(bv,idx) ((bv)[(idx)/8U] |= (1U << ((idx)%8U)))
#define HASH_BLOOM_BITTEST(bv,idx) ((bv)[(idx)/8U] & (1U << ((idx)%8U)))

/* Record / probe a hash value, reduced modulo the filter size by masking. */
#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))

#else
/* Bloom filter disabled: no-op stubs; TEST always reports "possibly present". */
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#define HASH_BLOOM_BYTELEN 0U
#endif
177 
/* Allocate and initialize the shared UT_hash_table when the first element is
 * added.  head is that first element; its hash handle's tbl pointer, the
 * app-order tail, the initial bucket array, and the bloom filter are set up
 * here.  Failure of either allocation invokes uthash_fatal. */
#define HASH_MAKE_TABLE(hh,head) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \
  if (!(head)->hh.tbl) { \
    uthash_fatal("out of memory"); \
  } \
  uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \
  (head)->hh.tbl->tail = &((head)->hh); \
  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
      HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
  if (!(head)->hh.tbl->buckets) { \
    uthash_fatal("out of memory"); \
  } \
  uthash_bzero((head)->hh.tbl->buckets, \
      HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
  HASH_BLOOM_MAKE((head)->hh.tbl); \
  (head)->hh.tbl->signature = HASH_SIGNATURE; \
} while (0)
199 
/* Replace-or-insert, sorted variant: if an element with the same key exists
 * it is deleted (and returned in replaced, else replaced is NULL), then add
 * is inserted at its cmpfcn-sorted position.  hashval must already be the
 * hash of add's key. */
#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
do { \
  (replaced) = NULL; \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) { \
    HASH_DELETE(hh, head, replaced); \
  } \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
} while (0)

/* Replace-or-insert, unsorted variant: new elements are appended. */
#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
do { \
  (replaced) = NULL; \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) { \
    HASH_DELETE(hh, head, replaced); \
  } \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
} while (0)

/* Convenience wrapper: hash the key, then replace-or-insert (unsorted). */
#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
do { \
  unsigned _hr_hashv; \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
  HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
} while (0)

/* Convenience wrapper: hash the key, then replace-or-insert at sorted position. */
#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \
do { \
  unsigned _hr_hashv; \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
  HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
} while (0)
233 
/* Append add to the end of the app-order doubly-linked list (head must be
 * non-NULL; tail is maintained in the shared table). */
#define HASH_APPEND_LIST(hh, head, add) \
do { \
  (add)->hh.next = NULL; \
  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
  (head)->hh.tbl->tail->next = (add); \
  (head)->hh.tbl->tail = &((add)->hh); \
} while (0)

/* Advance _hs_iter (declared by the caller, initially head) through the
 * app-order list until it reaches the first element that sorts after add,
 * or NULL if add belongs at the end.  cmpfcn(a,b) follows strcmp semantics. */
#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
do { \
  do { \
    if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \
      break; \
    } \
  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
} while (0)

#ifdef NO_DECLTYPE
#undef HASH_AKBI_INNER_LOOP
/* No-decltype variant: cmpfcn's first argument must have head's type, so the
 * head variable itself is temporarily repointed at each candidate (via
 * DECLTYPE_ASSIGN) and restored around every comparison. */
#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
do { \
  char *_hs_saved_head = (char*)(head); \
  do { \
    DECLTYPE_ASSIGN(head, _hs_iter); \
    if (cmpfcn(head, add) > 0) { \
      DECLTYPE_ASSIGN(head, _hs_saved_head); \
      break; \
    } \
    DECLTYPE_ASSIGN(head, _hs_saved_head); \
  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
} while (0)
#endif
266 
/* Insert add at its cmpfcn-sorted position in the app-order list, with the
 * key's hash already computed in hashval.  The element is also linked into
 * its bucket, counted, recorded in the bloom filter, and (under HASH_DEBUG)
 * the whole table is integrity-checked.  keyptr must outlive the element:
 * only the pointer is stored, the key bytes are not copied. */
#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
do { \
  unsigned _ha_bkt; \
  (add)->hh.hashv = (hashval); \
  /* FIX: cast to (const void *) for consistency with \
   * HASH_ADD_KEYPTR_BYHASHVALUE below; the old (char*) cast silently \
   * discarded constness of the caller's key pointer. */ \
  (add)->hh.key = (const void *) (keyptr); \
  (add)->hh.keylen = (unsigned) (keylen_in); \
  if (!(head)) { \
    /* first element: it becomes the head and owns a fresh table */ \
    (add)->hh.next = NULL; \
    (add)->hh.prev = NULL; \
    (head) = (add); \
    HASH_MAKE_TABLE(hh, head); \
  } else { \
    void *_hs_iter = (head); \
    (add)->hh.tbl = (head)->hh.tbl; \
    HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \
    if (_hs_iter) { \
      /* splice add in front of _hs_iter */ \
      (add)->hh.next = _hs_iter; \
      if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \
        HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \
      } else { \
        (head) = (add); \
      } \
      HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \
    } else { \
      /* add sorts after every existing element */ \
      HASH_APPEND_LIST(hh, head, add); \
    } \
  } \
  (head)->hh.tbl->num_items++; \
  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \
} while (0)
301 
/* Sorted insert with the key held outside the struct: hash it, then insert. */
#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \
do { \
  unsigned _hs_hashv; \
  HASH_VALUE(keyptr, keylen_in, _hs_hashv); \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
} while (0)

/* Sorted insert with the key stored in a member of add, hash precomputed. */
#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)

/* Sorted insert with the key stored in a member of add. */
#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \
  HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)
314 
/* Append add to the hash with the key's hash already computed in hashval.
 * New elements go to the end of the app-order list (no sorting).  Only the
 * key pointer is stored; the key bytes are not copied and must outlive the
 * element. */
#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \
do { \
  unsigned _ha_bkt; \
  (add)->hh.hashv = (hashval); \
  (add)->hh.key = (const void *) (keyptr); \
  (add)->hh.keylen = (unsigned) (keylen_in); \
  if (!(head)) { \
    /* first element: becomes the head and owns a fresh table */ \
    (add)->hh.next = NULL; \
    (add)->hh.prev = NULL; \
    (head) = (add); \
    HASH_MAKE_TABLE(hh, head); \
  } else { \
    (add)->hh.tbl = (head)->hh.tbl; \
    HASH_APPEND_LIST(hh, head, add); \
  } \
  (head)->hh.tbl->num_items++; \
  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \
} while (0)
337 
/* Append add with the key held outside the struct: hash it, then add. */
#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
  unsigned _ha_hashv; \
  HASH_VALUE(keyptr, keylen_in, _ha_hashv); \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \
} while (0)

/* Append add with the key stored in a member of add, hash precomputed. */
#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)

/* Append add with the key stored in a member of add (the common case). */
#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
  HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)
350 
/* Map a hash value to a bucket index.  num_bkts is always a power of two in
 * uthash, so masking with (num_bkts - 1) equals hashv % num_bkts.
 * FIX: the bkt output parameter is now parenthesized so an expression
 * argument (e.g. a struct member) expands correctly. */
#define HASH_TO_BKT(hashv,num_bkts,bkt) \
do { \
  (bkt) = ((hashv) & ((num_bkts) - 1U)); \
} while (0)
355 
/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 * HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
#define HASH_DELETE(hh,head,delptr) \
  HASH_DELETE_HH(hh, head, &(delptr)->hh)

/* Delete by hash-handle pointer.  Deleting the last element frees the whole
 * table (buckets and bloom filter) and sets head to NULL; otherwise the
 * element is unlinked from both the app-order list and its bucket chain. */
#define HASH_DELETE_HH(hh,head,delptrhh) \
do { \
  struct UT_hash_handle *_hd_hh_del = (delptrhh); \
  if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \
    /* only element: tear the table down entirely */ \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl->buckets, \
        (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head) = NULL; \
  } else { \
    unsigned _hd_bkt; \
    if (_hd_hh_del == (head)->hh.tbl->tail) { \
      (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \
    } \
    if (_hd_hh_del->prev != NULL) { \
      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \
    } else { \
      /* deleting the head: repoint it at the next element */ \
      DECLTYPE_ASSIGN(head, _hd_hh_del->next); \
    } \
    if (_hd_hh_del->next != NULL) { \
      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \
    } \
    HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
    HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
    (head)->hh.tbl->num_items--; \
  } \
  HASH_FSCK(hh, head, "HASH_DELETE"); \
} while (0)
399 
400 
/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL for the common key types;
 * all assume the hash handle member is named "hh".  The _STR forms use
 * strfield[0] so the key is the char-array field's contents (NUL excluded). */
#define HASH_FIND_STR(head,findstr,out) \
  HASH_FIND(hh,head,findstr,(unsigned)uthash_strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
  HASH_ADD(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add)
#define HASH_REPLACE_STR(head,strfield,add,replaced) \
  HASH_REPLACE(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add,replaced)
#define HASH_FIND_INT(head,findint,out) \
  HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
  HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_REPLACE_INT(head,intfield,add,replaced) \
  HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
#define HASH_FIND_PTR(head,findptr,out) \
  HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
  HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
  HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
#define HASH_DEL(head,delptr) \
  HASH_DELETE(hh,head,delptr)
422 
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 * NOTE(review): HASH_OOPS uses fprintf/stderr but this header does not include
 * <stdio.h>; a HASH_DEBUG build relies on the including file providing it.
 */
#ifdef HASH_DEBUG
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
/* Walk every bucket chain and the app-order list, cross-checking prev/next
 * links and per-bucket and total item counts; abort on any inconsistency. */
#define HASH_FSCK(hh,head,where) \
do { \
  struct UT_hash_handle *_thh; \
  if (head) { \
    unsigned _bkt_i; \
    unsigned _count = 0; \
    char *_prev; \
    /* pass 1: bucket chains */ \
    for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \
      unsigned _bkt_count = 0; \
      _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
      _prev = NULL; \
      while (_thh) { \
        if (_prev != (char*)(_thh->hh_prev)) { \
          HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \
              (where), (void*)_thh->hh_prev, (void*)_prev); \
        } \
        _bkt_count++; \
        _prev = (char*)(_thh); \
        _thh = _thh->hh_next; \
      } \
      _count += _bkt_count; \
      if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
        HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \
            (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
      } \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \
          (where), (head)->hh.tbl->num_items, _count); \
    } \
    /* pass 2: app-order doubly-linked list */ \
    _count = 0; \
    _prev = NULL; \
    _thh = &(head)->hh; \
    while (_thh) { \
      _count++; \
      if (_prev != (char*)_thh->prev) { \
        HASH_OOPS("%s: invalid prev %p, actual %p\n", \
            (where), (void*)_thh->prev, (void*)_prev); \
      } \
      _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
      _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("%s: invalid app item count %u, actual %u\n", \
          (where), (head)->hh.tbl->num_items, _count); \
    } \
  } \
} while (0)
#else
#define HASH_FSCK(hh,head,where)
#endif
479 
/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the descriptor to which this macro is defined for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
  unsigned _klen = fieldlen; \
  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
  write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif

/* default to Jenkin's hash unless overridden e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif
500 
/* The Bernstein hash function, used in Perl prior to v5.6.
 * Each step computes hashv*33 + byte, written as ((hashv<<5)+hashv). */
#define HASH_BER(key,keylen,hashv) \
do { \
  const unsigned char *_hb_p = (const unsigned char*)(key); \
  unsigned _hb_n; \
  (hashv) = 0; \
  for (_hb_n = (unsigned)keylen; _hb_n != 0U; --_hb_n) { \
    (hashv) = (((hashv) << 5) + (hashv)) + *_hb_p++; \
  } \
} while (0)
511 
512 
513 /* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
514  * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
515 #define HASH_SAX(key,keylen,hashv) \
516 do { \
517  unsigned _sx_i; \
518  const unsigned char *_hs_key = (const unsigned char*)(key); \
519  hashv = 0; \
520  for (_sx_i=0; _sx_i < keylen; _sx_i++) { \
521  hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
522  } \
523 } while (0)
524 /* FNV-1a variation */
/* FNV-1a variation: xor in each byte, then multiply by the 32-bit FNV prime
 * 16777619, starting from the offset basis 2166136261. */
#define HASH_FNV(key,keylen,hashv) \
do { \
  const unsigned char *_hf_p = (const unsigned char*)(key); \
  const unsigned char *_hf_end = _hf_p + (keylen); \
  (hashv) = 2166136261U; \
  while (_hf_p != _hf_end) { \
    (hashv) = ((hashv) ^ *_hf_p++) * 16777619U; \
  } \
} while (0)
535 
/* Jenkins one-at-a-time hash: per-byte add/shift mixing, then a final
 * avalanche of three shift-mix steps. */
#define HASH_OAT(key,keylen,hashv) \
do { \
  const unsigned char *_ho_p = (const unsigned char*)(key); \
  unsigned _ho_n = (unsigned)(keylen); \
  unsigned _ho_i; \
  hashv = 0; \
  for (_ho_i = 0U; _ho_i < _ho_n; ++_ho_i) { \
    hashv += _ho_p[_ho_i]; \
    hashv += (hashv << 10); \
    hashv ^= (hashv >> 6); \
  } \
  hashv += (hashv << 3); \
  hashv ^= (hashv >> 11); \
  hashv += (hashv << 15); \
} while (0)
550 
/* Bob Jenkins' 96-bit reversible mix step for HASH_JEN below.  The exact
 * sequence and shift amounts are the algorithm -- do not reorder. */
#define HASH_JEN_MIX(a,b,c) \
do { \
  a -= b; a -= c; a ^= ( c >> 13 ); \
  b -= c; b -= a; b ^= ( a << 8 ); \
  c -= a; c -= b; c ^= ( b >> 13 ); \
  a -= b; a -= c; a ^= ( c >> 12 ); \
  b -= c; b -= a; b ^= ( a << 16 ); \
  c -= a; c -= b; c ^= ( b >> 5 ); \
  a -= b; a -= c; a ^= ( c >> 3 ); \
  b -= c; b -= a; b ^= ( a << 10 ); \
  c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)
563 
/* Bob Jenkins' lookup hash (uthash's default, see HASH_FCN): consume the key
 * 12 bytes at a time as three little-endian 32-bit words, mixing after each
 * group; the final 0-11 bytes are folded in via the switch, then mixed once
 * more.  Byte-at-a-time loads keep it alignment- and endian-safe. */
#define HASH_JEN(key,keylen,hashv) \
do { \
  unsigned _hj_i,_hj_j,_hj_k; \
  unsigned const char *_hj_key=(unsigned const char*)(key); \
  hashv = 0xfeedbeefu; \
  _hj_i = _hj_j = 0x9e3779b9u; \
  _hj_k = (unsigned)(keylen); \
  while (_hj_k >= 12U) { \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
        + ( (unsigned)_hj_key[2] << 16 ) \
        + ( (unsigned)_hj_key[3] << 24 ) ); \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
        + ( (unsigned)_hj_key[6] << 16 ) \
        + ( (unsigned)_hj_key[7] << 24 ) ); \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
        + ( (unsigned)_hj_key[10] << 16 ) \
        + ( (unsigned)_hj_key[11] << 24 ) ); \
     \
     HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
     \
     _hj_key += 12; \
     _hj_k -= 12U; \
  } \
  hashv += (unsigned)(keylen); \
  switch ( _hj_k ) { \
    case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
    case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \
    case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \
    case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \
    case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \
    case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \
    case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \
    case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \
    case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \
    case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \
    case 1: _hj_i += _hj_key[0]; \
    default: ; /* zero trailing bytes: nothing left to fold in */ \
  } \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
} while (0)
604 
605 /* The Paul Hsieh hash function */
606 #undef get16bits
607 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
608  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
609 #define get16bits(d) (*((const uint16_t *) (d)))
610 #endif
611 
612 #if !defined (get16bits)
613 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
614  +(uint32_t)(((const uint8_t *)(d))[0]) )
615 #endif
/* Paul Hsieh's SuperFastHash: consume the key four bytes (two 16-bit little-
 * endian words) per iteration, handle the 0-3 trailing bytes, then apply the
 * final avalanche sequence. */
#define HASH_SFH(key,keylen,hashv) \
do { \
  unsigned const char *_sfh_key=(unsigned const char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
   \
  unsigned _sfh_rem = _sfh_len & 3U; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabeu; \
   \
  /* Main loop */ \
  for (;_sfh_len > 0U; _sfh_len--) { \
    hashv += get16bits (_sfh_key); \
    _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
    hashv = (hashv << 16) ^ _sfh_tmp; \
    _sfh_key += 2U*sizeof (uint16_t); \
    hashv += hashv >> 11; \
  } \
   \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
    case 3: hashv += get16bits (_sfh_key); \
        hashv ^= hashv << 16; \
        hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
        hashv += hashv >> 11; \
        break; \
    case 2: hashv += get16bits (_sfh_key); \
        hashv ^= hashv << 11; \
        hashv += hashv >> 17; \
        break; \
    case 1: hashv += *_sfh_key; \
        hashv ^= hashv << 10; \
        hashv += hashv >> 1; \
  } \
   \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
} while (0)
658 
#ifdef HASH_USING_NO_STRICT_ALIASING
/* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads.
 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
 * MurmurHash uses the faster approach only on CPU's where we know it's safe.
 *
 * Note the preprocessor built-in defines can be emitted using:
 *
 * gcc -m64 -dM -E - < /dev/null (on gcc)
 * cc -## a.c (where a.c is a simple test file) (Sun Studio)
 */
#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86))
#define MUR_GETBLOCK(p,i) p[i]
#else /* non intel */
/* On alignment-strict CPUs each 32-bit block is assembled from two aligned
 * word reads, selected by the pointer's offset within its aligned word. */
#define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL)
#define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL)
#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
#define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
#else /* assume little endian non-intel */
#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
#define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))
#endif
#define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \
    (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \
    (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) : \
    MUR_ONE_THREE(p))))
#endif
/* 32-bit rotate left */
#define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
/* MurmurHash3 finalization mix ("fmix32") */
#define MUR_FMIX(_h) \
do { \
  _h ^= _h >> 16; \
  _h *= 0x85ebca6bu; \
  _h ^= _h >> 13; \
  _h *= 0xc2b2ae35u; \
  _h ^= _h >> 16; \
} while (0)

/* MurmurHash3 (32-bit): process 4-byte blocks, fold in the 0-3 byte tail,
 * xor in the length, then finalize with MUR_FMIX. */
#define HASH_MUR(key,keylen,hashv) \
do { \
  const uint8_t *_mur_data = (const uint8_t*)(key); \
  const int _mur_nblocks = (int)(keylen) / 4; \
  uint32_t _mur_h1 = 0xf88D5353u; \
  uint32_t _mur_c1 = 0xcc9e2d51u; \
  uint32_t _mur_c2 = 0x1b873593u; \
  uint32_t _mur_k1 = 0; \
  const uint8_t *_mur_tail; \
  const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \
  int _mur_i; \
  for (_mur_i = -_mur_nblocks; _mur_i != 0; _mur_i++) { \
    _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \
    _mur_k1 *= _mur_c1; \
    _mur_k1 = MUR_ROTL32(_mur_k1,15); \
    _mur_k1 *= _mur_c2; \
     \
    _mur_h1 ^= _mur_k1; \
    _mur_h1 = MUR_ROTL32(_mur_h1,13); \
    _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \
  } \
  _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \
  _mur_k1=0; \
  switch ((keylen) & 3U) { \
    case 0: break; \
    case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \
    case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \
    case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \
    _mur_k1 *= _mur_c1; \
    _mur_k1 = MUR_ROTL32(_mur_k1,15); \
    _mur_k1 *= _mur_c2; \
    _mur_h1 ^= _mur_k1; \
  } \
  _mur_h1 ^= (uint32_t)(keylen); \
  MUR_FMIX(_mur_h1); \
  hashv = _mur_h1; \
} while (0)
#endif /* HASH_USING_NO_STRICT_ALIASING */
739 
/* iterate over items in a known bucket to find desired item.
 * The hash value and key length are compared first (cheap) and memcmp of the
 * key bytes is only done on a match of both.  out is set to the element or
 * NULL. */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \
do { \
  if ((head).hh_head != NULL) { \
    DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \
  } else { \
    (out) = NULL; \
  } \
  while ((out) != NULL) { \
    if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \
      if (uthash_memcmp((out)->hh.key, keyptr, keylen_in) == 0) { \
        break; \
      } \
    } \
    if ((out)->hh.hh_next != NULL) { \
      DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \
    } else { \
      (out) = NULL; \
    } \
  } \
} while (0)
761 
/* add an item to a bucket: push the handle onto the front of the bucket's
 * chain, then expand the whole table if this chain has grown past the
 * (expand_mult-scaled) capacity threshold and expansion isn't inhibited. */
#define HASH_ADD_TO_BKT(head,addhh) \
do { \
  UT_hash_bucket *_ha_head = &(head); \
  _ha_head->count++; \
  (addhh)->hh_next = _ha_head->hh_head; \
  (addhh)->hh_prev = NULL; \
  if (_ha_head->hh_head != NULL) { \
    _ha_head->hh_head->hh_prev = (addhh); \
  } \
  _ha_head->hh_head = (addhh); \
  if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \
      && !(addhh)->tbl->noexpand) { \
    HASH_EXPAND_BUCKETS((addhh)->tbl); \
  } \
} while (0)
778 
/* remove an item from a given bucket: standard doubly-linked-list unlink of
 * delhh from the bucket chain, plus the bucket count decrement. */
#define HASH_DEL_IN_BKT(head,delhh) \
do { \
  UT_hash_bucket *_hd_head = &(head); \
  _hd_head->count--; \
  if (_hd_head->hh_head == (delhh)) { \
    _hd_head->hh_head = (delhh)->hh_next; \
  } \
  if ((delhh)->hh_prev) { \
    (delhh)->hh_prev->hh_next = (delhh)->hh_next; \
  } \
  if ((delhh)->hh_next) { \
    (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \
  } \
} while (0)
794 
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 * ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 */
#define HASH_EXPAND_BUCKETS(tbl) \
do { \
  unsigned _he_bkt; \
  unsigned _he_bkt_i; \
  struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
  UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
  _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
      2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
  if (!_he_new_buckets) { \
    uthash_fatal("out of memory"); \
  } \
  uthash_bzero(_he_new_buckets, \
      2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
  (tbl)->ideal_chain_maxlen = \
      ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \
      ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \
  (tbl)->nonideal_items = 0; \
  /* rehash every chain of every old bucket into the doubled bucket array, \
   * tracking how many items land in over-长 chains is NOT done here -- \
   * nonideal_items counts items whose new chain exceeds the ideal length */ \
  for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \
    _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \
    while (_he_thh != NULL) { \
      _he_hh_nxt = _he_thh->hh_next; \
      HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \
      _he_newbkt = &(_he_new_buckets[_he_bkt]); \
      if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \
        (tbl)->nonideal_items++; \
        _he_newbkt->expand_mult = _he_newbkt->count / (tbl)->ideal_chain_maxlen; \
      } \
      _he_thh->hh_prev = NULL; \
      _he_thh->hh_next = _he_newbkt->hh_head; \
      if (_he_newbkt->hh_head != NULL) { \
        _he_newbkt->hh_head->hh_prev = _he_thh; \
      } \
      _he_newbkt->hh_head = _he_thh; \
      _he_thh = _he_hh_nxt; \
    } \
  } \
  uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
  (tbl)->num_buckets *= 2U; \
  (tbl)->log2_num_buckets++; \
  (tbl)->buckets = _he_new_buckets; \
  /* two consecutive expansions that left >50% of items in over-long chains \
   * mark the table noexpand: the hash function fits the keys poorly */ \
  (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? \
      ((tbl)->ineff_expands+1U) : 0U; \
  if ((tbl)->ineff_expands > 1U) { \
    (tbl)->noexpand = 1; \
    uthash_noexpand_fyi(tbl); \
  } \
  uthash_expand_fyi(tbl); \
} while (0)
872 
873 
874 /* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
875 /* Note that HASH_SORT assumes the hash handle name to be hh.
876  * HASH_SRT was added to allow the hash handle name to be passed in. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
/* Sort the items in application (prev/next) order with a bottom-up mergesort:
 * repeatedly merge sorted runs of length _hs_insize, doubling _hs_insize each
 * pass until a pass performs at most one merge. Only the app-order links and
 * tbl->tail are rewritten; bucket chains are untouched. cmpfcn(a,b) follows
 * strcmp conventions (<0, 0, >0). */
#define HASH_SRT(hh,head,cmpfcn)                                                 \
do {                                                                             \
  unsigned _hs_i;                                                                \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize;               \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail;            \
  if (head != NULL) {                                                            \
    _hs_insize = 1;                                                              \
    _hs_looping = 1;                                                             \
    _hs_list = &((head)->hh);                                                    \
    while (_hs_looping != 0U) {                                                  \
      _hs_p = _hs_list;                                                          \
      _hs_list = NULL;                                                           \
      _hs_tail = NULL;                                                           \
      _hs_nmerges = 0;                                                           \
      while (_hs_p != NULL) {                                                    \
        _hs_nmerges++;                                                           \
        /* advance _hs_q up to _hs_insize steps past _hs_p to find run q */      \
        _hs_q = _hs_p;                                                           \
        _hs_psize = 0;                                                           \
        for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) {                           \
          _hs_psize++;                                                           \
          _hs_q = ((_hs_q->next != NULL) ?                                       \
            HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                   \
          if (_hs_q == NULL) {                                                   \
            break;                                                               \
          }                                                                      \
        }                                                                        \
        _hs_qsize = _hs_insize;                                                  \
        /* merge runs p and q, appending the smaller head to the output list */  \
        while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) {    \
          if (_hs_psize == 0U) {                                                 \
            _hs_e = _hs_q;                                                       \
            _hs_q = ((_hs_q->next != NULL) ?                                     \
              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                 \
            _hs_qsize--;                                                         \
          } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) {                     \
            _hs_e = _hs_p;                                                       \
            if (_hs_p != NULL) {                                                 \
              _hs_p = ((_hs_p->next != NULL) ?                                   \
                HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL);               \
            }                                                                    \
            _hs_psize--;                                                         \
          } else if ((cmpfcn(                                                    \
                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)),             \
                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q))              \
                )) <= 0) {                                                       \
            /* <= keeps the sort stable: ties come from run p first */           \
            _hs_e = _hs_p;                                                       \
            if (_hs_p != NULL) {                                                 \
              _hs_p = ((_hs_p->next != NULL) ?                                   \
                HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL);               \
            }                                                                    \
            _hs_psize--;                                                         \
          } else {                                                               \
            _hs_e = _hs_q;                                                       \
            _hs_q = ((_hs_q->next != NULL) ?                                     \
              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                 \
            _hs_qsize--;                                                         \
          }                                                                      \
          if ( _hs_tail != NULL ) {                                              \
            _hs_tail->next = ((_hs_e != NULL) ?                                  \
              ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL);                       \
          } else {                                                               \
            _hs_list = _hs_e;                                                    \
          }                                                                      \
          if (_hs_e != NULL) {                                                   \
            _hs_e->prev = ((_hs_tail != NULL) ?                                  \
              ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL);                    \
          }                                                                      \
          _hs_tail = _hs_e;                                                      \
        }                                                                        \
        _hs_p = _hs_q;                                                           \
      }                                                                          \
      if (_hs_tail != NULL) {                                                    \
        _hs_tail->next = NULL;                                                   \
      }                                                                          \
      /* one (or zero) merges this pass: list is fully sorted, finish up */      \
      if (_hs_nmerges <= 1U) {                                                   \
        _hs_looping = 0;                                                         \
        (head)->hh.tbl->tail = _hs_tail;                                         \
        DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list));           \
      }                                                                          \
      _hs_insize *= 2U;                                                          \
    }                                                                            \
    HASH_FSCK(hh, head, "HASH_SRT");                                             \
  }                                                                              \
} while (0)
961 
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond)                              \
do {                                                                             \
  unsigned _src_bkt, _dst_bkt;                                                   \
  void *_last_elt = NULL, *_elt;                                                 \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL;                         \
  /* byte offset of the destination hash handle within an element */             \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst));                 \
  if ((src) != NULL) {                                                           \
    /* scan every bucket chain of the source hash */                             \
    for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) {    \
      for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head;               \
           _src_hh != NULL;                                                      \
           _src_hh = _src_hh->hh_next) {                                         \
        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh);                         \
        if (cond(_elt)) {                                                        \
          /* initialize the destination handle, reusing the key pointer and      \
           * the already-computed hash value from the source handle */           \
          _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho);                 \
          _dst_hh->key = _src_hh->key;                                           \
          _dst_hh->keylen = _src_hh->keylen;                                     \
          _dst_hh->hashv = _src_hh->hashv;                                       \
          _dst_hh->prev = _last_elt;                                             \
          _dst_hh->next = NULL;                                                  \
          if (_last_elt_hh != NULL) {                                            \
            _last_elt_hh->next = _elt;                                           \
          }                                                                      \
          if ((dst) == NULL) {                                                   \
            /* first selected item: create the destination table */              \
            DECLTYPE_ASSIGN(dst, _elt);                                          \
            HASH_MAKE_TABLE(hh_dst, dst);                                        \
          } else {                                                               \
            _dst_hh->tbl = (dst)->hh_dst.tbl;                                    \
          }                                                                      \
          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt);      \
          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], _dst_hh);             \
          HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv);                          \
          (dst)->hh_dst.tbl->num_items++;                                        \
          _last_elt = _elt;                                                      \
          _last_elt_hh = _dst_hh;                                                \
        }                                                                        \
      }                                                                          \
    }                                                                            \
  }                                                                              \
  HASH_FSCK(hh_dst, dst, "HASH_SELECT");                                         \
} while (0)
1007 
/* Release the hash's bookkeeping storage (bloom filter, bucket array and
 * table struct) and reset head to NULL. The elements themselves are NOT
 * freed; they belong to the application. No-op when head is already NULL. */
#define HASH_CLEAR(hh,head)                                                      \
do {                                                                             \
  if ((head) != NULL) {                                                          \
    HASH_BLOOM_FREE((head)->hh.tbl);                                             \
    uthash_free((head)->hh.tbl->buckets,                                         \
                sizeof(UT_hash_bucket) * (head)->hh.tbl->num_buckets);           \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                          \
    (head) = NULL;                                                               \
  }                                                                              \
} while (0)
1018 
/* Total bookkeeping bytes consumed by the hash: the table struct, the bloom
 * filter (if enabled), the bucket array, and one UT_hash_handle per item.
 * Evaluates to 0 when head is NULL. */
#define HASH_OVERHEAD(hh,head)                                                   \
 (((head) != NULL) ? (                                                           \
    (size_t)(sizeof(UT_hash_table) +                                             \
             (HASH_BLOOM_BYTELEN) +                                              \
             ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) +            \
             ((head)->hh.tbl->num_items * sizeof(UT_hash_handle)))) : 0U)
1025 
#ifdef NO_DECLTYPE
/* Iterate over all items in app order: el walks the list while tmp always
 * holds el's successor, so the current item may be deleted safely inside the
 * loop body. This variant assigns tmp through a char** cast because neither
 * decltype nor __typeof is available. */
#define HASH_ITER(hh,head,el,tmp) \
for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
  (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#else
/* Deletion-safe iteration; tmp is assigned via a DECLTYPE cast. */
#define HASH_ITER(hh,head,el,tmp) \
for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \
  (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#endif
1035 
/* obtain a count of items in the hash (0 for a NULL/empty hash) */
#define HASH_COUNT(head) HASH_CNT(hh,head)
/* hh names the hash-handle field inside the element. The head argument is
 * fully parenthesized (fix: the original left the != NULL test unparenthesized,
 * a macro-hygiene hazard for non-trivial head expressions). */
#define HASH_CNT(hh,head) (((head) != NULL)?((head)->hh.tbl->num_items):0U)
1039 
/* One bucket of the hash table: the head of a doubly-linked chain of hash
 * handles plus chain-length bookkeeping used by the expansion logic. */
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;  /* first hash handle in this bucket's chain */
   unsigned count;                  /* number of items currently in the chain */

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;
/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1u
/* random signature used only to detect the bloom filter in external analysis */
#define HASH_BLOOM_SIGNATURE 0xb12220f2u
1063 
1064 typedef struct UT_hash_table {
1067  unsigned num_items;
1068  struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
1069  ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */
1070 
1071  /* in an ideal situation (all buckets used equally), no bucket would have
1072  * more than ceil(#items/#buckets) items. that's the ideal chain length. */
1074 
1075  /* nonideal_items is the number of items in the hash whose chain position
1076  * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
1077  * hash distribution; reaching them in a chain traversal takes >ideal steps */
1078  unsigned nonideal_items;
1079 
1080  /* ineffective expands occur when a bucket doubling was performed, but
1081  * afterward, more than half the items in the hash had nonideal chain
1082  * positions. If this happens on two consecutive expansions we inhibit any
1083  * further expansion, as it's not helping; this happens when the hash
1084  * function isn't a good fit for the key domain. When expansion is inhibited
1085  * the hash will still work, albeit no longer in constant time. */
1087 
1088  uint32_t signature; /* used only to find hash tables in external analysis */
1089 #ifdef HASH_BLOOM
1090  uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
1091  uint8_t *bloom_bv;
1092  uint8_t bloom_nbits;
1093 #endif
1094 
1096 
/* The hash handle embedded in each hashable structure: links for both the
 * application (insertion) order list and the per-bucket chain, plus the
 * cached key pointer, key length and hash value. */
typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;       /* the table this handle belongs to */
   void *prev;                      /* prev element in app order */
   void *next;                      /* next element in app order */
   struct UT_hash_handle *hh_prev;  /* previous hh in bucket order */
   struct UT_hash_handle *hh_next;  /* next hh in bucket order */
   const void *key;                 /* ptr to enclosing struct's key */
   unsigned keylen;                 /* enclosing struct's key len */
   unsigned hashv;                  /* result of hash-fcn(key) */
} UT_hash_handle;
1108 #endif /* UTHASH_H */
unsigned count
Definition: uthash.h:1042
struct UT_hash_handle * hh_head
Definition: uthash.h:1041
unsigned expand_mult
Definition: uthash.h:1056
struct UT_hash_handle * hh_prev
Definition: uthash.h:1101
struct UT_hash_handle * hh_next
Definition: uthash.h:1102
const void * key
Definition: uthash.h:1103
void * next
Definition: uthash.h:1100
unsigned hashv
Definition: uthash.h:1105
void * prev
Definition: uthash.h:1099
struct UT_hash_table * tbl
Definition: uthash.h:1098
unsigned keylen
Definition: uthash.h:1104
struct UT_hash_handle * tail
Definition: uthash.h:1068
UT_hash_bucket * buckets
Definition: uthash.h:1065
unsigned ineff_expands
Definition: uthash.h:1086
unsigned num_buckets
Definition: uthash.h:1066
unsigned ideal_chain_maxlen
Definition: uthash.h:1073
unsigned noexpand
Definition: uthash.h:1086
unsigned num_items
Definition: uthash.h:1067
uint32_t signature
Definition: uthash.h:1088
unsigned nonideal_items
Definition: uthash.h:1078
unsigned log2_num_buckets
Definition: uthash.h:1066
ptrdiff_t hho
Definition: uthash.h:1069
struct UT_hash_bucket UT_hash_bucket
unsigned int uint32_t
Definition: uthash.h:78
struct UT_hash_handle UT_hash_handle
unsigned char uint8_t
Definition: uthash.h:79
struct UT_hash_table UT_hash_table