libcoap 4.1.1 — generated Doxygen source listing
All Data Structures | Files | Functions | Variables | Typedefs | Macros | Groups | Pages
uthash.h — Troy D. Hanson's uthash hash-table macros (v1.9.3), bundled with libcoap
Go to the documentation of this file.
1 /*
2 Copyright (c) 2003-2010, Troy D. Hanson http://uthash.sourceforge.net
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7 
8  * Redistributions of source code must retain the above copyright
9  notice, this list of conditions and the following disclaimer.
10 
11 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
13 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
14 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
15 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
16 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
17 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
18 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
19 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 */
23 
24 #ifndef UTHASH_H
25 #define UTHASH_H
26 
#include <stddef.h> /* ptrdiff_t */
#include <stdlib.h> /* exit, malloc, free — used by uthash_fatal/uthash_malloc/uthash_free */
#include <string.h> /* memcmp, strlen, memset */
29 
30 /* These macros use decltype or the earlier __typeof GNU extension.
31  As decltype is only available in newer compilers (VS2010 or gcc 4.3+
32  when compiling c++ source) this code uses whatever method is needed
33  or, for VS2008 where neither is available, uses casting workarounds. */
34 #ifdef _MSC_VER /* MS compiler */
35 #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
36 #define DECLTYPE(x) (decltype(x))
37 #else /* VS2008 or older (or VS2010 in C mode) */
38 #define NO_DECLTYPE
39 #define DECLTYPE(x)
40 #endif
41 #else /* GNU, Sun and other compilers */
42 #define DECLTYPE(x) (__typeof(x))
43 #endif
44 
45 #ifdef NO_DECLTYPE
46 #define DECLTYPE_ASSIGN(dst,src) \
47 do { \
48  char **_da_dst = (char**)(&(dst)); \
49  *_da_dst = (char*)(src); \
50 } while(0)
51 #else
52 #define DECLTYPE_ASSIGN(dst,src) \
53 do { \
54  (dst) = DECLTYPE(dst)(src); \
55 } while(0)
56 #endif
57 
58 /* a number of the hash function use uint32_t which isn't defined on win32 */
59 #ifdef _MSC_VER
60 typedef unsigned int uint32_t;
61 #else
62 #include <inttypes.h> /* uint32_t */
63 #endif
64 
65 #define UTHASH_VERSION 1.9.3
66 
67 #ifndef uthash_fatal
68 #define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
69 #endif
70 #define uthash_malloc(sz) malloc(sz) /* malloc fcn */
71 #define uthash_free(ptr,sz) free(ptr) /* free fcn */
72 
73 #define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
74 #define uthash_expand_fyi(tbl) /* can be defined to log expands */
75 
76 /* initial number of buckets */
77 #define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */
78 #define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */
79 #define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */
80 
81 /* calculate the element whose hash handle address is hhp:
 * subtracting the handle's byte offset within the element (tbl->hho)
 * from the handle address recovers a pointer to the enclosing struct */
82 #define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
83 
/* HASH_FIND: look up keyptr/keylen in the hash headed by `head`.
 * Sets `out` to the matching element, or leaves it NULL on a miss (or
 * when the hash is empty). Hashes once via HASH_FCN, consults
 * HASH_BLOOM_TEST (constant 1 when HASH_BLOOM is not defined) to skip
 * definitely-absent keys, then scans the single candidate bucket chain
 * with HASH_FIND_IN_BKT. */
84 #define HASH_FIND(hh,head,keyptr,keylen,out) \
85 do { \
86  unsigned _hf_bkt,_hf_hashv; \
87  out=NULL; \
88  if (head) { \
89  HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
90  if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \
91  HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
92  keyptr,keylen,out); \
93  } \
94  } \
95 } while (0)
96 
#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
/* bytes needed to hold HASH_BLOOM_BITLEN bits. Fully parenthesized so the
 * macro stays grouped inside larger expressions (the original unwrapped
 * `a/8 + (...)` misgrouped under, e.g., multiplication at the use site). */
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 1:0))

/* allocate and zero the bloom bit vector and stamp its signature.
 * No semicolon after while(0): the macro must expand to a single
 * statement so `if (x) HASH_BLOOM_MAKE(t); else ...` compiles. */
#define HASH_BLOOM_MAKE(tbl) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
  memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0)

/* release the bloom bit vector (size passed for free-hook bookkeeping) */
#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8)))

/* fold the 32-bit hash down to bloom_nbits bits, then set/test that bit */
#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#else
/* bloom filter disabled: makes/frees/adds are no-ops and every
 * membership test "passes", so lookups always scan the bucket */
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#endif
129 
130 #define HASH_MAKE_TABLE(hh,head) \
131 do { \
132  (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
133  sizeof(UT_hash_table)); \
134  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
135  memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
136  (head)->hh.tbl->tail = &((head)->hh); \
137  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
138  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
139  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
140  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
141  HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
142  if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
143  memset((head)->hh.tbl->buckets, 0, \
144  HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
145  HASH_BLOOM_MAKE((head)->hh.tbl); \
146  (head)->hh.tbl->signature = HASH_SIGNATURE; \
147 } while(0)
148 
/* add an element whose key is the struct member `fieldname` (keylen_in
 * bytes). `add` is parenthesized before member access so any
 * pointer-valued expression works, not just a plain identifier
 * (the original `&add->fieldname` misparsed for, e.g., `p+1`). */
#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
  HASH_ADD_KEYPTR(hh,head,&((add)->fieldname),keylen_in,add)
151 
/* HASH_ADD_KEYPTR: append element `add`, whose key is the external buffer
 * keyptr/keylen_in, to the hash headed by `head`. The key buffer is NOT
 * copied — only the pointer is stored, so it must outlive the element.
 * First element allocates the shared table (HASH_MAKE_TABLE); otherwise
 * the element is linked at the tail of the app-order list. The element is
 * then hashed, inserted into its bucket (which may trigger expansion),
 * recorded in the bloom filter, and optionally emitted/checked when the
 * HASH_EMIT_KEYS/HASH_DEBUG hooks are enabled. */
152 #define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
153 do { \
154  unsigned _ha_bkt; \
155  (add)->hh.next = NULL; \
156  (add)->hh.key = (char*)keyptr; \
157  (add)->hh.keylen = keylen_in; \
158  if (!(head)) { \
159  head = (add); /* first element becomes the head */ \
160  (head)->hh.prev = NULL; \
161  HASH_MAKE_TABLE(hh,head); \
162  } else { \
163  (head)->hh.tbl->tail->next = (add); \
164  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
165  (head)->hh.tbl->tail = &((add)->hh); \
166  } \
167  (head)->hh.tbl->num_items++; \
168  (add)->hh.tbl = (head)->hh.tbl; \
169  HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
170  (add)->hh.hashv, _ha_bkt); \
171  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
172  HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \
173  HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
174  HASH_FSCK(hh,head); \
175 } while(0)
176 
/* map a 32-bit hash value onto one of num_bkts buckets. Relies on
 * num_bkts being a power of two, so the AND is an exact modulo.
 * All arguments — including the lvalue `bkt` — are parenthesized for
 * macro hygiene (the original left `bkt` bare). */
#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
do { \
  (bkt) = ((hashv) & ((num_bkts) - 1)); \
} while(0)
181 
182 /* delete "delptr" from the hash table.
183  * "the usual" patch-up process for the app-order doubly-linked-list.
184  * The use of _hd_hh_del below deserves special explanation.
185  * These used to be expressed using (delptr) but that led to a bug
186  * if someone used the same symbol for the head and deletee, like
187  * HASH_DELETE(hh,users,users);
188  * We want that to work, but by changing the head (users) below
189  * we were forfeiting our ability to further refer to the deletee (users)
190  * in the patch-up process. Solution: use scratch space to
191  * copy the deletee pointer, then the latter references are via that
192  * scratch pointer rather than through the repointed (users) symbol.
193  */
194 #define HASH_DELETE(hh,head,delptr) \
195 do { \
196  unsigned _hd_bkt; \
197  struct UT_hash_handle *_hd_hh_del; \
198  if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
199  uthash_free((head)->hh.tbl->buckets, \
200  (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
201  HASH_BLOOM_FREE((head)->hh.tbl); \
202  uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
203  head = NULL; \
204  } else { \
205  _hd_hh_del = &((delptr)->hh); \
206  if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
207  (head)->hh.tbl->tail = \
208  (UT_hash_handle*)((char*)((delptr)->hh.prev) + \
209  (head)->hh.tbl->hho); \
210  } \
211  if ((delptr)->hh.prev) { \
212  ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \
213  (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
214  } else { \
215  DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
216  } \
217  if (_hd_hh_del->next) { \
218  ((UT_hash_handle*)((char*)_hd_hh_del->next + \
219  (head)->hh.tbl->hho))->prev = \
220  _hd_hh_del->prev; \
221  } \
222  HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
223  HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
224  (head)->hh.tbl->num_items--; \
225  } \
226  HASH_FSCK(hh,head); \
227 } while (0)
228 
229 
/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL for common key types
 * (NUL-terminated string fields, int fields, pointer fields), all using
 * the conventional handle name `hh`. The `add` argument is parenthesized
 * before member access so callers may pass any pointer-valued expression
 * (the original `strlen(add->strfield)` required a bare identifier). */
#define HASH_FIND_STR(head,findstr,out) \
  HASH_FIND(hh,head,findstr,strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
  HASH_ADD(hh,head,strfield,strlen((add)->strfield),add)
#define HASH_FIND_INT(head,findint,out) \
  HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
  HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_FIND_PTR(head,findptr,out) \
  HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
  HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_DEL(head,delptr) \
  HASH_DELETE(hh,head,delptr)
245 
246 /* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
247  * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
248  */
249 #ifdef HASH_DEBUG
250 #define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
251 #define HASH_FSCK(hh,head) \
252 do { \
253  unsigned _bkt_i; \
254  unsigned _count, _bkt_count; \
255  char *_prev; \
256  struct UT_hash_handle *_thh; \
257  if (head) { \
258  _count = 0; \
259  for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
260  _bkt_count = 0; \
261  _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
262  _prev = NULL; \
263  while (_thh) { \
264  if (_prev != (char*)(_thh->hh_prev)) { \
265  HASH_OOPS("invalid hh_prev %p, actual %p\n", \
266  _thh->hh_prev, _prev ); \
267  } \
268  _bkt_count++; \
269  _prev = (char*)(_thh); \
270  _thh = _thh->hh_next; \
271  } \
272  _count += _bkt_count; \
273  if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
274  HASH_OOPS("invalid bucket count %d, actual %d\n", \
275  (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
276  } \
277  } \
278  if (_count != (head)->hh.tbl->num_items) { \
279  HASH_OOPS("invalid hh item count %d, actual %d\n", \
280  (head)->hh.tbl->num_items, _count ); \
281  } \
282  /* traverse hh in app order; check next/prev integrity, count */ \
283  _count = 0; \
284  _prev = NULL; \
285  _thh = &(head)->hh; \
286  while (_thh) { \
287  _count++; \
288  if (_prev !=(char*)(_thh->prev)) { \
289  HASH_OOPS("invalid prev %p, actual %p\n", \
290  _thh->prev, _prev ); \
291  } \
292  _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
293  _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
294  (head)->hh.tbl->hho) : NULL ); \
295  } \
296  if (_count != (head)->hh.tbl->num_items) { \
297  HASH_OOPS("invalid app item count %d, actual %d\n", \
298  (head)->hh.tbl->num_items, _count ); \
299  } \
300  } \
301 } while (0)
302 #else
303 #define HASH_FSCK(hh,head)
304 #endif
305 
306 /* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
307  * the descriptor to which this macro is defined for tuning the hash function.
308  * The app can #include <unistd.h> to get the prototype for write(2). */
309 #ifdef HASH_EMIT_KEYS
310 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
311 do { \
312  unsigned _klen = fieldlen; \
313  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
314  write(HASH_EMIT_KEYS, keyptr, fieldlen); \
315 } while (0)
316 #else
317 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
318 #endif
319 
320 /* default to Jenkin's hash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */
321 #ifdef HASH_FUNCTION
322 #define HASH_FCN HASH_FUNCTION
323 #else
324 #define HASH_FCN HASH_JEN
325 #endif
326 
/* The Bernstein (djb "times 33") hash, used in Perl prior to v5.6:
 * hashv = hashv*33 + byte for each key byte, then mask to a bucket.
 * Bytes are read through plain char, as in the rest of this file. */
#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_hb_key = (char*)(key); \
  unsigned _hb_n = keylen; \
  (hashv) = 0; \
  for (; _hb_n != 0; _hb_n--) { \
  (hashv) = ((hashv) * 33) + *_hb_key; \
  _hb_key++; \
  } \
  bkt = (hashv) & (num_bkts-1); \
} while (0)
336 
337 
338 /* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
339  * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
/* Shift-add-XOR hash: for each key byte,
 * hashv ^= (hashv << 5) + (hashv >> 2) + byte; then mask to a bucket.
 * Bytes are read through plain char, as elsewhere in this file. */
#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_hs_key=(char*)(key); \
  unsigned _hs_idx = 0; \
  hashv = 0; \
  while (_hs_idx < keylen) { \
  hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_hs_idx]; \
  _hs_idx++; \
  } \
  bkt = hashv & (num_bkts-1); \
} while (0)
349 
/* 32-bit FNV-1 hash: hashv = (hashv * 16777619) ^ byte, seeded with the
 * FNV offset basis 2166136261. Bytes are read through plain char, so
 * results for bytes >= 0x80 are implementation-defined on signed-char
 * platforms. No semicolon after while(0): the original `} while(0);`
 * made the macro two statements and broke unbraced if/else callers. */
#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _fn_i; \
  char *_hf_key=(char*)(key); \
  hashv = 2166136261UL; \
  for(_fn_i=0; _fn_i < keylen; _fn_i++) \
  hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \
  bkt = hashv & (num_bkts-1); \
} while(0)
359 
360 #define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
361 do { \
362  unsigned _ho_i; \
363  char *_ho_key=(char*)(key); \
364  hashv = 0; \
365  for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
366  hashv += _ho_key[_ho_i]; \
367  hashv += (hashv << 10); \
368  hashv ^= (hashv >> 6); \
369  } \
370  hashv += (hashv << 3); \
371  hashv ^= (hashv >> 11); \
372  hashv += (hashv << 15); \
373  bkt = hashv & (num_bkts-1); \
374 } while(0)
375 
376 #define HASH_JEN_MIX(a,b,c) \
377 do { \
378  a -= b; a -= c; a ^= ( c >> 13 ); \
379  b -= c; b -= a; b ^= ( a << 8 ); \
380  c -= a; c -= b; c ^= ( b >> 13 ); \
381  a -= b; a -= c; a ^= ( c >> 12 ); \
382  b -= c; b -= a; b ^= ( a << 16 ); \
383  c -= a; c -= b; c ^= ( b >> 5 ); \
384  a -= b; a -= c; a ^= ( c >> 3 ); \
385  b -= c; b -= a; b ^= ( a << 10 ); \
386  c -= a; c -= b; c ^= ( b >> 15 ); \
387 } while (0)
388 
389 #define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
390 do { \
391  unsigned _hj_i,_hj_j,_hj_k; \
392  char *_hj_key=(char*)(key); \
393  hashv = 0xfeedbeef; \
394  _hj_i = _hj_j = 0x9e3779b9; \
395  _hj_k = keylen; \
396  while (_hj_k >= 12) { \
397  _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
398  + ( (unsigned)_hj_key[2] << 16 ) \
399  + ( (unsigned)_hj_key[3] << 24 ) ); \
400  _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
401  + ( (unsigned)_hj_key[6] << 16 ) \
402  + ( (unsigned)_hj_key[7] << 24 ) ); \
403  hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
404  + ( (unsigned)_hj_key[10] << 16 ) \
405  + ( (unsigned)_hj_key[11] << 24 ) ); \
406  \
407  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
408  \
409  _hj_key += 12; \
410  _hj_k -= 12; \
411  } \
412  hashv += keylen; \
413  switch ( _hj_k ) { \
414  case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \
415  case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \
416  case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \
417  case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \
418  case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \
419  case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \
420  case 5: _hj_j += _hj_key[4]; \
421  case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \
422  case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \
423  case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \
424  case 1: _hj_i += _hj_key[0]; \
425  } \
426  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
427  bkt = hashv & (num_bkts-1); \
428 } while(0)
429 
/* The Paul Hsieh hash function (SuperFastHash) */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
/* these targets tolerate unaligned 16-bit loads, so read directly */
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
/* portable fallback: assemble the value byte by byte (little-endian order) */
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
  +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif

/* SuperFastHash: consume the key 32 bits (two 16-bit loads) per round,
 * handle the 0-3 leftover bytes in a switch, then force a final
 * avalanche. No semicolon after while(0): the original `} while(0);`
 * made the macro two statements and broke unbraced if/else callers. */
#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_sfh_key=(char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = keylen; \
  \
  int _sfh_rem = _sfh_len & 3; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabe; \
  \
  /* Main loop */ \
  for (;_sfh_len > 0; _sfh_len--) { \
  hashv += get16bits (_sfh_key); \
  _sfh_tmp = (get16bits (_sfh_key+2) << 11) ^ hashv; \
  hashv = (hashv << 16) ^ _sfh_tmp; \
  _sfh_key += 2*sizeof (uint16_t); \
  hashv += hashv >> 11; \
  } \
  \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
  case 3: hashv += get16bits (_sfh_key); \
  hashv ^= hashv << 16; \
  hashv ^= _sfh_key[sizeof (uint16_t)] << 18; \
  hashv += hashv >> 11; \
  break; \
  case 2: hashv += get16bits (_sfh_key); \
  hashv ^= hashv << 11; \
  hashv += hashv >> 17; \
  break; \
  case 1: hashv += *_sfh_key; \
  hashv ^= hashv << 10; \
  hashv += hashv >> 1; \
  } \
  \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
  bkt = hashv & (num_bkts-1); \
} while(0)
484 
485 #ifdef HASH_USING_NO_STRICT_ALIASING
486 /* The MurmurHash exploits some CPU's (e.g. x86) tolerance for unaligned reads.
487  * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
488  * So MurmurHash comes in two versions, the faster unaligned one and the slower
489  * aligned one. We only use the faster one on CPU's where we know it's safe.
490  *
491  * Note the preprocessor built-in defines can be emitted using:
492  *
493  * gcc -m64 -dM -E - < /dev/null (on gcc)
494  * cc -## a.c (where a.c is a simple test file) (Sun Studio)
495  */
496 #if (defined(__i386__) || defined(__x86_64__))
497 #define HASH_MUR HASH_MUR_UNALIGNED
498 #else
499 #define HASH_MUR HASH_MUR_ALIGNED
500 #endif
501 
502 /* Appleby's MurmurHash fast version for unaligned-tolerant archs like i386 */
503 #define HASH_MUR_UNALIGNED(key,keylen,num_bkts,hashv,bkt) \
504 do { \
505  const unsigned int _mur_m = 0x5bd1e995; \
506  const int _mur_r = 24; \
507  hashv = 0xcafebabe ^ keylen; \
508  char *_mur_key = (char *)(key); \
509  uint32_t _mur_tmp, _mur_len = keylen; \
510  \
511  for (;_mur_len >= 4; _mur_len-=4) { \
512  _mur_tmp = *(uint32_t *)_mur_key; \
513  _mur_tmp *= _mur_m; \
514  _mur_tmp ^= _mur_tmp >> _mur_r; \
515  _mur_tmp *= _mur_m; \
516  hashv *= _mur_m; \
517  hashv ^= _mur_tmp; \
518  _mur_key += 4; \
519  } \
520  \
521  switch(_mur_len) \
522  { \
523  case 3: hashv ^= _mur_key[2] << 16; \
524  case 2: hashv ^= _mur_key[1] << 8; \
525  case 1: hashv ^= _mur_key[0]; \
526  hashv *= _mur_m; \
527  }; \
528  \
529  hashv ^= hashv >> 13; \
530  hashv *= _mur_m; \
531  hashv ^= hashv >> 15; \
532  \
533  bkt = hashv & (num_bkts-1); \
534 } while(0)
535 
536 /* Appleby's MurmurHash version for alignment-sensitive archs like Sparc */
537 #define HASH_MUR_ALIGNED(key,keylen,num_bkts,hashv,bkt) \
538 do { \
539  const unsigned int _mur_m = 0x5bd1e995; \
540  const int _mur_r = 24; \
541  hashv = 0xcafebabe ^ (keylen); \
542  char *_mur_key = (char *)(key); \
543  uint32_t _mur_len = keylen; \
544  int _mur_align = (int)_mur_key & 3; \
545  \
546  if (_mur_align && (_mur_len >= 4)) { \
547  unsigned _mur_t = 0, _mur_d = 0; \
548  switch(_mur_align) { \
549  case 1: _mur_t |= _mur_key[2] << 16; \
550  case 2: _mur_t |= _mur_key[1] << 8; \
551  case 3: _mur_t |= _mur_key[0]; \
552  } \
553  _mur_t <<= (8 * _mur_align); \
554  _mur_key += 4-_mur_align; \
555  _mur_len -= 4-_mur_align; \
556  int _mur_sl = 8 * (4-_mur_align); \
557  int _mur_sr = 8 * _mur_align; \
558  \
559  for (;_mur_len >= 4; _mur_len-=4) { \
560  _mur_d = *(unsigned *)_mur_key; \
561  _mur_t = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
562  unsigned _mur_k = _mur_t; \
563  _mur_k *= _mur_m; \
564  _mur_k ^= _mur_k >> _mur_r; \
565  _mur_k *= _mur_m; \
566  hashv *= _mur_m; \
567  hashv ^= _mur_k; \
568  _mur_t = _mur_d; \
569  _mur_key += 4; \
570  } \
571  _mur_d = 0; \
572  if(_mur_len >= _mur_align) { \
573  switch(_mur_align) { \
574  case 3: _mur_d |= _mur_key[2] << 16; \
575  case 2: _mur_d |= _mur_key[1] << 8; \
576  case 1: _mur_d |= _mur_key[0]; \
577  } \
578  unsigned _mur_k = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
579  _mur_k *= _mur_m; \
580  _mur_k ^= _mur_k >> _mur_r; \
581  _mur_k *= _mur_m; \
582  hashv *= _mur_m; \
583  hashv ^= _mur_k; \
584  _mur_k += _mur_align; \
585  _mur_len -= _mur_align; \
586  \
587  switch(_mur_len) \
588  { \
589  case 3: hashv ^= _mur_key[2] << 16; \
590  case 2: hashv ^= _mur_key[1] << 8; \
591  case 1: hashv ^= _mur_key[0]; \
592  hashv *= _mur_m; \
593  } \
594  } else { \
595  switch(_mur_len) \
596  { \
597  case 3: _mur_d ^= _mur_key[2] << 16; \
598  case 2: _mur_d ^= _mur_key[1] << 8; \
599  case 1: _mur_d ^= _mur_key[0]; \
600  case 0: hashv ^= (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
601  hashv *= _mur_m; \
602  } \
603  } \
604  \
605  hashv ^= hashv >> 13; \
606  hashv *= _mur_m; \
607  hashv ^= hashv >> 15; \
608  } else { \
609  for (;_mur_len >= 4; _mur_len-=4) { \
610  unsigned _mur_k = *(unsigned*)_mur_key; \
611  _mur_k *= _mur_m; \
612  _mur_k ^= _mur_k >> _mur_r; \
613  _mur_k *= _mur_m; \
614  hashv *= _mur_m; \
615  hashv ^= _mur_k; \
616  _mur_key += 4; \
617  } \
618  switch(_mur_len) \
619  { \
620  case 3: hashv ^= _mur_key[2] << 16; \
621  case 2: hashv ^= _mur_key[1] << 8; \
622  case 1: hashv ^= _mur_key[0]; \
623  hashv *= _mur_m; \
624  } \
625  \
626  hashv ^= hashv >> 13; \
627  hashv *= _mur_m; \
628  hashv ^= hashv >> 15; \
629  } \
630  bkt = hashv & (num_bkts-1); \
631 } while(0)
632 #endif /* HASH_USING_NO_STRICT_ALIASING */
633 
/* key comparison function; returns 0 if the two keys are byte-equal over
 * len bytes. Arguments parenthesized for macro hygiene. */
#define HASH_KEYCMP(a,b,len) memcmp((a),(b),(len))
636 
637 /* iterate over items in a known bucket to find desired item */
/* Walks the bucket's hh_head chain, setting `out` to each candidate
 * element in turn. A cheap keylen comparison guards the memcmp
 * (HASH_KEYCMP). Leaves `out` NULL if no element matches.
 * DECLTYPE_ASSIGN hides the cast back to the element type. */
638 #define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
639 do { \
640  if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \
641  else out=NULL; \
642  while (out) { \
643  if (out->hh.keylen == keylen_in) { \
644  if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \
645  } \
646  if (out->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,out->hh.hh_next)); \
647  else out = NULL; \
648  } \
649 } while(0)
650 
651 /* add an item to a bucket */
/* Pushes handle addhh onto the front of bucket `head`'s chain and bumps
 * the bucket count. If the chain length reaches the (per-bucket
 * expand_mult-scaled) capacity threshold and expansion has not been
 * inhibited (tbl->noexpand), the whole table's buckets are doubled. */
652 #define HASH_ADD_TO_BKT(head,addhh) \
653 do { \
654  head.count++; \
655  (addhh)->hh_next = head.hh_head; \
656  (addhh)->hh_prev = NULL; \
657  if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \
658  (head).hh_head=addhh; \
659  if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
660  && (addhh)->tbl->noexpand != 1) { \
661  HASH_EXPAND_BUCKETS((addhh)->tbl); \
662  } \
663 } while(0)
664 
/* remove an item from a given bucket: decrement the bucket count, repoint
 * the bucket head if hh_del was first, and splice hh_del out of the
 * hh_prev/hh_next chain. Wrapped in do/while(0) so the macro expands to a
 * single statement; the original was a bare statement sequence whose
 * second and later statements escaped an unbraced if/else at the call
 * site. (The `hh` argument is unused, kept for interface compatibility.) */
#define HASH_DEL_IN_BKT(hh,head,hh_del) \
do { \
  (head).count--; \
  if ((head).hh_head == hh_del) { \
  (head).hh_head = hh_del->hh_next; \
  } \
  if (hh_del->hh_prev) { \
  hh_del->hh_prev->hh_next = hh_del->hh_next; \
  } \
  if (hh_del->hh_next) { \
  hh_del->hh_next->hh_prev = hh_del->hh_prev; \
  } \
} while (0)
677 
678 /* Bucket expansion has the effect of doubling the number of buckets
679  * and redistributing the items into the new buckets. Ideally the
680  * items will distribute more or less evenly into the new buckets
681  * (the extent to which this is true is a measure of the quality of
682  * the hash function as it applies to the key domain).
683  *
684  * With the items distributed into more buckets, the chain length
685  * (item count) in each bucket is reduced. Thus by expanding buckets
686  * the hash keeps a bound on the chain length. This bounded chain
687  * length is the essence of how a hash provides constant time lookup.
688  *
689  * The calculation of tbl->ideal_chain_maxlen below deserves some
690  * explanation. First, keep in mind that we're calculating the ideal
691  * maximum chain length based on the *new* (doubled) bucket count.
692  * In fractions this is just n/b (n=number of items,b=new num buckets).
693  * Since the ideal chain length is an integer, we want to calculate
694  * ceil(n/b). We don't depend on floating point arithmetic in this
695  * hash, so to calculate ceil(n/b) with integers we could write
696  *
697  * ceil(n/b) = (n/b) + ((n%b)?1:0)
698  *
699  * and in fact a previous version of this hash did just that.
700  * But now we have improved things a bit by recognizing that b is
701  * always a power of two. We keep its base 2 log handy (call it lb),
702  * so now we can write this with a bit shift and logical AND:
703  *
704  * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
705  *
706  */
707 #define HASH_EXPAND_BUCKETS(tbl) \
708 do { \
709  unsigned _he_bkt; \
710  unsigned _he_bkt_i; \
711  struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
712  UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
713  _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
714  2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
715  if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
716  memset(_he_new_buckets, 0, \
717  2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
718  tbl->ideal_chain_maxlen = \
719  (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
720  ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
721  tbl->nonideal_items = 0; \
722  for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
723  { \
724  _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
725  while (_he_thh) { \
726  _he_hh_nxt = _he_thh->hh_next; \
727  HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
728  _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
729  if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
730  tbl->nonideal_items++; \
731  _he_newbkt->expand_mult = _he_newbkt->count / \
732  tbl->ideal_chain_maxlen; \
733  } \
734  _he_thh->hh_prev = NULL; \
735  _he_thh->hh_next = _he_newbkt->hh_head; \
736  if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
737  _he_thh; \
738  _he_newbkt->hh_head = _he_thh; \
739  _he_thh = _he_hh_nxt; \
740  } \
741  } \
742  uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
743  tbl->num_buckets *= 2; \
744  tbl->log2_num_buckets++; \
745  tbl->buckets = _he_new_buckets; \
746  tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
747  (tbl->ineff_expands+1) : 0; \
748  if (tbl->ineff_expands > 1) { \
749  tbl->noexpand=1; \
750  uthash_noexpand_fyi(tbl); \
751  } \
752  uthash_expand_fyi(tbl); \
753 } while(0)
754 
755 
756 /* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
757 /* Note that HASH_SORT assumes the hash handle name to be hh.
758  * HASH_SRT was added to allow the hash handle name to be passed in. */
759 #define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
760 #define HASH_SRT(hh,head,cmpfcn) \
761 do { \
762  unsigned _hs_i; \
763  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
764  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
765  if (head) { \
766  _hs_insize = 1; \
767  _hs_looping = 1; \
768  _hs_list = &((head)->hh); \
769  while (_hs_looping) { \
770  _hs_p = _hs_list; \
771  _hs_list = NULL; \
772  _hs_tail = NULL; \
773  _hs_nmerges = 0; \
774  while (_hs_p) { \
775  _hs_nmerges++; \
776  _hs_q = _hs_p; \
777  _hs_psize = 0; \
778  for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
779  _hs_psize++; \
780  _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
781  ((void*)((char*)(_hs_q->next) + \
782  (head)->hh.tbl->hho)) : NULL); \
783  if (! (_hs_q) ) break; \
784  } \
785  _hs_qsize = _hs_insize; \
786  while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
787  if (_hs_psize == 0) { \
788  _hs_e = _hs_q; \
789  _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
790  ((void*)((char*)(_hs_q->next) + \
791  (head)->hh.tbl->hho)) : NULL); \
792  _hs_qsize--; \
793  } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
794  _hs_e = _hs_p; \
795  _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
796  ((void*)((char*)(_hs_p->next) + \
797  (head)->hh.tbl->hho)) : NULL); \
798  _hs_psize--; \
799  } else if (( \
800  cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
801  DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
802  ) <= 0) { \
803  _hs_e = _hs_p; \
804  _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
805  ((void*)((char*)(_hs_p->next) + \
806  (head)->hh.tbl->hho)) : NULL); \
807  _hs_psize--; \
808  } else { \
809  _hs_e = _hs_q; \
810  _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
811  ((void*)((char*)(_hs_q->next) + \
812  (head)->hh.tbl->hho)) : NULL); \
813  _hs_qsize--; \
814  } \
815  if ( _hs_tail ) { \
816  _hs_tail->next = ((_hs_e) ? \
817  ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
818  } else { \
819  _hs_list = _hs_e; \
820  } \
821  _hs_e->prev = ((_hs_tail) ? \
822  ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
823  _hs_tail = _hs_e; \
824  } \
825  _hs_p = _hs_q; \
826  } \
827  _hs_tail->next = NULL; \
828  if ( _hs_nmerges <= 1 ) { \
829  _hs_looping=0; \
830  (head)->hh.tbl->tail = _hs_tail; \
831  DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
832  } \
833  _hs_insize *= 2; \
834  } \
835  HASH_FSCK(hh,head); \
836  } \
837 } while (0)
838 
839 /* This function selects items from one hash into another hash.
840  * The end result is that the selected items have dual presence
841  * in both hashes. There is no copy of the items made; rather
842  * they are added into the new hash through a secondary hash
843  * hash handle that must be present in the structure. */
844 #define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
845 do { \
846  unsigned _src_bkt, _dst_bkt; \
847  void *_last_elt=NULL, *_elt; \
848  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
849  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
850  if (src) { \
851  for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
852  for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
853  _src_hh; \
854  _src_hh = _src_hh->hh_next) { \
855  _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
856  if (cond(_elt)) { \
857  _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
858  _dst_hh->key = _src_hh->key; \
859  _dst_hh->keylen = _src_hh->keylen; \
860  _dst_hh->hashv = _src_hh->hashv; \
861  _dst_hh->prev = _last_elt; \
862  _dst_hh->next = NULL; \
863  if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
864  if (!dst) { \
865  DECLTYPE_ASSIGN(dst,_elt); \
866  HASH_MAKE_TABLE(hh_dst,dst); \
867  } else { \
868  _dst_hh->tbl = (dst)->hh_dst.tbl; \
869  } \
870  HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
871  HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
872  (dst)->hh_dst.tbl->num_items++; \
873  _last_elt = _elt; \
874  _last_elt_hh = _dst_hh; \
875  } \
876  } \
877  } \
878  } \
879  HASH_FSCK(hh_dst,dst); \
880 } while (0)
881 
/* free all hash-table bookkeeping (buckets, bloom bit vector, table
 * struct) in one shot and set head to NULL. The elements themselves are
 * not freed — they belong to the application. HASH_BLOOM_FREE is a no-op
 * unless HASH_BLOOM is defined; the original omitted it and leaked the
 * bloom bit vector. */
#define HASH_CLEAR(hh,head) \
do { \
  if (head) { \
  uthash_free((head)->hh.tbl->buckets, \
  (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
  HASH_BLOOM_FREE((head)->hh.tbl); \
  uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
  (head)=NULL; \
  } \
} while(0)
891 
/* HASH_ITER: deletion-safe iteration over all elements in app order.
 * `tmp` always caches the next element before the body runs, so the body
 * may safely HASH_DELETE (or free) `el`. The NO_DECLTYPE variant assigns
 * through a char** to avoid needing decltype/__typeof. */
892 #ifdef NO_DECLTYPE
893 #define HASH_ITER(hh,head,el,tmp) \
894 for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL); \
895  el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL))
896 #else
897 #define HASH_ITER(hh,head,el,tmp) \
898 for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL); \
899  el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL))
900 #endif
901 
902 /* obtain a count of items in the hash */
/* O(1): reads the table's num_items; 0 for a NULL head */
903 #define HASH_COUNT(head) HASH_CNT(hh,head)
904 #define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0)
905 
906 typedef struct UT_hash_bucket {
908  unsigned count;
909 
910  /* expand_mult is normally set to 0. In this situation, the max chain length
911  * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
912  * the bucket's chain exceeds this length, bucket expansion is triggered).
913  * However, setting expand_mult to a non-zero value delays bucket expansion
914  * (that would be triggered by additions to this particular bucket)
915  * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
916  * (The multiplier is simply expand_mult+1). The whole idea of this
917  * multiplier is to reduce bucket expansions, since they are expensive, in
918  * situations where we know that a particular bucket tends to be overused.
919  * It is better to let its chain length grow to a longer yet-still-bounded
920  * value, than to do an O(n) bucket expansion too often.
921  */
922  unsigned expand_mult;
923 
925 
926 /* random signature used only to find hash tables in external analysis */
927 #define HASH_SIGNATURE 0xa0111fe1
928 #define HASH_BLOOM_SIGNATURE 0xb12220f2
929 
930 typedef struct UT_hash_table {
933  unsigned num_items;
934  struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
935  ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */
936 
937  /* in an ideal situation (all buckets used equally), no bucket would have
938  * more than ceil(#items/#buckets) items. that's the ideal chain length. */
940 
941  /* nonideal_items is the number of items in the hash whose chain position
942  * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
943  * hash distribution; reaching them in a chain traversal takes >ideal steps */
944  unsigned nonideal_items;
945 
946  /* ineffective expands occur when a bucket doubling was performed, but
947  * afterward, more than half the items in the hash had nonideal chain
948  * positions. If this happens on two consecutive expansions we inhibit any
949  * further expansion, as it's not helping; this happens when the hash
950  * function isn't a good fit for the key domain. When expansion is inhibited
951  * the hash will still work, albeit no longer in constant time. */
953 
954  uint32_t signature; /* used only to find hash tables in external analysis */
955 #ifdef HASH_BLOOM
956  uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
957  uint8_t *bloom_bv;
958  char bloom_nbits;
959 #endif
960 
961 } UT_hash_table;
962 
963 typedef struct UT_hash_handle {
965  void *prev; /* prev element in app order */
966  void *next; /* next element in app order */
967  struct UT_hash_handle *hh_prev; /* previous hh in bucket order */
968  struct UT_hash_handle *hh_next; /* next hh in bucket order */
969  void *key; /* ptr to enclosing struct's key */
970  unsigned keylen; /* enclosing struct's key len */
971  unsigned hashv; /* result of hash-fcn(key) */
973 
974 #endif /* UTHASH_H */
unsigned ineff_expands
Definition: uthash.h:952
struct UT_hash_handle * tail
Definition: uthash.h:934
UT_hash_bucket * buckets
Definition: uthash.h:931
unsigned expand_mult
Definition: uthash.h:922
struct UT_hash_handle * hh_head
Definition: uthash.h:907
unsigned log2_num_buckets
Definition: uthash.h:932
struct UT_hash_handle * hh_next
Definition: uthash.h:968
unsigned count
Definition: uthash.h:908
unsigned noexpand
Definition: uthash.h:952
unsigned keylen
Definition: uthash.h:970
unsigned nonideal_items
Definition: uthash.h:944
ptrdiff_t hho
Definition: uthash.h:935
unsigned num_buckets
Definition: uthash.h:932
struct UT_hash_bucket UT_hash_bucket
unsigned hashv
Definition: uthash.h:971
void * key
Definition: uthash.h:969
struct UT_hash_table * tbl
Definition: uthash.h:964
uint32_t signature
Definition: uthash.h:954
struct UT_hash_table UT_hash_table
unsigned ideal_chain_maxlen
Definition: uthash.h:939
struct UT_hash_handle UT_hash_handle
struct UT_hash_handle * hh_prev
Definition: uthash.h:967
unsigned num_items
Definition: uthash.h:933
void * prev
Definition: uthash.h:965
void * next
Definition: uthash.h:966