/*
 * This code was taken from http://ccodearchive.net/info/hash.html
 * The original file was modified to remove unwanted code and to
 * adapt it to the current build environment.
 */
/*
-------------------------------------------------------------------------------
lookup3.c, by Bob Jenkins, May 2006, Public Domain.

These are functions for producing 32-bit hashes for hash table lookup.
hash_word(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
are externally useful functions.  Routines to test the hash are included
if SELF_TEST is defined.  You can use this free for any purpose.  It's in
the public domain.  It has no warranty.

You probably want to use hashlittle().  hashlittle() and hashbig()
hash byte arrays.  hashlittle() is faster than hashbig() on
little-endian machines.  Intel and AMD are little-endian machines.
On second thought, you probably want hashlittle2(), which is identical to
hashlittle() except it returns two 32-bit hashes for the price of one.
You could implement hashbig2() if you wanted but I haven't bothered here.

If you want to find a hash of, say, exactly 7 integers, do
  a = i1;  b = i2;  c = i3;
  mix(a,b,c);
  a += i4;  b += i5;  c += i6;
  mix(a,b,c);
  a += i7;
  final(a,b,c);
then use c as the hash value.  (A compilable sketch of this recipe is
included after the final() macro below.)  If you have a variable length
array of 4-byte integers to hash, use hash_word().  If you have a byte
array (like a character string), use hashlittle().  If you have several
byte arrays, or a mix of things, see the comments above hashlittle().

Why is this so big?  I read 12 bytes at a time into 3 4-byte integers,
then mix those integers.  This is fast (you can do a lot more thorough
mixing with 12*3 instructions on 3 integers than you can with 3 instructions
on 1 byte), but shoehorning those bytes into integers efficiently is messy.
-------------------------------------------------------------------------------
*/
#include <netlink/hash.h>

#if HAVE_LITTLE_ENDIAN
#define HASH_LITTLE_ENDIAN 1
#define HASH_BIG_ENDIAN 0
#elif HAVE_BIG_ENDIAN
#define HASH_LITTLE_ENDIAN 0
#define HASH_BIG_ENDIAN 1
#else
#error Unknown endian
#endif

#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))

/*
-------------------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.

This is reversible, so any information in (a,b,c) before mix() is
still in (a,b,c) after mix().

If four pairs of (a,b,c) inputs are run through mix(), or through
mix() in reverse, there are at least 32 bits of the output that
are sometimes the same for one pair and different for another pair.
This was tested for:
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
satisfy this are
    4  6  8 16 19  4
    9 15  3 18 27 15
   14  9  3  7 17  3
Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
for "differ" defined as + with a one-bit base and a two-bit delta.  I
used http://burtleburtle.net/bob/hash/avalanche.html to choose
the operations, constants, and arrangements of the variables.

This does not achieve avalanche.  There are input bits of (a,b,c)
that fail to affect some output bits of (a,b,c), especially of a.  The
most thoroughly mixed value is c, but it doesn't really even achieve
avalanche in c.

This allows some parallelism.  Read-after-writes are good at doubling
the number of bits affected, so the goal of mixing pulls in the opposite
direction as the goal of parallelism.  I did what I could.  Rotates
seem to cost as much as shifts on every machine I could lay my hands
on, and rotates are much kinder to the top and bottom bits, so I used
rotates.
-------------------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c,16); c += b; \
	b -= a; b ^= rot(a,19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}

/*
-------------------------------------------------------------------------------
final -- final mixing of 3 32-bit values (a,b,c) into c

Pairs of (a,b,c) values differing in only a few bits will usually
produce values of c that look totally different.  This was tested for
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^.  For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

These constants passed:
 14 11 25 16 4 14 24
 12 14 25 16 4 14 24
and these came close:
  4  8 15 26 3 22 24
 10  8 15 26 3 22 24
 11  8 15 26 3 22 24
-------------------------------------------------------------------------------
*/
#define final(a,b,c) \
{ \
	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4);  \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}
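
/*
 * A minimal sketch (not part of the original lookup3.c) of the "exactly
 * 7 integers" recipe from the header comment above: load three words,
 * mix(), add three more, mix(), add the last one, final(), and use c as
 * the hash.  The function name hash_seven_words is made up for
 * illustration, and the block is kept under #if 0 so it stays out of
 * the build.
 */
#if 0
static uint32_t hash_seven_words(const uint32_t i[7])
{
	uint32_t a, b, c;

	a = i[0]; b = i[1]; c = i[2];
	mix(a,b,c);
	a += i[3]; b += i[4]; c += i[5];
	mix(a,b,c);
	a += i[6];
	final(a,b,c);

	return c;	/* c is the most thoroughly mixed value */
}
#endif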

/*
-------------------------------------------------------------------------------
hashlittle() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  length  : the length of the key, counting by bytes
  val2    : IN: can be any 4-byte value OUT: second 32 bit hash.
Returns a 32-bit value.  Every bit of the key affects every bit of
the return value.  Two keys differing by one or two bits will have
totally different hash values.  Note that the return value is better
mixed than val2, so use that first.

The best hash table sizes are powers of 2.  There is no need to do
mod a prime (mod is sooo slow!).  If you need less than 32 bits,
use a bitmask.  For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.

If you are hashing n strings (uint8_t **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = hashlittle(k[i], len[i], &h);
(note that in this modified version val2 is passed by pointer; a
compilable sketch follows the function below)

By Bob Jenkins, 2006.  bob_jenkins@burtleburtle.net.  You may use this
code any way you wish, private, educational, or commercial.  It's free.

Use for hash table lookup, or anything where one collision in 2^^32 is
acceptable.  Do NOT use for cryptographic purposes.
-------------------------------------------------------------------------------
*/

static uint32_t hashlittle( const void *key, size_t length, uint32_t *val2 )
{
	uint32_t a,b,c;	/* internal state */
	union { const void *ptr; size_t i; } u;	/* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)length) + *val2;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *)key;	/* read 32-bit chunks */
		const uint8_t *k8;

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12)
		{
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a,b,c);
			length -= 12;
			k += 3;
		}

		/*----------------------------- handle the last (probably partial) block */
		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read.  Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string.  Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this.  But VALGRIND will
		 * still catch it and complain.  The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 *
		 * Not in my testing with gcc 4.5 on an Intel i5 CPU, at least --RR.
		 */
#if 0
		switch(length)
		{
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
		case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
		case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
		case 6 : b+=k[1]&0xffff; a+=k[0]; break;
		case 5 : b+=k[1]&0xff; a+=k[0]; break;
		case 4 : a+=k[0]; break;
		case 3 : a+=k[0]&0xffffff; break;
		case 2 : a+=k[0]&0xffff; break;
		case 1 : a+=k[0]&0xff; break;
		case 0 : return c;	/* zero length strings require no mixing */
		}

#else /* make valgrind happy */

		k8 = (const uint8_t *)k;
		switch(length)
		{
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
		case 10: c+=((uint32_t)k8[9])<<8;   /* fall through */
		case 9 : c+=k8[8];                  /* fall through */
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=((uint32_t)k8[6])<<16;  /* fall through */
		case 6 : b+=((uint32_t)k8[5])<<8;   /* fall through */
		case 5 : b+=k8[4];                  /* fall through */
		case 4 : a+=k[0]; break;
		case 3 : a+=((uint32_t)k8[2])<<16;  /* fall through */
		case 2 : a+=((uint32_t)k8[1])<<8;   /* fall through */
		case 1 : a+=k8[0]; break;
		case 0 : return c;
		}

#endif /* !valgrind */

	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *)key;	/* read 16-bit chunks */
		const uint8_t *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12)
		{
			a += k[0] + (((uint32_t)k[1])<<16);
			b += k[2] + (((uint32_t)k[3])<<16);
			c += k[4] + (((uint32_t)k[5])<<16);
			mix(a,b,c);
			length -= 12;
			k += 6;
		}

		/*----------------------------- handle the last (probably partial) block */
		k8 = (const uint8_t *)k;
		switch(length)
		{
		case 12: c+=k[4]+(((uint32_t)k[5])<<16);
			 b+=k[2]+(((uint32_t)k[3])<<16);
			 a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
		case 10: c+=k[4];
			 b+=k[2]+(((uint32_t)k[3])<<16);
			 a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 9 : c+=k8[8]; /* fall through */
		case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
			 a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
		case 6 : b+=k[2];
			 a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 5 : b+=k8[4]; /* fall through */
		case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
		case 2 : a+=k[0];
			 break;
		case 1 : a+=k8[0];
			 break;
		case 0 : return c;	/* zero length requires no mixing */
		}

	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *)key;

		/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
		while (length > 12)
		{
			a += k[0];
			a += ((uint32_t)k[1])<<8;
			a += ((uint32_t)k[2])<<16;
			a += ((uint32_t)k[3])<<24;
			b += k[4];
			b += ((uint32_t)k[5])<<8;
			b += ((uint32_t)k[6])<<16;
			b += ((uint32_t)k[7])<<24;
			c += k[8];
			c += ((uint32_t)k[9])<<8;
			c += ((uint32_t)k[10])<<16;
			c += ((uint32_t)k[11])<<24;
			mix(a,b,c);
			length -= 12;
			k += 12;
		}

		/*-------------------------------- last block: affect all 32 bits of (c) */
		switch(length)	/* all the case statements fall through */
		{
		case 12: c+=((uint32_t)k[11])<<24;
		case 11: c+=((uint32_t)k[10])<<16;
		case 10: c+=((uint32_t)k[9])<<8;
		case 9 : c+=k[8];
		case 8 : b+=((uint32_t)k[7])<<24;
		case 7 : b+=((uint32_t)k[6])<<16;
		case 6 : b+=((uint32_t)k[5])<<8;
		case 5 : b+=k[4];
		case 4 : a+=((uint32_t)k[3])<<24;
		case 3 : a+=((uint32_t)k[2])<<16;
		case 2 : a+=((uint32_t)k[1])<<8;
		case 1 : a+=k[0];
			 break;
		case 0 : return c;
		}
	}

	final(a,b,c);
	*val2 = b;
	return c;
}
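
/*
 * A minimal usage sketch (not part of the original file): bucketing a key
 * with hashlittle() and hashmask() as the comment above describes, and
 * chaining several byte arrays by feeding each result back in through the
 * val2 pointer.  The names example_bucket and example_hash_arrays are
 * hypothetical, and the block is kept under #if 0 so it is not built.
 */
#if 0
static uint32_t example_bucket(const void *key, size_t len,
			       unsigned int table_bits)
{
	uint32_t seed = 0;	/* val2 is a seed on input, second hash on output */
	uint32_t h = hashlittle(key, len, &seed);

	/* the table would have hashsize(table_bits) buckets */
	return h & hashmask(table_bits);
}

static uint32_t example_hash_arrays(const uint8_t **k, const size_t *len,
				    size_t n)
{
	uint32_t h = 0;
	size_t i;

	/* chain: each previous hash seeds the next call */
	for (i = 0; i < n; i++)
		h = hashlittle(k[i], len[i], &h);

	return h;
}
#endif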

/*
 * hashbig():
 * This is the same as hash_word() on big-endian machines.  It is different
 * from hashlittle() on all machines.  hashbig() takes advantage of
 * big-endian byte ordering.
 */
static uint32_t hashbig( const void *key, size_t length, uint32_t *val2)
{
	uint32_t a,b,c;
	union { const void *ptr; size_t i; } u;	/* to cast key to (size_t) happily */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)length) + *val2;

	u.ptr = key;
	if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *)key;	/* read 32-bit chunks */
		const uint8_t *k8;

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12)
		{
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a,b,c);
			length -= 12;
			k += 3;
		}

		/*----------------------------- handle the last (probably partial) block */
		/*
		 * "k[2]<<8" actually reads beyond the end of the string, but
		 * then shifts out the part it's not allowed to read.  Because the
		 * string is aligned, the illegal read is in the same word as the
		 * rest of the string.  Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this.  But VALGRIND will
		 * still catch it and complain.  The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 *
		 * Not in my testing with gcc 4.5 on an Intel i5 CPU, at least --RR.
		 */
#if 0
		switch(length)
		{
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
		case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
		case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
		case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
		case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
		case 4 : a+=k[0]; break;
		case 3 : a+=k[0]&0xffffff00; break;
		case 2 : a+=k[0]&0xffff0000; break;
		case 1 : a+=k[0]&0xff000000; break;
		case 0 : return c;	/* zero length strings require no mixing */
		}

#else /* make valgrind happy */

		k8 = (const uint8_t *)k;
		switch(length)	/* all the case statements fall through */
		{
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=((uint32_t)k8[10])<<8;  /* fall through */
		case 10: c+=((uint32_t)k8[9])<<16;  /* fall through */
		case 9 : c+=((uint32_t)k8[8])<<24;  /* fall through */
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=((uint32_t)k8[6])<<8;   /* fall through */
		case 6 : b+=((uint32_t)k8[5])<<16;  /* fall through */
		case 5 : b+=((uint32_t)k8[4])<<24;  /* fall through */
		case 4 : a+=k[0]; break;
		case 3 : a+=((uint32_t)k8[2])<<8;   /* fall through */
		case 2 : a+=((uint32_t)k8[1])<<16;  /* fall through */
		case 1 : a+=((uint32_t)k8[0])<<24; break;
		case 0 : return c;
		}

#endif /* !VALGRIND */

	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *)key;

		/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
		while (length > 12)
		{
			a += ((uint32_t)k[0])<<24;
			a += ((uint32_t)k[1])<<16;
			a += ((uint32_t)k[2])<<8;
			a += ((uint32_t)k[3]);
			b += ((uint32_t)k[4])<<24;
			b += ((uint32_t)k[5])<<16;
			b += ((uint32_t)k[6])<<8;
			b += ((uint32_t)k[7]);
			c += ((uint32_t)k[8])<<24;
			c += ((uint32_t)k[9])<<16;
			c += ((uint32_t)k[10])<<8;
			c += ((uint32_t)k[11]);
			mix(a,b,c);
			length -= 12;
			k += 12;
		}

		/*-------------------------------- last block: affect all 32 bits of (c) */
		switch(length)	/* all the case statements fall through */
		{
		case 12: c+=k[11];
		case 11: c+=((uint32_t)k[10])<<8;
		case 10: c+=((uint32_t)k[9])<<16;
		case 9 : c+=((uint32_t)k[8])<<24;
		case 8 : b+=k[7];
		case 7 : b+=((uint32_t)k[6])<<8;
		case 6 : b+=((uint32_t)k[5])<<16;
		case 5 : b+=((uint32_t)k[4])<<24;
		case 4 : a+=k[3];
		case 3 : a+=((uint32_t)k[2])<<8;
		case 2 : a+=((uint32_t)k[1])<<16;
		case 1 : a+=((uint32_t)k[0])<<24;
			 break;
		case 0 : return c;
		}
	}

	final(a,b,c);
	*val2 = b;
	return c;
}

uint32_t nl_hash_any(const void *key, size_t length, uint32_t base)
{
	if (HASH_BIG_ENDIAN)
		return hashbig(key, length, &base);
	else
		return hashlittle(key, length, &base);
}
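
/*
 * A minimal usage sketch (not part of libnl): nl_hash_any() is the exported
 * entry point and picks the endian-appropriate variant at compile time.
 * Two keys can be combined by feeding the first hash back in as the base of
 * the second call.  The name example_hash_pair is hypothetical and the block
 * is kept under #if 0 so it is not built.
 */
#if 0
static uint32_t example_hash_pair(const void *k1, size_t l1,
				  const void *k2, size_t l2)
{
	/* any 32-bit value works as the base; 0 is a common choice */
	uint32_t h = nl_hash_any(k1, l1, 0);

	/* chaining: the first hash seeds the second */
	return nl_hash_any(k2, l2, h);
}
#endif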