/*
Copyright (c) 2012, Broadcom Europe Ltd
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the copyright holder nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

/*
-------------------------------------------------------------------------------
These are functions for producing 32-bit hashes for hash table lookup.
khrn_hashword(), khrn_hashlittle(), hashlittle2(), hashbig(), mix(), and
final() are externally useful functions.  Routines to test the hash are
included if SELF_TEST is defined.  You can use this free for any purpose.
It's in the public domain.  It has no warranty.

You probably want to use khrn_hashlittle().  khrn_hashlittle() and hashbig()
hash byte arrays.  khrn_hashlittle() is faster than hashbig() on little-endian
machines.  Intel and AMD are little-endian machines.  On second thought, you
probably want hashlittle2(), which is identical to khrn_hashlittle() except
it returns two 32-bit hashes for the price of one.  You could implement
hashbig2() if you wanted but I haven't bothered here.

If you want to find a hash of, say, exactly 7 integers, do
  a = i1;  b = i2;  c = i3;
  mix(a,b,c);
  a += i4; b += i5; c += i6;
  mix(a,b,c);
  a += i7;
  final(a,b,c);
then use c as the hash value.

If you have a variable length array of 4-byte integers to hash, use
khrn_hashword().  If you have a byte array (like a character string), use
khrn_hashlittle().  If you have several byte arrays, or a mix of things, see
the comments above khrn_hashlittle().

Why is this so big?  I read 12 bytes at a time into 3 4-byte integers, then
mix those integers.  This is fast (you can do a lot more thorough mixing
with 12*3 instructions on 3 integers than you can with 3 instructions on
1 byte), but shoehorning those bytes into integers efficiently is messy.
-------------------------------------------------------------------------------
*/

#include "interface/khronos/common/khrn_int_common.h"
#include "interface/khronos/common/khrn_int_hash.h" // get definitions of rot, mix and final

#define HASH_LITTLE_ENDIAN 1
#define HASH_BIG_ENDIAN 0
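/*
 * A minimal sketch of the "exactly 7 integers" recipe from the comment
 * above, using the mix()/final() macros that khrn_int_hash.h provides.
 * The name hash_7_words is illustrative only (it is not part of this
 * file's API), so it is guarded by SELF_TEST like the other optional
 * test routines.
 */
#ifdef SELF_TEST
static uint32_t hash_7_words(uint32_t i1, uint32_t i2, uint32_t i3,
                             uint32_t i4, uint32_t i5, uint32_t i6,
                             uint32_t i7)
{
   uint32_t a = i1, b = i2, c = i3;
   mix(a,b,c);
   a += i4; b += i5; c += i6;
   mix(a,b,c);
   a += i7;
   final(a,b,c);
   return c;                                  /* use c as the hash value */
}
#endif /* SELF_TEST */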
#ifndef __arm__ // Use the version in khrn_int_hash_asm.s instead

/*
--------------------------------------------------------------------
This works on all machines.  To be useful, it requires
-- that the key be an array of uint32_t's, and
-- that the length be the number of uint32_t's in the key

The function khrn_hashword() is identical to khrn_hashlittle() on
little-endian machines, and identical to hashbig() on big-endian machines,
except that the length has to be measured in uint32_ts rather than in
bytes.  khrn_hashlittle() is more complicated than khrn_hashword() only
because khrn_hashlittle() has to dance around fitting the key bytes into
registers.
--------------------------------------------------------------------
*/
uint32_t khrn_hashword(
const uint32_t *k,                   /* the key, an array of uint32_t values */
int             length,               /* the length of the key, in uint32_ts */
uint32_t        initval)         /* the previous hash, or an arbitrary value */
{
   uint32_t a,b,c;

   /* Set up the internal state */
   a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;

   /*------------------------------------------------ handle most of the key */
   while (length > 3)
   {
      a += k[0];
      b += k[1];
      c += k[2];
      mix(a,b,c);
      length -= 3;
      k += 3;
   }

   /*------------------------------------------ handle the last 3 uint32_t's */
   switch(length)                    /* all the case statements fall through */
   {
   case 3 : c+=k[2];
   case 2 : b+=k[1];
   case 1 : a+=k[0];
      final(a,b,c);
   case 0:                                    /* case 0: nothing left to add */
      break;
   }

   /*----------------------------------------------------- report the result */
   return c;
}
#endif

/*
-------------------------------------------------------------------------------
khrn_hashlittle() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  length  : the length of the key, counting by bytes
  initval : can be any 4-byte value
Returns a 32-bit value.  Every bit of the key affects every bit of
the return value.  Two keys differing by one or two bits will have
totally different hash values.

The best hash table sizes are powers of 2.  There is no need to do
mod a prime (mod is sooo slow!).  If you need less than 32 bits,
use a bitmask.  For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.

If you are hashing n strings (uint8_t **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = khrn_hashlittle(k[i], len[i], h);
-------------------------------------------------------------------------------
*/
uint32_t khrn_hashlittle( const void *key, size_t length, uint32_t initval)
{
   uint32_t a,b,c;                                         /* internal state */
   union { const void *ptr; size_t i; } u;  /* used to detect key alignment */

   /* Set up the internal state */
   a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

   u.ptr = key;
   if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
      const uint32_t *k = (const uint32_t *)key;       /* read 32-bit chunks */
#ifdef VALGRIND
      const uint8_t  *k8;
#endif

      /*---- all but last block: aligned reads and affect 32 bits of (a,b,c) */
      while (length > 12)
      {
         a += k[0];
         b += k[1];
         c += k[2];
         mix(a,b,c);
         length -= 12;
         k += 3;
      }

      /*--------------------------- handle the last (probably partial) block */
      /*
       * "k[2]&0xffffff" actually reads beyond the end of the string, but
       * then masks off the part it's not allowed to read.  Because the
       * string is aligned, the masked-off tail is in the same word as the
       * rest of the string.  Every machine with memory protection I've seen
       * does it on word boundaries, so is OK with this.  But VALGRIND will
       * still catch it and complain.  The masking trick does make the hash
       * noticeably faster for short strings (like English words).
       */
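      /*
       * Worked example of the masking trick (illustrative, little-endian):
       * hashing the 3-byte key "abc" from an aligned pointer reads the
       * whole word 0x??636261 -- 'a','b','c' plus one stray byte -- and
       * "case 3" below computes k[0]&0xffffff == 0x00636261, keeping
       * exactly the three valid bytes.
       */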
#ifndef VALGRIND

      switch(length)
      {
      case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
      case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
      case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
      case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
      case 8 : b+=k[1]; a+=k[0]; break;
      case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
      case 6 : b+=k[1]&0xffff; a+=k[0]; break;
      case 5 : b+=k[1]&0xff; a+=k[0]; break;
      case 4 : a+=k[0]; break;
      case 3 : a+=k[0]&0xffffff; break;
      case 2 : a+=k[0]&0xffff; break;
      case 1 : a+=k[0]&0xff; break;
      case 0 : return c;            /* zero length strings require no mixing */
      }

#else /* make valgrind happy */

      k8 = (const uint8_t *)k;
      switch(length)
      {
      case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
      case 11: c+=((uint32_t)k8[10])<<16;   /* fall through */
      case 10: c+=((uint32_t)k8[9])<<8;     /* fall through */
      case 9 : c+=k8[8];                    /* fall through */
      case 8 : b+=k[1]; a+=k[0]; break;
      case 7 : b+=((uint32_t)k8[6])<<16;    /* fall through */
      case 6 : b+=((uint32_t)k8[5])<<8;     /* fall through */
      case 5 : b+=k8[4];                    /* fall through */
      case 4 : a+=k[0]; break;
      case 3 : a+=((uint32_t)k8[2])<<16;    /* fall through */
      case 2 : a+=((uint32_t)k8[1])<<8;     /* fall through */
      case 1 : a+=k8[0]; break;
      case 0 : return c;
      }

#endif /* !valgrind */

   } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
      const uint16_t *k = (const uint16_t *)key;       /* read 16-bit chunks */
      const uint8_t  *k8;

      /*------------- all but last block: aligned reads and different mixing */
      while (length > 12)
      {
         a += k[0] + (((uint32_t)k[1])<<16);
         b += k[2] + (((uint32_t)k[3])<<16);
         c += k[4] + (((uint32_t)k[5])<<16);
         mix(a,b,c);
         length -= 12;
         k += 6;
      }

      /*--------------------------- handle the last (probably partial) block */
      k8 = (const uint8_t *)k;
      switch(length)
      {
      case 12: c+=k[4]+(((uint32_t)k[5])<<16);
               b+=k[2]+(((uint32_t)k[3])<<16);
               a+=k[0]+(((uint32_t)k[1])<<16);
               break;
      case 11: c+=((uint32_t)k8[10])<<16;   /* fall through */
      case 10: c+=k[4];
               b+=k[2]+(((uint32_t)k[3])<<16);
               a+=k[0]+(((uint32_t)k[1])<<16);
               break;
      case 9 : c+=k8[8];                    /* fall through */
      case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
               a+=k[0]+(((uint32_t)k[1])<<16);
               break;
      case 7 : b+=((uint32_t)k8[6])<<16;    /* fall through */
      case 6 : b+=k[2];
               a+=k[0]+(((uint32_t)k[1])<<16);
               break;
      case 5 : b+=k8[4];                    /* fall through */
      case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
               break;
      case 3 : a+=((uint32_t)k8[2])<<16;    /* fall through */
      case 2 : a+=k[0];
               break;
      case 1 : a+=k8[0];
               break;
      case 0 : return c;                   /* zero length requires no mixing */
      }

   } else {                        /* need to read the key one byte at a time */
      const uint8_t *k = (const uint8_t *)key;

      /*------------- all but the last block: affect some 32 bits of (a,b,c) */
      while (length > 12)
      {
         a += k[0];
         a += ((uint32_t)k[1])<<8;
         a += ((uint32_t)k[2])<<16;
         a += ((uint32_t)k[3])<<24;
         b += k[4];
         b += ((uint32_t)k[5])<<8;
         b += ((uint32_t)k[6])<<16;
         b += ((uint32_t)k[7])<<24;
         c += k[8];
         c += ((uint32_t)k[9])<<8;
         c += ((uint32_t)k[10])<<16;
         c += ((uint32_t)k[11])<<24;
         mix(a,b,c);
         length -= 12;
         k += 12;
      }

      /*------------------------------ last block: affect all 32 bits of (c) */
      switch(length)                  /* all the case statements fall through */
      {
      /* Comments to pacify Coverity */
      case 12: c+=((uint32_t)k[11])<<24;
               /* fall through */
      case 11: c+=((uint32_t)k[10])<<16;
               /* fall through */
      case 10: c+=((uint32_t)k[9])<<8;
               /* fall through */
      case 9 : c+=k[8];
               /* fall through */
      case 8 : b+=((uint32_t)k[7])<<24;
               /* fall through */
      case 7 : b+=((uint32_t)k[6])<<16;
               /* fall through */
      case 6 : b+=((uint32_t)k[5])<<8;
               /* fall through */
      case 5 : b+=k[4];
               /* fall through */
      case 4 : a+=((uint32_t)k[3])<<24;
               /* fall through */
      case 3 : a+=((uint32_t)k[2])<<16;
               /* fall through */
      case 2 : a+=((uint32_t)k[1])<<8;
               /* fall through */
      case 1 : a+=k[0];
               break;
      case 0 : return c;
      }
   }
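   /*
    * final() (defined alongside mix() in khrn_int_hash.h) does the last
    * avalanche of a, b and c into c; it is what makes every bit of the
    * key affect every bit of the returned value, as promised in the
    * comment above this function.
    */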
   final(a,b,c);
   return c;
}
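/*
 * A minimal usage sketch following the "hashing n strings" recipe in the
 * comment above khrn_hashlittle(): chain several byte arrays through the
 * initval parameter.  hash_byte_arrays is a hypothetical helper, not part
 * of this file's API, so it is guarded by SELF_TEST like the other
 * optional test routines.
 */
#ifdef SELF_TEST
static uint32_t hash_byte_arrays(const uint8_t **k, const size_t *len, int n)
{
   uint32_t h = 0;                /* each call seeds the next via initval */
   int i;
   for (i = 0; i < n; ++i)
      h = khrn_hashlittle(k[i], len[i], h);
   return h;            /* mask with (h & hashmask(bits)) for small tables */
}
#endif /* SELF_TEST */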