/*
 * nfs-ganesha 1.4 — murmur3.c
 * (Doxygen "source listing" header; go to the generated documentation
 * for this file.)
 */
00001 //-----------------------------------------------------------------------------
00002 // MurmurHash3 was written by Austin Appleby, and is placed in the public
00003 // domain. The author hereby disclaims copyright to this source code.
00004 
00005 // Note - The x86 and x64 versions do _not_ produce the same results, as the
00006 // algorithms are optimized for their respective platforms. You can still
00007 // compile and run any of them on any platform, but your performance with the
00008 // non-native version will be less than optimal.
00009 
00010 #ifdef HAVE_CONFIG_H
00011 #include "config.h"
00012 #endif
00013 
00014 #include "murmur3.h"
00015 
00016 //-----------------------------------------------------------------------------
00017 // Platform-specific functions and macros
00018 
00019 #define FORCE_INLINE static inline __attribute__((always_inline))
00020 
/*
 * Rotate a 32-bit value left by r bits.
 *
 * r must be in 1..31: r == 0 (or 32) would shift by the full type width,
 * which is undefined behaviour in C.  All call sites (via ROTL32) pass
 * fixed non-zero constants.
 *
 * Declared "static inline": the original plain C99 "inline" provides no
 * external definition for this translation unit, which can cause
 * undefined-reference link errors when the compiler chooses not to
 * inline (e.g. at -O0).  "static inline" is the portable form.
 */
static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}
00025 
/*
 * Rotate a 64-bit value left by r bits.
 *
 * r must be in 1..63: r == 0 (or 64) would shift by the full type width,
 * which is undefined behaviour in C.  All call sites (via ROTL64) pass
 * fixed non-zero constants.
 *
 * Declared "static inline" for the same reason as rotl32: plain C99
 * "inline" emits no external definition, risking link failures when the
 * call is not inlined.
 */
static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}
00030 
00031 #define ROTL32(x,y)     rotl32(x,y)
00032 #define ROTL64(x,y)     rotl64(x,y)
00033 
00034 #define BIG_CONSTANT(x) (x##LLU)
00035 
00036 //-----------------------------------------------------------------------------
00037 // Block read - if your platform needs to do endian-swapping or can only
00038 // handle aligned reads, do the conversion here
00039 
00040 #define getblock(p, i) (p[i])
00041 
00042 //-----------------------------------------------------------------------------
00043 // Finalization mix - force all bits of a hash block to avalanche
00044 
/*
 * 32-bit finalization mix.
 *
 * Three xor-shift / multiply rounds that force every input bit to
 * avalanche across the whole 32-bit word; used to finish each hash lane.
 * fmix32(0) == 0 by construction.
 */
FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}
00055 
00056 //----------
00057 
/*
 * 64-bit finalization mix.
 *
 * Same avalanche structure as fmix32, widened to 64 bits: each round
 * folds the high half down with a 33-bit shift, then diffuses with a
 * multiply.  fmix64(0) == 0 by construction.
 */
FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
  k = (k ^ (k >> 33)) * BIG_CONSTANT(0xff51afd7ed558ccd);
  k = (k ^ (k >> 33)) * BIG_CONSTANT(0xc4ceb9fe1a85ec53);

  return k ^ (k >> 33);
}
00068 
00069 //-----------------------------------------------------------------------------
00070 
/*
 * MurmurHash3, 32-bit x86 variant (Austin Appleby, public domain).
 *
 * key/len : input buffer and its length in bytes.
 * seed    : initial hash state.
 * out     : receives the 32-bit digest, written through a uint32_t
 *           pointer, so it must be suitably aligned.
 *
 * Each 4-byte block is read as a raw uint32_t, so the digest depends on
 * host byte order (see the note at the top of this file).
 */
void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data    = (const uint8_t*)key;
  const int nblocks       = len / 4;
  const uint32_t * blocks = (const uint32_t *)data;
  const uint8_t * tail    = data + nblocks*4;

  const uint32_t c1 = 0xcc9e2d51;
  const uint32_t c2 = 0x1b873593;

  uint32_t h = seed;
  uint32_t k = 0;
  int i;

  /* body: one mixing round per full 4-byte block */
  for (i = 0; i < nblocks; i++)
  {
    uint32_t w = blocks[i];

    w *= c1;
    w = (w << 15) | (w >> 17);    /* rotl32(w, 15) */
    w *= c2;

    h ^= w;
    h = (h << 13) | (h >> 19);    /* rotl32(h, 13) */
    h = h*5 + 0xe6546b64;
  }

  /* tail: fold in the 0-3 trailing bytes (cases intentionally fall
   * through so every remaining byte is consumed) */
  switch (len & 3)
  {
  case 3: k ^= (uint32_t)tail[2] << 16; /* fallthrough */
  case 2: k ^= (uint32_t)tail[1] << 8;  /* fallthrough */
  case 1: k ^= tail[0];
          k *= c1;
          k = (k << 15) | (k >> 17);    /* rotl32(k, 15) */
          k *= c2;
          h ^= k;
  }

  /* finalization: mix in the length, then avalanche (fmix32 inlined) */
  h ^= (uint32_t)len;
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  *(uint32_t*)out = h;
}
00125 
00126 //-----------------------------------------------------------------------------
00127 
/*
 * MurmurHash3, 128-bit x86 variant (Austin Appleby, public domain).
 *
 * key/len : input buffer and its length in bytes.
 * seed    : initial value for all four 32-bit hash lanes.
 * out     : receives the 16-byte digest, written as four uint32_t
 *           values, so it must be suitably aligned for uint32_t.
 *
 * Blocks are read 4 bytes at a time via getblock(), so the digest
 * depends on host byte order (see the note at the top of this file).
 *
 * BUG FIX: finalization previously called fmix64() on the four 32-bit
 * lanes.  The reference implementation uses the 32-bit finalizer
 * fmix32() here; fmix64() runs a different avalanche (64-bit constants,
 * 33-bit shifts) whose result was silently truncated on assignment, so
 * digests disagreed with canonical MurmurHash3_x86_128 output.
 */
void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;
  int i;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  uint32_t c1 = 0x239b961b;
  uint32_t c2 = 0xab0e9789;
  uint32_t c3 = 0x38b34ae5;
  uint32_t c4 = 0xa1e38b93;

  //----------
  // body: four interleaved 32-bit lanes per 16-byte block

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i*4+0);
    uint32_t k2 = getblock(blocks,i*4+1);
    uint32_t k3 = getblock(blocks,i*4+2);
    uint32_t k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail: fold in the 0-15 trailing bytes (cases intentionally fall
  // through so every remaining byte is consumed)

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16; /* fallthrough */
  case 14: k4 ^= tail[13] << 8;  /* fallthrough */
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
           /* fallthrough */
  case 12: k3 ^= tail[11] << 24; /* fallthrough */
  case 11: k3 ^= tail[10] << 16; /* fallthrough */
  case 10: k3 ^= tail[ 9] << 8;  /* fallthrough */
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
           /* fallthrough */
  case  8: k2 ^= tail[ 7] << 24; /* fallthrough */
  case  7: k2 ^= tail[ 6] << 16; /* fallthrough */
  case  6: k2 ^= tail[ 5] << 8;  /* fallthrough */
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
           /* fallthrough */
  case  4: k1 ^= tail[ 3] << 24; /* fallthrough */
  case  3: k1 ^= tail[ 2] << 16; /* fallthrough */
  case  2: k1 ^= tail[ 1] << 8;  /* fallthrough */
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization: mix in the length, cross-add the lanes, avalanche
  // each lane with the 32-bit finalizer, then cross-add again

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  /* was fmix64(); the reference x86_128 algorithm uses fmix32() */
  h1 = fmix32(h1);
  h2 = fmix32(h2);
  h3 = fmix32(h3);
  h4 = fmix32(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}
00231 
00232 //-----------------------------------------------------------------------------
00233 
/*
 * MurmurHash3, 128-bit x64 variant (Austin Appleby, public domain).
 *
 * key/len : input buffer and its length in bytes.
 * seed    : initial value for both 64-bit hash lanes.
 * out     : receives the 16-byte digest, written as two uint64_t
 *           values, so it must be suitably aligned for uint64_t.
 *
 * Blocks are read 8 bytes at a time as raw uint64_t words, so the
 * digest depends on host byte order, and (per the note at the top of
 * this file) never matches the x86 variants.
 */
void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data    = (const uint8_t*)key;
  const int nblocks       = len / 16;
  const uint64_t * blocks = (const uint64_t *)data;
  const uint8_t * tail    = data + nblocks*16;

  const uint64_t c1 = 0x87c37b91114253d5LLU;
  const uint64_t c2 = 0x4cf5ad432745937fLLU;

  uint64_t h1 = seed;
  uint64_t h2 = seed;
  uint64_t k1 = 0;
  uint64_t k2 = 0;
  int i;

  /* body: two 64-bit lanes per 16-byte block */
  for (i = 0; i < nblocks; i++)
  {
    k1 = blocks[i*2+0];
    k2 = blocks[i*2+1];

    k1 *= c1;
    k1 = (k1 << 31) | (k1 >> 33);   /* rotl64(k1, 31) */
    k1 *= c2;
    h1 ^= k1;

    h1 = (h1 << 27) | (h1 >> 37);   /* rotl64(h1, 27) */
    h1 += h2;
    h1 = h1*5 + 0x52dce729;

    k2 *= c2;
    k2 = (k2 << 33) | (k2 >> 31);   /* rotl64(k2, 33) */
    k2 *= c1;
    h2 ^= k2;

    h2 = (h2 << 31) | (h2 >> 33);   /* rotl64(h2, 31) */
    h2 += h1;
    h2 = h2*5 + 0x38495ab5;
  }

  /* tail: fold in the 0-15 trailing bytes (cases intentionally fall
   * through so every remaining byte is consumed) */
  k1 = 0;
  k2 = 0;

  switch (len & 15)
  {
  case 15: k2 ^= (uint64_t)(tail[14]) << 48; /* fallthrough */
  case 14: k2 ^= (uint64_t)(tail[13]) << 40; /* fallthrough */
  case 13: k2 ^= (uint64_t)(tail[12]) << 32; /* fallthrough */
  case 12: k2 ^= (uint64_t)(tail[11]) << 24; /* fallthrough */
  case 11: k2 ^= (uint64_t)(tail[10]) << 16; /* fallthrough */
  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;  /* fallthrough */
  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;
           k2 *= c2;
           k2 = (k2 << 33) | (k2 >> 31);     /* rotl64(k2, 33) */
           k2 *= c1;
           h2 ^= k2;
           /* fallthrough */
  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56; /* fallthrough */
  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48; /* fallthrough */
  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40; /* fallthrough */
  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32; /* fallthrough */
  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24; /* fallthrough */
  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16; /* fallthrough */
  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8;  /* fallthrough */
  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;
           k1 *= c1;
           k1 = (k1 << 31) | (k1 >> 33);     /* rotl64(k1, 31) */
           k1 *= c2;
           h1 ^= k1;
  }

  /* finalization: mix in the length, cross-add, avalanche each lane
   * (fmix64 inlined), then cross-add again */
  h1 ^= len;
  h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = (h1 ^ (h1 >> 33)) * 0xff51afd7ed558ccdLLU;
  h1 = (h1 ^ (h1 >> 33)) * 0xc4ceb9fe1a85ec53LLU;
  h1 ^= h1 >> 33;

  h2 = (h2 ^ (h2 >> 33)) * 0xff51afd7ed558ccdLLU;
  h2 = (h2 ^ (h2 >> 33)) * 0xc4ceb9fe1a85ec53LLU;
  h2 ^= h2 >> 33;

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}
00313 
00314 //-----------------------------------------------------------------------------
00315