/*
 * The following hash function is based on MurmurHash3, placed into the public
 * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
 * details.
 */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

     21 #ifndef JEMALLOC_ENABLE_INLINE
     22 uint32_t	hash_x86_32(const void *key, int len, uint32_t seed);
     23 void	hash_x86_128(const void *key, const int len, uint32_t seed,
     24     uint64_t r_out[2]);
     25 void	hash_x64_128(const void *key, const int len, const uint32_t seed,
     26     uint64_t r_out[2]);
     27 void	hash(const void *key, size_t len, const uint32_t seed,
     28     size_t r_hash[2]);
     29 #endif
     30 
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/******************************************************************************/
/* Internal implementation. */
/*
 * Rotate left.  Callers in this file always pass 0 < r < 32 (resp. 64); a
 * zero rotation would shift by the full word width, which is undefined
 * behavior in C.
 */
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{

	return ((x << r) | (x >> (32 - r)));
}

JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{

	return ((x << r) | (x >> (64 - r)));
}
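
/*
 * Worked example: hash_rotl_32(0x80000000, 1) == 1; the high bit wraps around
 * to bit 0 rather than being discarded.
 */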

JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{

	/* Handle unaligned read. */
	if (unlikely(((uintptr_t)p & (sizeof(uint32_t)-1)) != 0)) {
		uint32_t ret;

		/* ANDROID change */
		memcpy(&ret, (const uint8_t *)(p+i), sizeof(uint32_t));
		/* End ANDROID change */
		return (ret);
	}

	return (p[i]);
}

JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{

	/* Handle unaligned read. */
	if (unlikely(((uintptr_t)p & (sizeof(uint64_t)-1)) != 0)) {
		uint64_t ret;

		/* ANDROID change */
		memcpy(&ret, (const uint8_t *)(p+i), sizeof(uint64_t));
		/* End ANDROID change */
		return (ret);
	}

	return (p[i]);
}
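
/*
 * Illustrative sketch (hypothetical buffer): a read at a misaligned offset
 * takes the memcpy() path above rather than a direct load, e.g.:
 *
 *	uint8_t buf[8] = {0};
 *	uint32_t w = hash_get_block_32((const uint32_t *)&buf[1], 0);
 *
 * The cast produces a pointer that is not 4-byte aligned, which the alignment
 * check detects; memcpy() then performs a byte-wise copy that is safe on
 * strict-alignment architectures.
 */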

/*
 * MurmurHash3 finalization mix -- forces all bits of a hash block to
 * avalanche.
 */
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{

	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;

	return (h);
}

JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{

	k ^= k >> 33;
	k *= KQU(0xff51afd7ed558ccd);
	k ^= k >> 33;
	k *= KQU(0xc4ceb9fe1a85ec53);
	k ^= k >> 33;

	return (k);
}
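
/*
 * The finalizers are useful on their own as cheap integer mixers.  A minimal
 * sketch (hypothetical usage, not part of jemalloc's API): deriving a
 * well-distributed bucket index from a pointer whose low bits are biased by
 * alignment:
 *
 *	size_t
 *	ptr_bucket(const void *ptr, size_t nbuckets)
 *	{
 *
 *		return ((size_t)hash_fmix_64((uint64_t)(uintptr_t)ptr) %
 *		    nbuckets);
 *	}
 */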

JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 4;

	uint32_t h1 = seed;

	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;

	/* body */
	{
		/*
		 * blocks points one past the last full 4-byte block; the loop
		 * indexes backward from there with negative i.
		 */
		const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
		int i;

		for (i = -nblocks; i; i++) {
			uint32_t k1 = hash_get_block_32(blocks, i);

			k1 *= c1;
			k1 = hash_rotl_32(k1, 15);
			k1 *= c2;

			h1 ^= k1;
			h1 = hash_rotl_32(h1, 13);
			h1 = h1*5 + 0xe6546b64;
		}
	}

	/* tail (intentional fall through between cases) */
	{
		const uint8_t *tail = (const uint8_t *) (data + nblocks*4);

		uint32_t k1 = 0;

		switch (len & 3) {
		case 3: k1 ^= tail[2] << 16;
		case 2: k1 ^= tail[1] << 8;
		case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
			k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len;

	h1 = hash_fmix_32(h1);

	return (h1);
}
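
/*
 * Example call (hypothetical): hashing a short string with an arbitrary seed.
 *
 *	const char *s = "jemalloc";
 *	uint32_t h = hash_x86_32(s, (int)strlen(s), 0x9747b28c);
 *
 * Equal (key, len, seed) triples always produce equal hashes; the value
 * itself is an implementation detail and should not be persisted.
 */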

UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
    uint64_t r_out[2])
{
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 16;

	uint32_t h1 = seed;
	uint32_t h2 = seed;
	uint32_t h3 = seed;
	uint32_t h4 = seed;

	const uint32_t c1 = 0x239b961b;
	const uint32_t c2 = 0xab0e9789;
	const uint32_t c3 = 0x38b34ae5;
	const uint32_t c4 = 0xa1e38b93;

	/* body */
	{
		const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
		int i;

		for (i = -nblocks; i; i++) {
			uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
			uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
			uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
			uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);

			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;

			h1 = hash_rotl_32(h1, 19); h1 += h2;
			h1 = h1*5 + 0x561ccd1b;

			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

			h2 = hash_rotl_32(h2, 17); h2 += h3;
			h2 = h2*5 + 0x0bcaa747;

			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

			h3 = hash_rotl_32(h3, 15); h3 += h4;
			h3 = h3*5 + 0x96cd1c35;

			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

			h4 = hash_rotl_32(h4, 13); h4 += h1;
			h4 = h4*5 + 0x32ac3b17;
		}
	}

	/* tail (intentional fall through between cases) */
	{
		const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
		uint32_t k1 = 0;
		uint32_t k2 = 0;
		uint32_t k3 = 0;
		uint32_t k4 = 0;

		switch (len & 15) {
		case 15: k4 ^= tail[14] << 16;
		case 14: k4 ^= tail[13] << 8;
		case 13: k4 ^= tail[12] << 0;
			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;

		case 12: k3 ^= tail[11] << 24;
		case 11: k3 ^= tail[10] << 16;
		case 10: k3 ^= tail[ 9] << 8;
		case  9: k3 ^= tail[ 8] << 0;
			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;

		case  8: k2 ^= tail[ 7] << 24;
		case  7: k2 ^= tail[ 6] << 16;
		case  6: k2 ^= tail[ 5] << 8;
		case  5: k2 ^= tail[ 4] << 0;
			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;

		case  4: k1 ^= tail[ 3] << 24;
		case  3: k1 ^= tail[ 2] << 16;
		case  2: k1 ^= tail[ 1] << 8;
		case  1: k1 ^= tail[ 0] << 0;
			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	h1 = hash_fmix_32(h1);
	h2 = hash_fmix_32(h2);
	h3 = hash_fmix_32(h3);
	h4 = hash_fmix_32(h4);

	h1 += h2; h1 += h3; h1 += h4;
	h2 += h1; h3 += h1; h4 += h1;

	r_out[0] = (((uint64_t) h2) << 32) | h1;
	r_out[1] = (((uint64_t) h4) << 32) | h3;
}
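
/*
 * Example call (hypothetical): producing a 128-bit digest on a 32-bit target.
 *
 *	uint64_t out[2];
 *	hash_x86_128("jemalloc", 8, 0, out);
 *
 * Note that hash_x86_128() and hash_x64_128() are distinct algorithms and do
 * not produce the same 128-bit result for the same input.
 */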

UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
    uint64_t r_out[2])
{
	const uint8_t *data = (const uint8_t *) key;
	const int nblocks = len / 16;

	uint64_t h1 = seed;
	uint64_t h2 = seed;

	const uint64_t c1 = KQU(0x87c37b91114253d5);
	const uint64_t c2 = KQU(0x4cf5ad432745937f);

	/* body */
	{
		const uint64_t *blocks = (const uint64_t *) (data);
		int i;

		for (i = 0; i < nblocks; i++) {
			uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
			uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);

			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;

			h1 = hash_rotl_64(h1, 27); h1 += h2;
			h1 = h1*5 + 0x52dce729;

			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

			h2 = hash_rotl_64(h2, 31); h2 += h1;
			h2 = h2*5 + 0x38495ab5;
		}
	}

	/* tail (intentional fall through between cases) */
	{
		const uint8_t *tail = (const uint8_t *)(data + nblocks*16);
		uint64_t k1 = 0;
		uint64_t k2 = 0;

		switch (len & 15) {
		case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
		case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
		case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
		case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
		case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
		case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
		case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

		case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
		case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
		case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
		case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
		case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
		case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
		case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
		case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
		}
	}

	/* finalization */
	h1 ^= len; h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = hash_fmix_64(h1);
	h2 = hash_fmix_64(h2);

	h1 += h2;
	h2 += h1;

	r_out[0] = h1;
	r_out[1] = h2;
}
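
/*
 * Example call (hypothetical): hashing a fixed-size binary key.
 *
 *	struct key { uint64_t a; uint64_t b; };
 *	struct key k = {42, 7};
 *	uint64_t out[2];
 *	hash_x64_128(&k, (int)sizeof(k), 0, out);
 *
 * Any padding bytes in the struct would have to be zeroed first, since the
 * hash covers the raw bytes.
 */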

/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{

	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */

#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
	/* On 64-bit little-endian systems, fill r_hash in place. */
	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
#else
	{
		uint64_t hashes[2];
		hash_x86_128(key, (int)len, seed, hashes);
		r_hash[0] = (size_t)hashes[0];
		r_hash[1] = (size_t)hashes[1];
	}
#endif
}
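
/*
 * Example call (hypothetical): the public entry point picks the variant
 * appropriate for the build.
 *
 *	size_t h[2];
 *	hash("jemalloc", 8, 0x12345678, h);
 *
 * Callers that need only one word of output can use h[0] alone.
 */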
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/