// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
//   xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/

// +build amd64 amd64p32 arm64 ppc64 ppc64le

package runtime

import "unsafe"

const (
	// Constants for multiplication: four random odd 64-bit numbers.
	m1 = 16877499708836156737
	m2 = 2820277070424839065
	m3 = 9497967016996688599
	m4 = 15839092249703872147
)

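// memhash returns a hash of the s bytes starting at p, mixed with seed.
// On amd64 (outside nacl) it uses the AES-based assembly routine when
// useAeshash is set; otherwise it falls back to the 64-bit mixing below,
// modeled on xxhash and cityhash.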
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	if GOARCH == "amd64" && GOOS != "nacl" && useAeshash {
		return aeshash(p, seed, s)
	}
	h := uint64(seed + s*hashkey[0])
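	// The tail switch hashes inputs of up to 32 bytes directly; longer
	// inputs are consumed 32 bytes at a time in the default case, which
	// then jumps back here for the final 0 to 31 bytes.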
tail:
	switch {
	case s == 0:
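		// 0 bytes: nothing to read; the seeded value goes straight to the finalizer.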
	case s < 4:
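		// 1 to 3 bytes: combine the first, middle, and last byte (they may coincide).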
		h ^= uint64(*(*byte)(p))
		h ^= uint64(*(*byte)(add(p, s>>1))) << 8
		h ^= uint64(*(*byte)(add(p, s-1))) << 16
		h = rotl_31(h*m1) * m2
	case s <= 8:
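		// 4 to 8 bytes: two 32-bit reads from the start and the end cover the input.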
		h ^= uint64(readUnaligned32(p))
		h ^= uint64(readUnaligned32(add(p, s-4))) << 32
		h = rotl_31(h*m1) * m2
	case s <= 16:
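		// 9 to 16 bytes: two 64-bit reads from the start and the end, mixed separately.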
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	case s <= 32:
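		// 17 to 32 bytes: four 64-bit reads covering the first and last 16 bytes.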
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, 8))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-16))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	default:
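		// Longer than 32 bytes: mix 32-byte blocks into four lanes seeded
		// from h and hashkey, then fold the lanes and hash the remainder.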
		v1 := h
		v2 := uint64(hashkey[1])
		v3 := uint64(hashkey[2])
		v4 := uint64(hashkey[3])
		for s >= 32 {
			v1 ^= readUnaligned64(p)
			v1 = rotl_31(v1*m1) * m2
			p = add(p, 8)
			v2 ^= readUnaligned64(p)
			v2 = rotl_31(v2*m2) * m3
			p = add(p, 8)
			v3 ^= readUnaligned64(p)
			v3 = rotl_31(v3*m3) * m4
			p = add(p, 8)
			v4 ^= readUnaligned64(p)
			v4 = rotl_31(v4*m4) * m1
			p = add(p, 8)
			s -= 32
		}
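		// Combine the lanes and let the short-input cases above handle the
		// remaining 0 to 31 bytes.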
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}

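	// Finalizer: xor-shifts around a multiply avalanche the bits before
	// truncating to uintptr.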
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}

// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
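// rotl_31 rotates x left by 31 bits.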
func rotl_31(x uint64) uint64 {
	return (x << 31) | (x >> (64 - 31))
}