github.com/arr-ai/hash@v0.8.0/hash64.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
//   xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/

// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x wasm

package hash

import (
	"runtime"
	"unsafe"
)

const (
	// Constants for multiplication: four random odd 64-bit numbers.
	m1 = 16877499708836156737
	m2 = 2820277070424839065
	m3 = 9497967016996688599
	m4 = 15839092249703872147
)
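// Every mixing step below has the shape h = rotl31(h*mX) * mY: multiply by
// one odd constant, rotate, then multiply by the next. The four-lane loop in
// memhash cycles through all four constants so that each lane mixes with a
// different pair.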

// memhash computes the hash of the s bytes at p, mixed with seed. When the
// AES-based aeshash is available it is used instead; otherwise the bytes are
// folded into the hash using the multipliers defined above.
//nolint:funlen
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	if (runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64") &&
		runtime.GOOS != "nacl" && useAeshash {
		return aeshash(p, seed, s)
	}

	h := uint64(seed + s*hashkey[0])
	// tail folds the remaining bytes (at most 32) into h. Inputs longer than
	// 32 bytes first run the four-lane loop in the default case and then
	// jump back here for the remainder.
tail:
	switch {
	case s == 0:
	case s < 4:
		h ^= uint64(*(*byte)(p))
		h ^= uint64(*(*byte)(add(p, s>>1))) << 8
		h ^= uint64(*(*byte)(add(p, s-1))) << 16
		h = rotl31(h*m1) * m2
	case s <= 8:
		h ^= uint64(readUnaligned32(p))
		h ^= uint64(readUnaligned32(add(p, s-4))) << 32
		h = rotl31(h*m1) * m2
	case s <= 16:
		h ^= readUnaligned64(p)
		h = rotl31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl31(h*m1) * m2
		p = add(p, 8)
		s -= 8
		goto tail
	case s <= 32:
		h ^= readUnaligned64(p)
		h = rotl31(h*m1) * m2
		h ^= readUnaligned64(add(p, 8))
		h = rotl31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-16))
		h = rotl31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl31(h*m1) * m2
		p = add(p, 16)
		s -= 16
		goto tail
	default:
		// Consume 32 bytes per iteration across four independently mixed
		// lanes, then combine the lanes and finish in the tail above.
		v1 := h
		v2 := uint64(seed * hashkey[1])
		v3 := uint64(seed * hashkey[2])
		v4 := uint64(seed * hashkey[3])
		for s >= 32 {
			v1 ^= readUnaligned64(p)
			v1 = rotl31(v1*m1) * m2
			p = add(p, 8)
			v2 ^= readUnaligned64(p)
			v2 = rotl31(v2*m2) * m3
			p = add(p, 8)
			v3 ^= readUnaligned64(p)
			v3 = rotl31(v3*m3) * m4
			p = add(p, 8)
			v4 ^= readUnaligned64(p)
			v4 = rotl31(v4*m4) * m1
			p = add(p, 8)
			s -= 32
		}
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}

	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}
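
// Illustrative sketch only (not part of the original file): a caller passes
// a pointer to the first byte of the data and its length in bytes, e.g.
//
//	func hashBytesSketch(b []byte, seed uintptr) uintptr {
//		if len(b) == 0 {
//			// With s == 0 the pointer is never dereferenced, so nil is fine.
//			return memhash(nil, seed, 0)
//		}
//		return memhash(unsafe.Pointer(&b[0]), seed, uintptr(len(b)))
//	}
//
// hashBytesSketch is a hypothetical name used only for this example.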

// memhash32 is the fixed-size variant of memhash for exactly 4 bytes at p;
// the 32-bit value is duplicated into both halves of the word before mixing.
func memhash32(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint64(seed + 4*hashkey[0])
	v := uint64(readUnaligned32(p))
	h ^= v
	h ^= v << 32
	h = rotl31(h*m1) * m2
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}

// memhash64 is the fixed-size variant of memhash for exactly 8 bytes at p,
// loaded as two unaligned 32-bit halves.
func memhash64(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint64(seed + 8*hashkey[0])
	h ^= uint64(readUnaligned32(p)) | uint64(readUnaligned32(add(p, 4)))<<32
	h = rotl31(h*m1) * m2
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}
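
// All three hash functions above end with the same avalanche finalizer,
// which could be written as a shared helper (illustrative sketch only, not
// part of the original file):
//
//	func finalize(h uint64) uintptr {
//		h ^= h >> 29
//		h *= m3
//		h ^= h >> 32
//		return uintptr(h)
//	}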

// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
func rotl31(x uint64) uint64 {
	return (x << 31) | (x >> (64 - 31))
}
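
// For reference (not part of the original file): this is the same operation
// as the math/bits intrinsic, i.e. rotl31(x) == bits.RotateLeft64(x, 31),
// since RotateLeft64(x, 31) computes x<<31 | x>>(64-31).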