github.com/arr-ai/hash@v0.8.0/hash32.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
//   xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/

// +build 386 arm mips mipsle

package hash

import (
	"runtime"
	"unsafe"
)

const (
	// Constants for multiplication: four random odd 32-bit numbers.
	m1 = 3168982561
	m2 = 3339683297
	m3 = 832293441
	m4 = 2336365089
)

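// memhash computes a 32-bit hash of the s bytes at p, mixed with seed.
// It mirrors the Go runtime's fallback hash for 32-bit platforms; on 386
// (outside nacl) it defers to the assembly AES-based hash when the CPU
// supports it.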
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	if runtime.GOARCH == "386" && runtime.GOOS != "nacl" && useAeshash {
		return aeshash(p, seed, s)
	}
	h := uint32(seed + s*hashkey[0])
tail:
	switch {
	case s == 0:
	case s < 4:
		h ^= uint32(*(*byte)(p))
		h ^= uint32(*(*byte)(add(p, s>>1))) << 8
		h ^= uint32(*(*byte)(add(p, s-1))) << 16
		h = rotl_15(h*m1) * m2
	case s == 4:
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
	case s <= 8:
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-4))
		h = rotl_15(h*m1) * m2
	case s <= 16:
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, 4))
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-8))
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-4))
		h = rotl_15(h*m1) * m2
	default:
		// Bulk loop: mix 16 bytes per iteration across four lanes, then
		// fold the lanes together and hash the remaining tail.
		v1 := h
		v2 := uint32(seed * hashkey[1])
		v3 := uint32(seed * hashkey[2])
		v4 := uint32(seed * hashkey[3])
		for s >= 16 {
			v1 ^= readUnaligned32(p)
			v1 = rotl_15(v1*m1) * m2
			p = add(p, 4)
			v2 ^= readUnaligned32(p)
			v2 = rotl_15(v2*m2) * m3
			p = add(p, 4)
			v3 ^= readUnaligned32(p)
			v3 = rotl_15(v3*m3) * m4
			p = add(p, 4)
			v4 ^= readUnaligned32(p)
			v4 = rotl_15(v4*m4) * m1
			p = add(p, 4)
			s -= 16
		}
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}
	// Finalize: avalanche the accumulated bits.
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}
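
// hashBytesExample is an illustrative sketch (the name is hypothetical and
// not part of the original runtime-derived code). It shows how memhash is
// typically driven from a byte slice: pass the data pointer, a seed, and
// the length in bytes.
func hashBytesExample(b []byte, seed uintptr) uintptr {
	if len(b) == 0 {
		// The s == 0 case never dereferences p, so nil is fine here.
		return memhash(nil, seed, 0)
	}
	return memhash(unsafe.Pointer(&b[0]), seed, uintptr(len(b)))
}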

// memhash32 is the specialization of memhash for keys that are exactly
// 4 bytes long.
func memhash32(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint32(seed + 4*hashkey[0])
	h ^= readUnaligned32(p)
	h = rotl_15(h*m1) * m2
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}

// memhash64 is the specialization of memhash for keys that are exactly
// 8 bytes long.
func memhash64(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint32(seed + 8*hashkey[0])
	h ^= readUnaligned32(p)
	h = rotl_15(h*m1) * m2
	h ^= readUnaligned32(add(p, 4))
	h = rotl_15(h*m1) * m2
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}
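
// hashUint64Example is an illustrative sketch (hypothetical name, not in the
// original file): memhash32 and memhash64 expect a pointer to exactly 4 or
// 8 bytes respectively, so small fixed-size keys can be hashed directly.
func hashUint64Example(x uint64, seed uintptr) uintptr {
	return memhash64(unsafe.Pointer(&x), seed)
}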

// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
func rotl_15(x uint32) uint32 {
	return (x << 15) | (x >> (32 - 15))
}
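
// As an aside (not part of the original source): on newer Go releases an
// equivalent rotation could probably be written with the standard library,
// which the compiler recognizes as a rotate on most targets:
//
//	import "math/bits"
//
//	func rotl15(x uint32) uint32 {
//		return bits.RotateLeft32(x, 15)
//	}
//
// The hand-folded form above is kept to stay close to the runtime source
// this file was copied from.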