github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/hash32.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
//   xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/

// +build 386 arm mips mipsle

package runtime

import "unsafe"

const (
	// Constants for multiplication: four random odd 32-bit numbers.
	m1 = 3168982561
	m2 = 3339683297
	m3 = 832293441
	m4 = 2336365089
)

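// memhashFallback hashes the s bytes starting at p, mixing in seed.
// It is the generic software hash for the 32-bit platforms named in the
// build tag above, used when no architecture-specific hash is available.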
func memhashFallback(p unsafe.Pointer, seed, s uintptr) uintptr {
	h := uint32(seed + s*hashkey[0])
tail:
	switch {
	case s == 0:
	case s < 4:
		// 1 to 3 bytes: pick up the first, middle, and last byte
		// (the reads may refer to the same byte).
		h ^= uint32(*(*byte)(p))
		h ^= uint32(*(*byte)(add(p, s>>1))) << 8
		h ^= uint32(*(*byte)(add(p, s-1))) << 16
		h = rotl_15(h*m1) * m2
	case s == 4:
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
	case s <= 8:
		// 5 to 8 bytes: two 4-byte reads that may overlap.
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-4))
		h = rotl_15(h*m1) * m2
	case s <= 16:
		// 9 to 16 bytes: four 4-byte reads, the last two anchored to the
		// end of the data so they may overlap the first two.
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, 4))
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-8))
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-4))
		h = rotl_15(h*m1) * m2
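	// Longer than 16 bytes: the block loop below consumes 16 bytes per
	// iteration across four lanes, then jumps back to the cases above to
	// hash the remaining tail.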
	default:
		v1 := h
		v2 := uint32(seed * hashkey[1])
		v3 := uint32(seed * hashkey[2])
		v4 := uint32(seed * hashkey[3])
		for s >= 16 {
			v1 ^= readUnaligned32(p)
			v1 = rotl_15(v1*m1) * m2
			p = add(p, 4)
			v2 ^= readUnaligned32(p)
			v2 = rotl_15(v2*m2) * m3
			p = add(p, 4)
			v3 ^= readUnaligned32(p)
			v3 = rotl_15(v3*m3) * m4
			p = add(p, 4)
			v4 ^= readUnaligned32(p)
			v4 = rotl_15(v4*m4) * m1
			p = add(p, 4)
			s -= 16
		}
		// Fold the four lanes together and hash the remaining 0-15 bytes
		// via the short cases above.
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}
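	// Finalizer: xor-shifts and multiplies so that every input bit can
	// affect every bit of the result.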
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}

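// memhash32Fallback is the specialization of memhashFallback for 4-byte keys.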
func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint32(seed + 4*hashkey[0])
	h ^= readUnaligned32(p)
	h = rotl_15(h*m1) * m2
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}

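// memhash64Fallback is the specialization of memhashFallback for 8-byte keys,
// read as two unaligned 32-bit words.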
func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint32(seed + 8*hashkey[0])
	h ^= readUnaligned32(p)
	h = rotl_15(h*m1) * m2
	h ^= readUnaligned32(add(p, 4))
	h = rotl_15(h*m1) * m2
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}

// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
func rotl_15(x uint32) uint32 {
	return (x << 15) | (x >> (32 - 15))
}