github.com/primecitizens/pcz/std@v0.2.1/builtin/map/map.go

// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 The Prime Citizens
//
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package stdmap

import (
	"unsafe"

	"github.com/primecitizens/pcz/std/core/abi"
	"github.com/primecitizens/pcz/std/core/arch"
	"github.com/primecitizens/pcz/std/core/assert"
	"github.com/primecitizens/pcz/std/core/thread"
)

// MakeSmall implements map creation for make(map[k]v) and make(map[k]v, hint)
// when hint is known at compile time to be at most bucketCnt, mirroring the
// Go runtime's makemap_small.
func MakeSmall() *hmap {
	h := new(hmap)
	h.Hash0 = thread.G().G().Rand32()
	return h
}

// Make implements map creation for make(map[k]v, hint), mirroring the Go
// runtime's makemap. The port is unfinished: it currently stops at
// assert.TODO() and returns nil; the commented-out body below preserves the
// runtime implementation being ported.
func Make(t *abi.MapType, hint int, h *hmap) *hmap {
	assert.TODO()
	return nil
	// mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	// if overflow || mem > os.MaxAlloc {
	// 	hint = 0
	// }
	//
	// // initialize Hmap
	// if h == nil {
	// 	h = new(hmap)
	// }
	// h.Hash0 = rand.Fastrand()
	//
	// // Find the size parameter B which will hold the requested # of elements.
	// // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	// B := uint8(0)
	// for overLoadFactor(hint, B) {
	// 	B++
	// }
	// h.B = B
	//
	// // Allocate the initial hash table.
	// // If B == 0, the buckets field is allocated lazily later (in mapassign).
	// // If hint is large, zeroing this memory could take a while.
	// if h.B != 0 {
	// 	var nextOverflow *bmap
	// 	h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
	// 	if nextOverflow != nil {
	// 		h.Extra = new(mapextra)
	// 		h.Extra.nextOverflow = nextOverflow
	// 	}
	// }
	//
	// return h
}

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth is 6.5.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorNum = 13
	loadFactorDen = 2

	// Maximum key or elem size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big elems - the cutoff size for
	// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
	maxKeySize  = 128
	maxElemSize = 128

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*arch.PtrSize) - 1
)
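// The following is a sketch (not yet ported in this file) of the Go
// runtime's tophash helper, included to show how minTopHash keeps the hash
// byte of a normal filled cell out of the reserved marker range defined
// above; it is left commented out like the other pending ports here.
//
// // tophash calculates the tophash value for hash.
// func tophash(hash uintptr) uint8 {
// 	top := uint8(hash >> (arch.PtrSize*8 - 8))
// 	if top < minTopHash {
// 		top += minTopHash
// 	}
// 	return top
// }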
// // makeBucketArray initializes a backing array for map buckets.
// // 1<<b is the minimum number of buckets to allocate.
// // dirtyalloc should either be nil or a bucket array previously
// // allocated by makeBucketArray with the same t and b parameters.
// // If dirtyalloc is nil a new backing array will be alloced and
// // otherwise dirtyalloc will be cleared and reused as backing array.
// func makeBucketArray(t *abi.MapType, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
// 	base := bucketShift(b)
// 	nbuckets := base
// 	// For small b, overflow buckets are unlikely.
// 	// Avoid the overhead of the calculation.
// 	if b >= 4 {
// 		// Add on the estimated number of overflow buckets
// 		// required to insert the median number of elements
// 		// used with this value of b.
// 		nbuckets += bucketShift(b - 4)
// 		sz := t.Bucket.Size_ * nbuckets
// 		up := alloc.RoundupSize(sz)
// 		if up != sz {
// 			nbuckets = up / t.Bucket.Size_
// 		}
// 	}
//
// 	if dirtyalloc == nil {
// 		buckets = newarray(t.Bucket, int(nbuckets))
// 	} else {
// 		// dirtyalloc was previously generated by
// 		// the above newarray(t.Bucket, int(nbuckets))
// 		// but may not be empty.
// 		buckets = dirtyalloc
// 		size := t.Bucket.Size_ * nbuckets
// 		if t.Bucket.PtrBytes != 0 {
// 			mem.MemclrHasPointers(buckets, size)
// 		} else {
// 			mem.MemclrNoHeapPointers(buckets, size)
// 		}
// 	}
//
// 	if base != nbuckets {
// 		// We preallocated some overflow buckets.
// 		// To keep the overhead of tracking these overflow buckets to a minimum,
// 		// we use the convention that if a preallocated overflow bucket's overflow
// 		// pointer is nil, then there are more available by bumping the pointer.
// 		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
// 		nextOverflow = (*bmap)(unsafe.Add(buckets, base*uintptr(t.BucketSize)))
// 		last := (*bmap)(unsafe.Add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
// 		last.setoverflow(t, (*bmap)(buckets))
// 	}
// 	return buckets, nextOverflow
// }
//
// // bucketShift returns 1<<b, optimized for code generation.
// func bucketShift(b uint8) uintptr {
// 	// Masking the shift amount allows overflow checks to be elided.
// 	return uintptr(1) << (b & (arch.PtrSize*8 - 1))
// }
//
// // bucketMask returns 1<<b - 1, optimized for code generation.
// func bucketMask(b uint8) uintptr {
// 	return bucketShift(b) - 1
// }
//
// // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
// func overLoadFactor(count int, B uint8) bool {
// 	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
// }
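// A worked example of the sizing loop in Make, wrapped in a hypothetical
// helper (sizeB is not a runtime function): for hint = 100 the loop settles
// on B = 4, since 16 buckets carry up to 16 * 6.5 = 104 elements, while
// B = 3 only covers 8 * 6.5 = 52.
//
// // sizeB returns the smallest B such that count elements in 1<<B buckets
// // do not exceed the 6.5 average load factor.
// func sizeB(count int) uint8 {
// 	B := uint8(0)
// 	for overLoadFactor(count, B) {
// 		B++
// 	}
// 	return B
// }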
// // newarray allocates an array of n elements of type typ.
// func newarray(typ *abi.Type, n int) unsafe.Pointer {
// 	if n == 1 {
// 		return alloc.MallocGC(typ.Size_, typ, true)
// 	}
// 	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
// 	if overflow || mem > os.MaxAlloc || n < 0 {
// 		panic(cerr.String("runtime: allocation size out of range"))
// 	}
//
// 	return alloc.MallocGC(mem, typ, true)
// }
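// A sketch of the consuming side of the nextOverflow convention set up in
// makeBucketArray, modeled on the Go runtime's hmap.newoverflow but reduced
// to the pointer-bumping logic; the name nextPreallocatedOverflow and the
// bmap overflow/setoverflow accessors are assumptions of this sketch.
//
// // nextPreallocatedOverflow hands out the next preallocated overflow
// // bucket, if any. A nil overflow pointer means more free buckets follow
// // contiguously; the non-nil sentinel stored by makeBucketArray marks the
// // last one.
// func (h *hmap) nextPreallocatedOverflow(t *abi.MapType) *bmap {
// 	if h.Extra == nil || h.Extra.nextOverflow == nil {
// 		return nil
// 	}
// 	ovf := h.Extra.nextOverflow
// 	if ovf.overflow(t) == nil {
// 		// Not the last one: bump the pointer to the next bucket.
// 		h.Extra.nextOverflow = (*bmap)(unsafe.Add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
// 	} else {
// 		// The last preallocated bucket: clear the sentinel overflow
// 		// pointer and stop handing buckets out.
// 		ovf.setoverflow(t, nil)
// 		h.Extra.nextOverflow = nil
// 	}
// 	return ovf
// }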