v8.run/go/exp@v0.0.26-0.20230226010534-afcdbd3f782d/hashtable/htab_64.go

package hashtable

import (
	"runtime"
	"sync/atomic"
	"unsafe"
)

// kv is a single key/value entry. The full 64-bit hash is cached so that
// probes can reject non-matching entries without comparing keys.
type kv[K comparable, V any] struct {
	Hash  uint64
	Key   K
	Value V
}

// metadata packs a per-block lock byte and seven H2 tag bytes into a single
// 64-bit word, so the whole block header can be read and updated with one
// atomic operation. This layout assumes a 64-bit platform (hence the _64
// file suffix), where metadata is 8-byte aligned.
type metadata struct {
	LockBits byte
	H2A      [7]byte
}

func (m *metadata) Load() metadata {
	p := (*uint64)(unsafe.Pointer(m))
	v := atomic.LoadUint64(p)
	return *(*metadata)(unsafe.Pointer(&v))
}

func (m *metadata) Store(val metadata) {
	p := (*uint64)(unsafe.Pointer(m))
	v := *(*uint64)(unsafe.Pointer(&val))
	atomic.StoreUint64(p, v)
}

func (m *metadata) CompareAndSwap(old, new metadata) bool {
	oldU64 := *(*uint64)(unsafe.Pointer(&old))
	newU64 := *(*uint64)(unsafe.Pointer(&new))
	p := (*uint64)(unsafe.Pointer(m))
	return atomic.CompareAndSwapUint64(p, oldU64, newU64)
}

// VLock spins until it can set the lock byte via CAS, yielding the
// processor while another writer holds the lock.
func (m *metadata) VLock() {
	for {
		v := m.Load()
		if v.LockBits == 1 {
			runtime.Gosched()
			continue
		}

		locked := v
		locked.LockBits = 1
		if m.CompareAndSwap(v, locked) {
			return
		}
	}
}

// VUnlock clears the lock byte, retrying until the CAS succeeds (the H2
// bytes may change concurrently under a writer probing through this block).
func (m *metadata) VUnlock() {
	for {
		v := m.Load()
		unlocked := v
		unlocked.LockBits = 0
		if m.CompareAndSwap(v, unlocked) {
			return
		}
	}
}

// hblk is one probe block: a packed header plus seven entry slots. Each
// Data slot holds a *kv[K, V], always stored and loaded atomically.
type hblk struct {
	Meta metadata
	Data [7]unsafe.Pointer
}

type htab[K comparable, V any] struct {
	blks    []hblk
	size    uint64 // number of blocks; must be a power of two
	version uint64 // not used in this file
	_       [8]uint64 // pad so count sits on its own cache line
	count   uint64    // non-empty slots, including deleted tombstones
}

// h1 is the block-selection part of the hash; the low 7 bits feed h2.
func h1(v uint64) uint64 {
	return v >> 7
}

// h2 is the 7-bit per-slot tag. The high bit is forced on, so a tag can
// never collide with the empty (0x00) or deleted (0x7F) sentinels.
func h2(v uint64) byte {
	return (byte(v) & 0b01111111) | 0b10000000
}

// qp computes the i-th quadratic (triangular-number) probe offset from v.
// For a power-of-two table size, qp(v, 0..size-1) masked by size-1 visits
// every block exactly once.
func qp(v uint64, i uint64) uint64 {
	return v + (i*i+i)/2
}

const (
	empty   = 0b00000000
	deleted = 0b01111111
)

// lookup is lock-free: it takes an atomic snapshot of each block header,
// then loads the candidate slot pointer atomically before comparing hash
// and key.
func (h *htab[K, V]) lookup(hash uint64, key K) (val V, ok bool) {
	hash1 := h1(hash)
	hash2 := h2(hash)

	blkIndex := hash1 & (h.size - 1)
	for j := uint64(0); j < h.size; j++ {
		meta := h.blks[blkIndex].Meta.Load() // atomic snapshot of the header
		for i := range meta.H2A {
			switch meta.H2A[i] {
			case empty:
				// Slots fill front to back and never revert to empty,
				// so the probe sequence ends here.
				return
			case hash2:
				v := (*kv[K, V])(atomic.LoadPointer(&h.blks[blkIndex].Data[i]))
				if v != nil {
					if v.Hash == hash && v.Key == key {
						val = v.Value
						ok = true
						return
					}
				}
			}
		}
		blkIndex = qp(hash1, j+1) & (h.size - 1)
	}
	return
}
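// A quick way to convince yourself the probing above is sound: the
// triangular offsets produced by qp, masked to a power-of-two size, form a
// permutation of the block indices. A standalone check (illustrative only,
// not part of this package):
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		const size = 8 // block count; must be a power of two
//		seen := make(map[uint64]bool)
//		for j := uint64(0); j < size; j++ {
//			seen[(3+(j*j+j)/2)&(size-1)] = true // qp(3, j) & (size - 1)
//		}
//		fmt.Println(len(seen) == size) // true: all 8 blocks visited once
//	}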
// store inserts or replaces the entry for newkv.Key. Writers serialize on
// the home block's lock byte; readers never take it.
func (h *htab[K, V]) store(hash uint64, newkv *kv[K, V]) {
	hash1 := h1(hash)
	hash2 := h2(hash)

	initBlk := hash1 & (h.size - 1)
	blkIndex := initBlk

	h.blks[initBlk].Meta.VLock()

	// First pass: look for an existing entry with this key and overwrite
	// it in place. Stop at the first empty slot, past which the key
	// cannot exist.
L:
	for j := uint64(0); j < h.size; j++ {
		meta := h.blks[blkIndex].Meta.Load() // atomic snapshot of the header
		for i := range meta.H2A {
			switch meta.H2A[i] {
			case empty:
				break L
			case hash2:
				v := (*kv[K, V])(atomic.LoadPointer(&h.blks[blkIndex].Data[i]))
				if v != nil {
					if v.Hash == hash && v.Key == newkv.Key {
						atomic.StorePointer(
							&h.blks[blkIndex].Data[i],
							unsafe.Pointer(newkv),
						)
						h.blks[initBlk].Meta.VUnlock()
						return
					}
				}
			}
		}
		blkIndex = qp(hash1, j+1) & (h.size - 1)
	}

	// Second pass: restart the probe sequence and claim the first empty
	// or deleted slot by CAS-ing its H2 tag into the block header.
	blkIndex = initBlk
	for j := uint64(0); j < h.size; j++ {
		meta := h.blks[blkIndex].Meta.Load()
		for i := range meta.H2A {
			switch meta.H2A[i] {
			case empty, deleted:
			retry:
				desired := meta
				desired.H2A[i] = hash2

				if h.blks[blkIndex].Meta.CompareAndSwap(meta, desired) {
					atomic.StorePointer(
						&h.blks[blkIndex].Data[i],
						unsafe.Pointer(newkv),
					)
					h.blks[initBlk].Meta.VUnlock()

					// Reusing a deleted slot does not change the number of
					// non-empty slots, so count only moves on empty.
					if meta.H2A[i] == empty {
						atomic.AddUint64(&h.count, 1)
					}
					return
				}

				newmeta := h.blks[blkIndex].Meta.Load()
				if newmeta.H2A[i] == hash2 {
					// Another writer claimed this slot with the same H2
					// tag; move on to the next slot.
					break
				}

				if newmeta.H2A[i] == meta.H2A[i] {
					// The CAS failed because an unrelated header byte
					// changed; the slot is still free, so retry with the
					// fresh metadata.
					meta = newmeta
					goto retry
				}
			case hash2:
				// H2 collision with a different key; keep probing.
			}
		}
		blkIndex = qp(hash1, j+1) & (h.size - 1)
	}
	// Probe sequence exhausted: the write is dropped. Callers are expected
	// to grow the table (see copyto) before it fills up.
	h.blks[initBlk].Meta.VUnlock()
}

// storeIfNotExists inserts newkv only if the key is not already present,
// and reports whether it stored the entry.
func (h *htab[K, V]) storeIfNotExists(hash uint64, newkv *kv[K, V]) (stored bool) {
	hash1 := h1(hash)
	hash2 := h2(hash)

	initBlk := hash1 & (h.size - 1)
	blkIndex := initBlk

	h.blks[initBlk].Meta.VLock()

	// First pass: bail out if the key already exists.
L:
	for j := uint64(0); j < h.size; j++ {
		meta := h.blks[blkIndex].Meta.Load()
		for i := range meta.H2A {
			switch meta.H2A[i] {
			case empty:
				break L
			case hash2:
				v := (*kv[K, V])(atomic.LoadPointer(&h.blks[blkIndex].Data[i]))
				if v != nil {
					if v.Hash == hash && v.Key == newkv.Key {
						// Key exists; leave it untouched.
						h.blks[initBlk].Meta.VUnlock()
						return
					}
				}
			}
		}
		blkIndex = qp(hash1, j+1) & (h.size - 1)
	}

	// Second pass: claim the first empty or deleted slot, as in store.
	blkIndex = initBlk
	for j := uint64(0); j < h.size; j++ {
		meta := h.blks[blkIndex].Meta.Load()
		for i := range meta.H2A {
			switch meta.H2A[i] {
			case empty, deleted:
			retry:
				desired := meta
				desired.H2A[i] = hash2

				if h.blks[blkIndex].Meta.CompareAndSwap(meta, desired) {
					atomic.StorePointer(
						&h.blks[blkIndex].Data[i],
						unsafe.Pointer(newkv),
					)
					h.blks[initBlk].Meta.VUnlock()

					if meta.H2A[i] == empty {
						atomic.AddUint64(&h.count, 1)
					}

					stored = true
					return
				}

				newmeta := h.blks[blkIndex].Meta.Load()
				if newmeta.H2A[i] == hash2 {
					// Another writer claimed this slot with the same H2
					// tag; move on to the next slot.
					break
				}

				if newmeta.H2A[i] == meta.H2A[i] {
					// Unrelated header byte changed; retry this slot.
					meta = newmeta
					goto retry
				}
			case hash2:
				// H2 collision with a different key; keep probing.
			}
		}
		blkIndex = qp(hash1, j+1) & (h.size - 1)
	}
	h.blks[initBlk].Meta.VUnlock()
	return
}

// delete removes the entry for key, clearing the slot pointer first and
// then tombstoning its H2 tag. count is not decremented: a deleted slot
// still occupies its probe sequence until the table is rehashed.
func (h *htab[K, V]) delete(hash uint64, key K) {
	hash1 := h1(hash)
	hash2 := h2(hash)

	initBlk := hash1 & (h.size - 1)
	blkIndex := initBlk

	h.blks[initBlk].Meta.VLock()

	for j := uint64(0); j < h.size; j++ {
		meta := h.blks[blkIndex].Meta.Load()
		for i := range meta.H2A {
			switch meta.H2A[i] {
			case empty:
				h.blks[initBlk].Meta.VUnlock()
				return
			case hash2:
				v := (*kv[K, V])(atomic.LoadPointer(&h.blks[blkIndex].Data[i]))
				if v != nil {
					if v.Hash == hash && v.Key == key {
						atomic.StorePointer(
							&h.blks[blkIndex].Data[i],
							nil,
						)

						// CAS the tag to deleted, retrying if other bytes
						// of the header change concurrently.
						for {
							desired := meta
							desired.H2A[i] = deleted
							if h.blks[blkIndex].Meta.CompareAndSwap(meta, desired) {
								break
							}
							meta = h.blks[blkIndex].Meta.Load()
						}

						h.blks[initBlk].Meta.VUnlock()
						return
					}
				}
			}
		}
		blkIndex = qp(hash1, j+1) & (h.size - 1)
	}

	h.blks[initBlk].Meta.VUnlock()
}
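// This file contains no resize logic; copyto (below) only rehashes entries
// into another table. A minimal sketch of how a resize driver might use it
// (hypothetical: grow is not part of this package, and it assumes writers
// are quiesced for the duration of the copy):
//
//	func grow[K comparable, V any](h *htab[K, V]) *htab[K, V] {
//		n := newHtab[K, V](h.size * 2) // doubling keeps size a power of two
//		h.copyto(n)
//		return n
//	}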
// copyto rehashes every live entry into n, skipping tombstones. Within a
// block, slots fill front to back and never revert to empty, so the first
// empty slot ends that block's scan.
func (h *htab[K, V]) copyto(n *htab[K, V]) {
	for j := range h.blks {
		meta := h.blks[j].Meta.Load()
	ML:
		for i := range meta.H2A {
			switch meta.H2A[i] {
			case empty:
				break ML
			case deleted:
				// Skip tombstones; the rehash drops them.
			default:
				v := (*kv[K, V])(atomic.LoadPointer(&h.blks[j].Data[i]))
				if v != nil {
					n.store(v.Hash, v)
				}
			}
		}
	}
}

// newHtab allocates a table with the given number of blocks. size must be
// a power of two, since probing masks indices with size-1.
func newHtab[K comparable, V any](size uint64) *htab[K, V] {
	return &htab[K, V]{
		blks: make([]hblk, size),
		size: size,
	}
}
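// Illustrative in-package usage (a sketch, not part of the original file).
// Callers must supply a well-distributed 64-bit hash; hash/maphash is one
// option, and the hashString helper below is hypothetical:
//
//	import "hash/maphash"
//
//	var seed = maphash.MakeSeed()
//
//	func hashString(s string) uint64 { return maphash.String(seed, s) }
//
//	func example() {
//		h := newHtab[string, int](8) // size must be a power of two
//
//		hv := hashString("answer")
//		h.store(hv, &kv[string, int]{Hash: hv, Key: "answer", Value: 42})
//
//		if v, ok := h.lookup(hv, "answer"); ok {
//			_ = v // 42
//		}
//
//		h.delete(hv, "answer")
//	}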