package tables

// Ported from java.util.concurrent.ConcurrentHashMap.
// Originally, written by Doug Lea with assistance from members of JCP JSR-166
// Expert Group and released to the public domain, as explained at
// http://creativecommons.org/licenses/publicdomain

import (
	"sync"
	"sync/atomic"
	"unsafe"

	"github.com/hirochachacha/plua/internal/hash"
	"github.com/hirochachacha/plua/object"
)

const (
	// retryCount is how many optimistic (lock-free) passes Len makes
	// before it falls back to locking every segment.
	retryCount = 2

	// threshold is the load factor: a segment grows once its entry
	// count exceeds len(buckets) * threshold.
	threshold = 0.75

	nSegmentBits = 4
	nSegments    = 16 // 2 ** nSegmentBits

	// segmentShift brings the top nSegmentBits of a 64-bit hash sum
	// down to [0, nSegments), selecting the owning segment.
	segmentShift = 64 - nSegmentBits

	minConcurrentMapSize = nSegments * 4 // 64
)

// concurrentMap is a hash table sharded into nSegments segments, each with
// its own lock, so writers to different keys rarely contend. Lookups are
// lock-free (see segment.get).
//
// The lastKey/next* fields carry iterator state between successive Next
// calls. NOTE(review): they are written without any synchronization, so a
// map must not be iterated from multiple goroutines concurrently — confirm
// callers guarantee this.
type concurrentMap struct {
	segments []*segment
	h        *hash.Hash

	lastKey          object.Value     // key returned by the previous Next call, or nil
	nextIndex        int              // next slot (scanning downward) in nextBuckets
	nextSegmentIndex int              // next segment (scanning downward) to visit
	nextBucket       *sbucket         // current position within a bucket chain
	nextBuckets      []unsafe.Pointer // snapshot of the bucket array being scanned
}

// newConcurrentMap returns an empty map with the minimum capacity.
func newConcurrentMap() *concurrentMap {
	return newConcurrentMapSize(0)
}

// newConcurrentMapSize returns a map pre-sized for about size entries,
// divided evenly among the segments. Small sizes are raised to
// minConcurrentMapSize; larger per-segment sizes pass through roundup
// (defined elsewhere in this package — presumably rounding up to a power
// of two so that the `sum & (len-1)` mask indexing works; TODO confirm).
func newConcurrentMapSize(size int) *concurrentMap {
	var sizePerSegment int
	if size < minConcurrentMapSize {
		size = minConcurrentMapSize
		sizePerSegment = size / nSegments
	} else {
		sizePerSegment = roundup(size / nSegments)
	}

	segments := make([]*segment, nSegments)
	for i := range segments {
		buckets := make([]unsafe.Pointer, sizePerSegment)
		segments[i] = &segment{
			// Per-segment grow trigger, derived from the
			// package-level load-factor constant.
			threshold: int32(float64(sizePerSegment) * threshold),
			buckets:   unsafe.Pointer(&buckets),
		}
	}

	return &concurrentMap{
		segments: segments,
		h:        hash.New(),
	}
}

// Cap reports the total number of bucket slots across all segments.
// Every segment lock is held while reading, so the per-segment arrays
// form a consistent snapshot.
func (m *concurrentMap) Cap() int {
	sum := 0

	for _, segment := range m.segments {
		segment.m.Lock()
	}
	for _, segment := range m.segments {
		sum += len(*(*[]unsafe.Pointer)(segment.buckets))
	}
	for _, segment := range m.segments {
		segment.m.Unlock()
	}

	return sum
}

// Len reports the number of entries in the map.
//
// It first makes retryCount optimistic passes: each pass sums the segment
// lengths while recording every segment's modCount, then (if any
// modification ever happened) re-sums and retries if a modCount moved in
// between. If no consistent snapshot is obtained, it locks all segments
// and counts under the locks.
//
// NOTE(review): modCount is read here without atomics (the Java original
// relied on volatile); under the Go memory model this is technically a
// data race, though an inconsistent read only causes a retry or the
// locking fallback — worth confirming with the race detector.
func (m *concurrentMap) Len() int {
	var sum int32
	var check int32
	var mcsum int32
	mc := make([]int32, len(m.segments))

	// non blocking
L:
	for i := 0; i < retryCount; i++ {
		sum = 0
		check = 0
		mcsum = 0

		for i, segment := range m.segments {
			sum += atomic.LoadInt32(&segment.length)
			mc[i] = segment.modCount
			mcsum += mc[i]
		}

		// mcsum == 0 means no segment was ever modified, so sum and
		// check are both zero and the snapshot is trivially stable.
		if mcsum != 0 {
			for i, segment := range m.segments {
				check += atomic.LoadInt32(&segment.length)
				if mc[i] != segment.modCount {
					continue L
				}
			}
		}

		// success
		if check == sum {
			return int(sum)
		}
	}

	// fallback to locking
	sum = 0
	for _, segment := range m.segments {
		segment.m.Lock()
	}
	for _, segment := range m.segments {
		sum += segment.length
	}
	for _, segment := range m.segments {
		segment.m.Unlock()
	}

	return int(sum)
}

// Get returns the value stored under key, or nil if absent. Lock-free.
func (m *concurrentMap) Get(key object.Value) object.Value {
	sum := m.sum(key)
	segment := m.segmentFor(sum)

	return segment.get(sum, key)
}

// Set stores val under key, locking only the owning segment.
func (m *concurrentMap) Set(key, val object.Value) {
	sum := m.sum(key)
	segment := m.segmentFor(sum)

	segment.set(sum, key, val)
}

// Delete removes key from the map, locking only the owning segment.
func (m *concurrentMap) Delete(key object.Value) {
	sum := m.sum(key)
	segment := m.segmentFor(sum)

	segment.delete(sum, key)
}

// Next implements Lua-style stateful iteration.
//
// Next(nil) resets the iterator and returns the first entry. When key is
// the key returned by the previous call, the cached iterator state is
// advanced with no hash lookup. Otherwise key is searched for and
// iteration resumes from its chain position.
//
// ok is false only when key is neither nil, nor the previous key, nor
// present in the map. At the end of iteration ok is true with nkey and
// nval both nil.
func (m *concurrentMap) Next(key object.Value) (nkey, nval object.Value, ok bool) {
	if key == nil {
		m.lastKey = nil

		ok = true

		// Reset the cursor to scan from the last segment downward.
		m.nextIndex = -1
		m.nextSegmentIndex = len(m.segments) - 1
		m.nextBucket = nil
		m.nextBuckets = nil

		m.advance()

		bucket := m.nextBucket
		if bucket == nil {
			return
		}

		nkey = bucket.key
		nval = *(*object.Value)(atomic.LoadPointer(&bucket.val))

		m.lastKey = nkey

		return
	}

	if key == m.lastKey {
		m.lastKey = nil

		ok = true

		// Fast path: continue from the cached cursor position.
		m.advance()

		bucket := m.nextBucket
		if bucket == nil {
			return
		}

		nkey = bucket.key
		nval = *(*object.Value)(atomic.LoadPointer(&bucket.val))

		m.lastKey = nkey

		return
	}

	m.lastKey = nil

	// Slow path: locate key in its segment and rebuild the cursor there.
	sum := m.sum(key)
	sindex := sum >> segmentShift
	segment := m.segments[sindex]

	if atomic.LoadInt32(&segment.length) != 0 {
		buckets := *(*[]unsafe.Pointer)(atomic.LoadPointer(&segment.buckets))
		index := sum & uint64(len(buckets)-1)
		elem := (*sbucket)(atomic.LoadPointer(&buckets[index]))
		for elem != nil {
			if elem.sum == sum && object.Equal(elem.key, key) {
				ok = true

				m.nextIndex = int(index) - 1
				m.nextSegmentIndex = int(sindex) - 1
				m.nextBucket = elem
				m.nextBuckets = buckets

				m.advance()

				bucket := m.nextBucket
				if bucket == nil {
					return
				}

				nkey = bucket.key
				nval = *(*object.Value)(atomic.LoadPointer(&bucket.val))
				m.lastKey = nkey

				return
			}

			elem = elem.next
		}

		return
	}

	return
}

// advance moves the iterator cursor to the next non-nil bucket: first
// along the current chain, then through the remaining slots of the
// current bucket array (high index to low), then through the remaining
// segments (high index to low). nextBucket is left nil once the map is
// exhausted.
func (m *concurrentMap) advance() {
	if m.nextBucket != nil {
		m.nextBucket = m.nextBucket.next

		if m.nextBucket != nil {
			return
		}
	}
	for m.nextIndex >= 0 {
		m.nextBucket = (*sbucket)(atomic.LoadPointer(&m.nextBuckets[m.nextIndex]))

		m.nextIndex--

		if m.nextBucket != nil {
			return
		}
	}
	for m.nextSegmentIndex >= 0 {
		segment := m.segments[m.nextSegmentIndex]

		m.nextSegmentIndex--

		// Skip empty segments without touching their bucket arrays.
		if atomic.LoadInt32(&segment.length) != 0 {
			m.nextBuckets = *(*[]unsafe.Pointer)(atomic.LoadPointer(&segment.buckets))
			for j := len(m.nextBuckets) - 1; j >= 0; j-- {
				m.nextBucket = (*sbucket)(atomic.LoadPointer(&m.nextBuckets[j]))

				m.nextIndex = j - 1

				if m.nextBucket != nil {
					return
				}
			}
		}
	}
}

// segmentFor selects the segment owning a hash sum by its top nSegmentBits.
func (m *concurrentMap) segmentFor(sum uint64) *segment {
	return m.segments[sum>>segmentShift]
}

// sum returns the 64-bit hash of key.
func (m *concurrentMap) sum(key object.Value) uint64 {
	return m.h.Sum(key)
}
// sbucket is a node in a segment's separately-chained bucket list.
//
// key, sum, and next never change after the node is published, so readers
// may traverse a chain without locks. val points to an object.Value and is
// replaced atomically on update.
//
// NOTE(review): isActive is not referenced anywhere in this file — confirm
// it is used elsewhere in the package, or remove it.
type sbucket struct {
	key      object.Value
	val      unsafe.Pointer // *object.Value, swapped via atomic.StorePointer
	sum      uint64
	next     *sbucket
	isActive bool
}

// segment is one lock-striped shard of the concurrent map.
type segment struct {
	length  int32          // entry count; read/written atomically
	buckets unsafe.Pointer // *[]unsafe.Pointer of *sbucket chain heads; swapped atomically by grow

	threshold int32 // grow once length exceeds this
	modCount  int32 // bumped on each structural change (insert/delete); sampled by Len

	m sync.Mutex // guards all writes to this segment
}

// get returns the value stored under key in this segment, or nil if
// absent. The fast path takes no locks: it atomically loads the bucket
// array and walks the chain with atomic pointer loads. If the loaded
// value reads as nil, it is re-read under the segment lock — a port of
// ConcurrentHashMap's readValueUnderLock, presumably guarding against
// observing a not-yet-fully-published entry (TODO confirm this is
// reachable under the Go memory model).
func (s *segment) get(sum uint64, key object.Value) object.Value {
	if atomic.LoadInt32(&s.length) != 0 {
		buckets := *(*[]unsafe.Pointer)(atomic.LoadPointer(&s.buckets))
		index := sum & uint64(len(buckets)-1)
		elem := (*sbucket)(atomic.LoadPointer(&buckets[index]))
		for elem != nil {
			if elem.sum == sum && object.Equal(elem.key, key) {
				val := *(*object.Value)(atomic.LoadPointer(&elem.val))
				if val == nil {
					s.m.Lock()

					val = *(*object.Value)(elem.val)

					s.m.Unlock()
				}

				return val
			}

			elem = elem.next
		}
	}

	return nil
}

// set stores val under key, taking the segment lock.
func (s *segment) set(sum uint64, key, val object.Value) {
	s.m.Lock()

	s.unsafeSet(sum, key, val)

	s.m.Unlock()
}

// unsafeSet stores val under key. It must be called with s.m held.
// An existing entry only has its value pointer swapped in place; otherwise
// a fully initialized node is prepended to the chain and published with an
// atomic store so lock-free readers never see a partial node.
func (s *segment) unsafeSet(sum uint64, key, val object.Value) {
	// Grow first so a new entry lands in the right (new) array.
	if s.length > s.threshold {
		s.grow()
	}

	buckets := *(*[]unsafe.Pointer)(s.buckets)
	index := sum & uint64(len(buckets)-1)
	first := (*sbucket)(buckets[index])
	elem := first
	for elem != nil {
		if elem.sum == sum && object.Equal(elem.key, key) {
			atomic.StorePointer(&elem.val, unsafe.Pointer(&val))

			return
		}

		elem = elem.next
	}

	bucket := &sbucket{
		key:  key,
		val:  unsafe.Pointer(&val),
		sum:  sum,
		next: first,
	}

	s.modCount++

	atomic.StorePointer(&buckets[index], unsafe.Pointer(bucket))
	atomic.AddInt32(&s.length, 1)
}

// delete removes key from this segment if present.
//
// Because chain nodes are immutable once published, the nodes preceding
// the removed one are cloned onto the removed node's tail (the cloned
// prefix ends up reversed, which is harmless in a hash chain), and the
// new head is published atomically. Readers concurrently traversing the
// old chain still see a complete, consistent list.
func (s *segment) delete(sum uint64, key object.Value) {
	s.m.Lock()
	defer s.m.Unlock()

	buckets := *(*[]unsafe.Pointer)(s.buckets)
	index := sum & uint64(len(buckets)-1)
	first := (*sbucket)(buckets[index])
	elem := first
	for elem != nil {
		if elem.sum == sum && object.Equal(elem.key, key) {
			s.modCount++

			// Rebuild the prefix [first, elem) on top of elem's tail.
			bucket := elem.next
			for p := first; p != elem; p = p.next {
				bucket = &sbucket{
					key:  p.key,
					val:  p.val,
					sum:  p.sum,
					next: bucket,
				}
			}

			atomic.StorePointer(&buckets[index], unsafe.Pointer(bucket))

			atomic.AddInt32(&s.length, -1)

			return
		}

		elem = elem.next
	}
}

// grow replaces this segment's bucket array with one growRate times
// larger (growRate is defined elsewhere in this package) and rehashes
// every chain into it. It must be called with s.m held.
//
// As in ConcurrentHashMap, the trailing run of each old chain whose nodes
// all map to the same new slot is reused as-is; only the nodes before
// that run are cloned. The new array is published with an atomic store so
// lock-free readers switch over atomically.
func (s *segment) grow() {
	old := *(*[]unsafe.Pointer)(s.buckets)

	length := len(old) * growRate

	buckets := make([]unsafe.Pointer, length)

	for _, p := range old {
		elem := (*sbucket)(p)
		if elem != nil {
			next := elem.next

			index := elem.sum & uint64(length-1)

			if next == nil {
				// Single-node chain: move it wholesale.
				buckets[index] = unsafe.Pointer(elem)
			} else {
				// Find the last run of nodes that share one
				// new index; that suffix can be reused.
				lastElem := elem
				lastIndex := index

				for last := next; last != nil; last = last.next {
					i := last.sum & uint64(length-1)
					if i != lastIndex {
						lastIndex = i
						lastElem = last
					}
				}

				buckets[lastIndex] = unsafe.Pointer(lastElem)

				// Clone everything before the reused suffix.
				for p := elem; p != lastElem; p = p.next {
					i := p.sum & uint64(length-1)
					n := buckets[i]
					buckets[i] = unsafe.Pointer(&sbucket{key: p.key, sum: p.sum, val: p.val, next: (*sbucket)(n)})
				}
			}
		}
	}

	s.threshold = int32(float64(length) * threshold)

	atomic.StorePointer(&s.buckets, unsafe.Pointer(&buckets))
}