github.com/songzhibin97/gkit@v1.2.13/structure/skipmap/skipmap.go

// Copyright 2021 ByteDance Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package skipmap is a high-performance, scalable, concurrent-safe map based on a skip list.
// In a typical workload (100,000 operations; 90% LOAD, 9% STORE, 1% DELETE; 8 cores, 16 threads),
// the skipmap is up to 10x faster than the built-in sync.Map.
package skipmap

import (
    "sync"
    "sync/atomic"
    "unsafe"
)

// Int64Map represents a map based on a skip list, keyed in ascending order.
type Int64Map struct {
    header       *int64Node
    length       int64
    highestLevel int64 // highest level for now
}

type int64Node struct {
    key   int64
    value unsafe.Pointer // *interface{}
    next  optionalArray  // [level]*int64Node
    mu    sync.Mutex
    flags bitflag
    level uint32
}

func newInt64Node(key int64, value interface{}, level int) *int64Node {
    node := &int64Node{
        key:   key,
        level: uint32(level),
    }
    node.storeVal(value)
    if level > op1 {
        node.next.extra = new([op2]unsafe.Pointer)
    }
    return node
}

func (n *int64Node) storeVal(value interface{}) {
    atomic.StorePointer(&n.value, unsafe.Pointer(&value))
}

func (n *int64Node) loadVal() interface{} {
    return *(*interface{})(atomic.LoadPointer(&n.value))
}

func (n *int64Node) loadNext(i int) *int64Node {
    return (*int64Node)(n.next.load(i))
}

func (n *int64Node) storeNext(i int, node *int64Node) {
    n.next.store(i, unsafe.Pointer(node))
}

func (n *int64Node) atomicLoadNext(i int) *int64Node {
    return (*int64Node)(n.next.atomicLoad(i))
}

func (n *int64Node) atomicStoreNext(i int, node *int64Node) {
    n.next.atomicStore(i, unsafe.Pointer(node))
}

func (n *int64Node) lessthan(key int64) bool {
    return n.key < key
}

func (n *int64Node) equal(key int64) bool {
    return n.key == key
}

// NewInt64 returns an empty int64 skipmap.
func NewInt64() *Int64Map {
    h := newInt64Node(0, "", maxLevel)
    h.flags.SetTrue(fullyLinked)
    return &Int64Map{
        header:       h,
        highestLevel: defaultHighestLevel,
    }
}
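// exampleInt64MapUsage is an editorial usage sketch, not part of the original
// file; it exercises the exported API declared below (Store, Load, Delete) on
// a fresh map.
func exampleInt64MapUsage() {
    m := NewInt64()
    m.Store(42, "answer")
    if v, ok := m.Load(42); ok {
        _ = v // "answer"
    }
    m.Delete(42) // true: the key was present
}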
// findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
// The returned preds and succs always satisfy preds[i].key < key <= succs[i].key (for non-nil nodes).
// Unlike findNodeDelete, it does not compute the full path: if the node is found, findNode returns immediately.
func (s *Int64Map) findNode(key int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) *int64Node {
    x := s.header
    for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
        succ := x.atomicLoadNext(i)
        for succ != nil && succ.lessthan(key) {
            x = succ
            succ = x.atomicLoadNext(i)
        }
        preds[i] = x
        succs[i] = succ

        // Check if the key is already in the skipmap.
        if succ != nil && succ.equal(key) {
            return succ
        }
    }
    return nil
}

// findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip list.
// The returned preds and succs always satisfy preds[i].key < key <= succs[i].key (for non-nil nodes).
func (s *Int64Map) findNodeDelete(key int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) int {
    // lFound represents the index of the highest layer at which the node was found.
    lFound, x := -1, s.header
    for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
        succ := x.atomicLoadNext(i)
        for succ != nil && succ.lessthan(key) {
            x = succ
            succ = x.atomicLoadNext(i)
        }
        preds[i] = x
        succs[i] = succ

        // Check if the key is already in the skip list.
        if lFound == -1 && succ != nil && succ.equal(key) {
            lFound = i
        }
    }
    return lFound
}

func unlockInt64(preds [maxLevel]*int64Node, highestLevel int) {
    var prevPred *int64Node
    for i := highestLevel; i >= 0; i-- {
        if preds[i] != prevPred { // the node could have been unlocked by a previous iteration
            preds[i].mu.Unlock()
            prevPred = preds[i]
        }
    }
}

// Store sets the value for a key.
func (s *Int64Map) Store(key int64, value interface{}) {
    level := s.randomlevel()
    var preds, succs [maxLevel]*int64Node
    for {
        nodeFound := s.findNode(key, &preds, &succs)
        if nodeFound != nil { // the key is already in the skip list
            if !nodeFound.flags.Get(marked) {
                // We don't need to care about whether or not the node is fully linked,
                // just replace the value.
                nodeFound.storeVal(value)
                return
            }
            // If the node is marked, some other goroutine is in the process of
            // deleting it; retry and insert the node in the next loop.
            continue
        }

        // Add this node into the skip list.
        var (
            highestLocked        = -1 // the highest level locked by this process
            valid                = true
            pred, succ, prevPred *int64Node
        )
        for layer := 0; valid && layer < level; layer++ {
            pred = preds[layer] // target node's previous node
            succ = succs[layer] // target node's next node
            if pred != prevPred { // the node in this layer could have been locked by a previous iteration
                pred.mu.Lock()
                highestLocked = layer
                prevPred = pred
            }
            // valid checks whether another node has been inserted into the skip list
            // in this layer during this process.
            // It is valid if:
            //   1. Neither the previous node nor the next node is marked.
            //   2. The previous node's next node is succ in this layer.
            valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
        }
        if !valid {
            unlockInt64(preds, highestLocked)
            continue
        }

        nn := newInt64Node(key, value, level)
        for layer := 0; layer < level; layer++ {
            nn.storeNext(layer, succs[layer])
            preds[layer].atomicStoreNext(layer, nn)
        }
        nn.flags.SetTrue(fullyLinked)
        unlockInt64(preds, highestLocked)
        atomic.AddInt64(&s.length, 1)
        return
    }
}

func (s *Int64Map) randomlevel() int {
    // Generate a random level.
    level := randomLevel()
    // Update the highest level if possible.
    for {
        hl := atomic.LoadInt64(&s.highestLevel)
        if int64(level) <= hl {
            break
        }
        if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
            break
        }
    }
    return level
}
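// randomLevel is declared elsewhere in this package (not in this file). The
// helper below is an editorial sketch of how such a generator is commonly
// implemented for skip lists — a geometric draw with branching probability
// 1/4. The name, the probability, and the injected RNG are illustrative
// assumptions, not the package's actual code.
func randomLevelSketch(randUint32 func() uint32) int {
    level := 1
    // Promote to the next level with probability 1/4, capped at maxLevel, so
    // the expected number of next-pointers per node stays constant (~1.33).
    for level < maxLevel && randUint32()%4 == 0 {
        level++
    }
    return level
}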
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether the value was found in the map.
func (s *Int64Map) Load(key int64) (value interface{}, ok bool) {
    x := s.header
    for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
        nex := x.atomicLoadNext(i)
        for nex != nil && nex.lessthan(key) {
            x = nex
            nex = x.atomicLoadNext(i)
        }

        // Check if the key is already in the skip list.
        if nex != nil && nex.equal(key) {
            if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
                return nex.loadVal(), true
            }
            return nil, false
        }
    }
    return nil, false
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
// (Modified from Delete)
func (s *Int64Map) LoadAndDelete(key int64) (value interface{}, loaded bool) {
    var (
        nodeToDelete *int64Node
        isMarked     bool // whether this operation has marked the node
        topLayer     = -1
        preds, succs [maxLevel]*int64Node
    )
    for {
        lFound := s.findNodeDelete(key, &preds, &succs)
        if isMarked || // this process has marked the node, or the node was found in the skip list
            lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
            if !isMarked { // the node has not been marked yet
                nodeToDelete = succs[lFound]
                topLayer = lFound
                nodeToDelete.mu.Lock()
                if nodeToDelete.flags.Get(marked) {
                    // The node has been marked by another goroutine;
                    // the physical deletion will be accomplished by that goroutine.
                    nodeToDelete.mu.Unlock()
                    return nil, false
                }
                nodeToDelete.flags.SetTrue(marked)
                isMarked = true
            }
            // Accomplish the physical deletion.
            var (
                highestLocked        = -1 // the highest level locked by this process
                valid                = true
                pred, succ, prevPred *int64Node
            )
            for layer := 0; valid && (layer <= topLayer); layer++ {
                pred, succ = preds[layer], succs[layer]
                if pred != prevPred { // the node in this layer could have been locked by a previous iteration
                    pred.mu.Lock()
                    highestLocked = layer
                    prevPred = pred
                }
                // valid checks whether another node has been inserted into the skip list
                // in this layer during this process, or the previous node has been
                // deleted by another goroutine.
                // It is valid if:
                //   1. The previous node is not marked.
                //   2. No other node has been inserted into the skip list in this layer.
                valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
            }
            if !valid {
                unlockInt64(preds, highestLocked)
                continue
            }
            for i := topLayer; i >= 0; i-- {
                // Now we own `nodeToDelete`; no other goroutine will modify it,
                // so we don't need `nodeToDelete.atomicLoadNext`.
                preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
            }
            nodeToDelete.mu.Unlock()
            unlockInt64(preds, highestLocked)
            atomic.AddInt64(&s.length, -1)
            return nodeToDelete.loadVal(), true
        }
        return nil, false
    }
}
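// exampleLoadAndDeleteSketch is an editorial usage sketch, not part of the
// original file; it shows that LoadAndDelete returns the removed value once,
// and reports false for a key that is already gone.
func exampleLoadAndDeleteSketch() {
    m := NewInt64()
    m.Store(1, "one")
    if v, loaded := m.LoadAndDelete(1); loaded {
        _ = v // "one"; the key has been removed together with the load
    }
    _, loaded := m.LoadAndDelete(1)
    _ = loaded // false: the key is no longer present
}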
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
// (Modified from Store)
func (s *Int64Map) LoadOrStore(key int64, value interface{}) (actual interface{}, loaded bool) {
    var (
        level        int
        preds, succs [maxLevel]*int64Node
        hl           = int(atomic.LoadInt64(&s.highestLevel))
    )
    for {
        nodeFound := s.findNode(key, &preds, &succs)
        if nodeFound != nil { // the key is already in the skip list
            if !nodeFound.flags.Get(marked) {
                // We don't need to care about whether or not the node is fully linked,
                // just return the value.
                return nodeFound.loadVal(), true
            }
            // If the node is marked, some other goroutine is in the process of
            // deleting it; retry and insert the node in the next loop.
            continue
        }

        // Add this node into the skip list.
        var (
            highestLocked        = -1 // the highest level locked by this process
            valid                = true
            pred, succ, prevPred *int64Node
        )
        if level == 0 {
            level = s.randomlevel()
            if level > hl {
                // If the highest level has been raised, many goroutines are probably
                // inserting items; hopefully we can find a better path in the next loop.
                // TODO(zyh): consider filling the preds if s.header[level].next == nil,
                // but this strategy's performance is almost the same as the existing method.
                continue
            }
        }
        for layer := 0; valid && layer < level; layer++ {
            pred = preds[layer] // target node's previous node
            succ = succs[layer] // target node's next node
            if pred != prevPred { // the node in this layer could have been locked by a previous iteration
                pred.mu.Lock()
                highestLocked = layer
                prevPred = pred
            }
            // valid checks whether another node has been inserted into the skip list
            // in this layer during this process.
            // It is valid if:
            //   1. Neither the previous node nor the next node is marked.
            //   2. The previous node's next node is succ in this layer.
            valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
        }
        if !valid {
            unlockInt64(preds, highestLocked)
            continue
        }

        nn := newInt64Node(key, value, level)
        for layer := 0; layer < level; layer++ {
            nn.storeNext(layer, succs[layer])
            preds[layer].atomicStoreNext(layer, nn)
        }
        nn.flags.SetTrue(fullyLinked)
        unlockInt64(preds, highestLocked)
        atomic.AddInt64(&s.length, 1)
        return value, false
    }
}
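// exampleLoadOrStoreSketch is an editorial usage sketch, not part of the
// original file; it shows that the first call for an absent key stores the
// value, while later calls return the existing one.
func exampleLoadOrStoreSketch() {
    m := NewInt64()
    actual, loaded := m.LoadOrStore(7, "first") // key absent: stores and returns "first", loaded == false
    actual, loaded = m.LoadOrStore(7, "second") // key present: returns the existing "first", loaded == true
    _, _ = actual, loaded
}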
// LoadOrStoreLazy returns the existing value for the key if present.
// Otherwise, it stores and returns the value returned by f; f will only be called once.
// The loaded result is true if the value was loaded, false if stored.
// (Modified from LoadOrStore)
func (s *Int64Map) LoadOrStoreLazy(key int64, f func() interface{}) (actual interface{}, loaded bool) {
    var (
        level        int
        preds, succs [maxLevel]*int64Node
        hl           = int(atomic.LoadInt64(&s.highestLevel))
    )
    for {
        nodeFound := s.findNode(key, &preds, &succs)
        if nodeFound != nil { // the key is already in the skip list
            if !nodeFound.flags.Get(marked) {
                // We don't need to care about whether or not the node is fully linked,
                // just return the value.
                return nodeFound.loadVal(), true
            }
            // If the node is marked, some other goroutine is in the process of
            // deleting it; retry and insert the node in the next loop.
            continue
        }

        // Add this node into the skip list.
        var (
            highestLocked        = -1 // the highest level locked by this process
            valid                = true
            pred, succ, prevPred *int64Node
        )
        if level == 0 {
            level = s.randomlevel()
            if level > hl {
                // If the highest level has been raised, many goroutines are probably
                // inserting items; hopefully we can find a better path in the next loop.
                // TODO(zyh): consider filling the preds if s.header[level].next == nil,
                // but this strategy's performance is almost the same as the existing method.
                continue
            }
        }
        for layer := 0; valid && layer < level; layer++ {
            pred = preds[layer] // target node's previous node
            succ = succs[layer] // target node's next node
            if pred != prevPred { // the node in this layer could have been locked by a previous iteration
                pred.mu.Lock()
                highestLocked = layer
                prevPred = pred
            }
            // valid checks whether another node has been inserted into the skip list
            // in this layer during this process.
            // It is valid if:
            //   1. Neither the previous node nor the next node is marked.
            //   2. The previous node's next node is succ in this layer.
            valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
        }
        if !valid {
            unlockInt64(preds, highestLocked)
            continue
        }
        value := f()
        nn := newInt64Node(key, value, level)
        for layer := 0; layer < level; layer++ {
            nn.storeNext(layer, succs[layer])
            preds[layer].atomicStoreNext(layer, nn)
        }
        nn.flags.SetTrue(fullyLinked)
        unlockInt64(preds, highestLocked)
        atomic.AddInt64(&s.length, 1)
        return value, false
    }
}
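// exampleLoadOrStoreLazySketch is an editorial usage sketch, not part of the
// original file; it shows the intended use of LoadOrStoreLazy: the constructor
// closure runs only when the key is absent, so expensive values are built at
// most once.
func exampleLoadOrStoreLazySketch() {
    m := NewInt64()
    v, loaded := m.LoadOrStoreLazy(9, func() interface{} {
        // An expensive construction would go here; the closure is skipped
        // entirely when key 9 is already present.
        return make([]byte, 1<<20)
    })
    _, _ = v, loaded
}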
// Delete deletes the value for a key, reporting whether the key was present.
func (s *Int64Map) Delete(key int64) bool {
    var (
        nodeToDelete *int64Node
        isMarked     bool // whether this operation has marked the node
        topLayer     = -1
        preds, succs [maxLevel]*int64Node
    )
    for {
        lFound := s.findNodeDelete(key, &preds, &succs)
        if isMarked || // this process has marked the node, or the node was found in the skip list
            lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
            if !isMarked { // the node has not been marked yet
                nodeToDelete = succs[lFound]
                topLayer = lFound
                nodeToDelete.mu.Lock()
                if nodeToDelete.flags.Get(marked) {
                    // The node has been marked by another goroutine;
                    // the physical deletion will be accomplished by that goroutine.
                    nodeToDelete.mu.Unlock()
                    return false
                }
                nodeToDelete.flags.SetTrue(marked)
                isMarked = true
            }
            // Accomplish the physical deletion.
            var (
                highestLocked        = -1 // the highest level locked by this process
                valid                = true
                pred, succ, prevPred *int64Node
            )
            for layer := 0; valid && (layer <= topLayer); layer++ {
                pred, succ = preds[layer], succs[layer]
                if pred != prevPred { // the node in this layer could have been locked by a previous iteration
                    pred.mu.Lock()
                    highestLocked = layer
                    prevPred = pred
                }
                // valid checks whether another node has been inserted into the skip list
                // in this layer during this process, or the previous node has been
                // deleted by another goroutine.
                // It is valid if:
                //   1. The previous node is not marked.
                //   2. No other node has been inserted into the skip list in this layer.
                valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
            }
            if !valid {
                unlockInt64(preds, highestLocked)
                continue
            }
            for i := topLayer; i >= 0; i-- {
                // Now we own `nodeToDelete`; no other goroutine will modify it,
                // so we don't need `nodeToDelete.atomicLoadNext`.
                preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
            }
            nodeToDelete.mu.Unlock()
            unlockInt64(preds, highestLocked)
            atomic.AddInt64(&s.length, -1)
            return true
        }
        return false
    }
}

// Range calls f sequentially for each key and value present in the skipmap.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
func (s *Int64Map) Range(f func(key int64, value interface{}) bool) {
    x := s.header.atomicLoadNext(0)
    for x != nil {
        if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
            x = x.atomicLoadNext(0)
            continue
        }
        if !f(x.key, x.loadVal()) {
            break
        }
        x = x.atomicLoadNext(0)
    }
}

// Len returns the length of this skipmap.
func (s *Int64Map) Len() int {
    return int(atomic.LoadInt64(&s.length))
}
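// exampleRangeSketch is an editorial usage sketch, not part of the original
// file; it shows iteration in ascending key order and early termination by
// returning false from the callback.
func exampleRangeSketch() {
    m := NewInt64()
    for i := int64(0); i < 3; i++ {
        m.Store(i, i*i)
    }
    m.Range(func(key int64, value interface{}) bool {
        _ = value.(int64) // values are observed in ascending key order
        return key < 1    // returning false stops the iteration
    })
    _ = m.Len() // 3
}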