github.com/songzhibin97/gkit@v1.2.13/structure/skipmap/types.go

// Copyright 2021 ByteDance Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by go run types_gen.go; DO NOT EDIT.
package skipmap

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// ByteMap represents a map based on a skip list, in ascending key order.
type ByteMap struct {
	header       *byteNode
	length       int64
	highestLevel int64 // highest level for now
}

type byteNode struct {
	key   byte
	value unsafe.Pointer // *interface{}
	next  optionalArray  // [level]*byteNode
	mu    sync.Mutex
	flags bitflag
	level uint32
}

func newByteNode(key byte, value interface{}, level int) *byteNode {
	node := &byteNode{
		key:   key,
		level: uint32(level),
	}
	node.storeVal(value)
	if level > op1 {
		node.next.extra = new([op2]unsafe.Pointer)
	}
	return node
}

func (n *byteNode) storeVal(value interface{}) {
	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
}

func (n *byteNode) loadVal() interface{} {
	return *(*interface{})(atomic.LoadPointer(&n.value))
}
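
// storeVal/loadVal implement lock-free value replacement: the value is boxed
// as a *interface{} and the pointer is swapped atomically. A minimal
// standalone sketch of the same pattern (the type and names below are
// illustrative, not part of this package):
//
//	type box struct{ p unsafe.Pointer } // holds a *interface{}
//
//	func (b *box) store(v interface{}) { atomic.StorePointer(&b.p, unsafe.Pointer(&v)) }
//	func (b *box) load() interface{}   { return *(*interface{})(atomic.LoadPointer(&b.p)) }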

func (n *byteNode) loadNext(i int) *byteNode {
	return (*byteNode)(n.next.load(i))
}

func (n *byteNode) storeNext(i int, node *byteNode) {
	n.next.store(i, unsafe.Pointer(node))
}

func (n *byteNode) atomicLoadNext(i int) *byteNode {
	return (*byteNode)(n.next.atomicLoad(i))
}

func (n *byteNode) atomicStoreNext(i int, node *byteNode) {
	n.next.atomicStore(i, unsafe.Pointer(node))
}

func (n *byteNode) lessthan(key byte) bool {
	return n.key < key
}

func (n *byteNode) equal(key byte) bool {
	return n.key == key
}

// NewByte returns an empty byte skipmap.
func NewByte() *ByteMap {
	h := newByteNode(0, "", maxLevel)
	h.flags.SetTrue(fullyLinked)
	return &ByteMap{
		header:       h,
		highestLevel: defaultHighestLevel,
	}
}
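
// A basic usage sketch, assuming the package is imported as
// "github.com/songzhibin97/gkit/structure/skipmap" (keys and values are
// illustrative):
//
//	m := skipmap.NewByte()
//	m.Store('a', 1)
//	m.Store('b', 2)
//	if v, ok := m.Load('a'); ok {
//		fmt.Println(v) // 1
//	}
//	m.Delete('b')
//	fmt.Println(m.Len()) // 1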

// findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
// The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
// (It does not record the full search path; if the node is found, it returns immediately.)
func (s *ByteMap) findNode(key byte, preds *[maxLevel]*byteNode, succs *[maxLevel]*byteNode) *byteNode {
	x := s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		succ := x.atomicLoadNext(i)
		for succ != nil && succ.lessthan(key) {
			x = succ
			succ = x.atomicLoadNext(i)
		}
		preds[i] = x
		succs[i] = succ

		// Check if the key is already in the skipmap.
		if succ != nil && succ.equal(key) {
			return succ
		}
	}
	return nil
}

// findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip list.
// The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
func (s *ByteMap) findNodeDelete(key byte, preds *[maxLevel]*byteNode, succs *[maxLevel]*byteNode) int {
	// lFound represents the index of the first layer at which the node was found.
	lFound, x := -1, s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		succ := x.atomicLoadNext(i)
		for succ != nil && succ.lessthan(key) {
			x = succ
			succ = x.atomicLoadNext(i)
		}
		preds[i] = x
		succs[i] = succ

		// Check if the key is already in the skip list.
		if lFound == -1 && succ != nil && succ.equal(key) {
			lFound = i
		}
	}
	return lFound
}

func unlockByte(preds [maxLevel]*byteNode, highestLevel int) {
	var prevPred *byteNode
	for i := highestLevel; i >= 0; i-- {
		if preds[i] != prevPred { // the same pred may guard several layers; unlock it only once
			preds[i].mu.Unlock()
			prevPred = preds[i]
		}
	}
}
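
// Because the same predecessor node may guard several adjacent layers, the
// insert/delete paths lock each distinct pred exactly once, and unlockByte
// mirrors that by unlocking each distinct pred exactly once. A hypothetical
// trace (pA and pB are illustrative nodes):
//
//	preds = [pA, pA, pB, ...], highestLevel = 2
//	locking:   Lock(pA) at layer 0, layer 1 skipped (same pred), Lock(pB) at layer 2
//	unlocking: Unlock(pB) at layer 2, Unlock(pA) at layer 1, layer 0 skipped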

// Store sets the value for a key.
func (s *ByteMap) Store(key byte, value interface{}) {
	level := s.randomlevel()
	var preds, succs [maxLevel]*byteNode
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just replace the value.
				nodeFound.storeVal(value)
				return
			}
			// If the node is marked, some other goroutine is in the process of deleting it,
			// so we retry and insert the node in the next loop.
			continue
		}

		// Add this node to the skip list.
		var (
			highestLocked        = -1 // the highest layer locked by this goroutine
			valid                = true
			pred, succ, prevPred *byteNode
		)
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // this pred may already be locked from a previous layer
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid checks whether another node has been inserted into this layer during the search.
			// The insertion is valid if:
			// 1. Neither the previous node nor the next node is marked.
			// 2. The previous node's next node in this layer is still succ.
			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
		}
		if !valid {
			unlockByte(preds, highestLocked)
			continue
		}

		nn := newByteNode(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		nn.flags.SetTrue(fullyLinked)
		unlockByte(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return
	}
}
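
// Store is safe for concurrent use. A hedged sketch of concurrent writers
// (import path, goroutine count, and key range are illustrative):
//
//	m := skipmap.NewByte()
//	var wg sync.WaitGroup
//	for g := 0; g < 8; g++ {
//		wg.Add(1)
//		go func(g int) {
//			defer wg.Done()
//			for k := 0; k < 256; k++ {
//				m.Store(byte(k), g) // the last writer for each key wins
//			}
//		}(g)
//	}
//	wg.Wait()
//	fmt.Println(m.Len()) // 256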

func (s *ByteMap) randomlevel() int {
	// Generate random level.
	level := randomLevel()
	// Update highest level if possible.
	for {
		hl := atomic.LoadInt64(&s.highestLevel)
		if int64(level) <= hl {
			break
		}
		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
			break
		}
	}
	return level
}
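
// The loop above is a CAS-max: it raises s.highestLevel to level unless a
// concurrent writer has already raised it at least as far. The same pattern in
// isolation (atomicMaxInt64 is an illustrative helper, not part of this package):
//
//	func atomicMaxInt64(addr *int64, v int64) {
//		for {
//			old := atomic.LoadInt64(addr)
//			if v <= old || atomic.CompareAndSwapInt64(addr, old, v) {
//				return
//			}
//		}
//	}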

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether the value was found in the map.
func (s *ByteMap) Load(key byte) (value interface{}, ok bool) {
	x := s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		nex := x.atomicLoadNext(i)
		for nex != nil && nex.lessthan(key) {
			x = nex
			nex = x.atomicLoadNext(i)
		}

		// Check if the key is already in the skip list.
		if nex != nil && nex.equal(key) {
			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
				return nex.loadVal(), true
			}
			return nil, false
		}
	}
	return nil, false
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
// (Modified from Delete)
func (s *ByteMap) LoadAndDelete(key byte) (value interface{}, loaded bool) {
	var (
		nodeToDelete *byteNode
		isMarked     bool // reports whether this operation marked the node
		topLayer     = -1
		preds, succs [maxLevel]*byteNode
	)
	for {
		lFound := s.findNodeDelete(key, &preds, &succs)
		if isMarked || // this goroutine marked the node, or the node was found in the skip list
			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
			if !isMarked { // the node has not been marked yet
				nodeToDelete = succs[lFound]
				topLayer = lFound
				nodeToDelete.mu.Lock()
				if nodeToDelete.flags.Get(marked) {
					// The node is marked by another goroutine,
					// which will accomplish the physical deletion.
					nodeToDelete.mu.Unlock()
					return nil, false
				}
				nodeToDelete.flags.SetTrue(marked)
				isMarked = true
			}
			// Accomplish the physical deletion.
			var (
				highestLocked        = -1 // the highest layer locked by this goroutine
				valid                = true
				pred, succ, prevPred *byteNode
			)
			for layer := 0; valid && (layer <= topLayer); layer++ {
				pred, succ = preds[layer], succs[layer]
				if pred != prevPred { // this pred may already be locked from a previous layer
					pred.mu.Lock()
					highestLocked = layer
					prevPred = pred
				}
				// valid checks whether another node has been inserted into this layer during the search,
				// or whether the previous node has been deleted by another goroutine.
				// The deletion is valid if:
				// 1. The previous node is not marked.
				// 2. No other node has been inserted into this layer (pred's next is still succ).
				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
			}
			if !valid {
				unlockByte(preds, highestLocked)
				continue
			}
			for i := topLayer; i >= 0; i-- {
				// Now we own `nodeToDelete`; no other goroutine will modify it,
				// so the non-atomic loadNext is sufficient here.
				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
			}
			nodeToDelete.mu.Unlock()
			unlockByte(preds, highestLocked)
			atomic.AddInt64(&s.length, -1)
			return nodeToDelete.loadVal(), true
		}
		return nil, false
	}
}
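
// LoadAndDelete usage sketch (illustrative key and value):
//
//	m := skipmap.NewByte()
//	m.Store('x', "payload")
//	if v, loaded := m.LoadAndDelete('x'); loaded {
//		fmt.Println(v) // "payload"; 'x' is now gone
//	}
//	_, loaded := m.LoadAndDelete('x')
//	fmt.Println(loaded) // false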

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
// (Modified from Store)
func (s *ByteMap) LoadOrStore(key byte, value interface{}) (actual interface{}, loaded bool) {
	var (
		level        int
		preds, succs [maxLevel]*byteNode
		hl           = int(atomic.LoadInt64(&s.highestLevel))
	)
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just return the value.
				return nodeFound.loadVal(), true
			}
			// If the node is marked, some other goroutine is in the process of deleting it,
			// so we retry and insert the node in the next loop.
			continue
		}

		// Add this node to the skip list.
		var (
			highestLocked        = -1 // the highest layer locked by this goroutine
			valid                = true
			pred, succ, prevPred *byteNode
		)
		if level == 0 {
			level = s.randomlevel()
			if level > hl {
				// If the highest level has been raised, it usually means that many goroutines
				// are inserting items. Hopefully we can find a better path in the next loop.
				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
				// but this strategy's performance is almost the same as the existing method.
				continue
			}
		}
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // this pred may already be locked from a previous layer
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid checks whether another node has been inserted into this layer during the search.
			// The insertion is valid if:
			// 1. Neither the previous node nor the next node is marked.
			// 2. The previous node's next node in this layer is still succ.
			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
		}
		if !valid {
			unlockByte(preds, highestLocked)
			continue
		}

		nn := newByteNode(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		nn.flags.SetTrue(fullyLinked)
		unlockByte(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return value, false
	}
}
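
// LoadOrStore makes the first writer for a key win, which is handy for
// registries and caches. Sketch (illustrative values):
//
//	m := skipmap.NewByte()
//	actual, loaded := m.LoadOrStore('k', "first")
//	fmt.Println(actual, loaded) // first false
//	actual, loaded = m.LoadOrStore('k', "second")
//	fmt.Println(actual, loaded) // first true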

// LoadOrStoreLazy returns the existing value for the key if present.
// Otherwise, it stores and returns the value computed by f; f is called only once.
// The loaded result is true if the value was loaded, false if stored.
// (Modified from LoadOrStore)
func (s *ByteMap) LoadOrStoreLazy(key byte, f func() interface{}) (actual interface{}, loaded bool) {
	var (
		level        int
		preds, succs [maxLevel]*byteNode
		hl           = int(atomic.LoadInt64(&s.highestLevel))
	)
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just return the value.
				return nodeFound.loadVal(), true
			}
			// If the node is marked, some other goroutine is in the process of deleting it,
			// so we retry and insert the node in the next loop.
			continue
		}

		// Add this node to the skip list.
		var (
			highestLocked        = -1 // the highest layer locked by this goroutine
			valid                = true
			pred, succ, prevPred *byteNode
		)
		if level == 0 {
			level = s.randomlevel()
			if level > hl {
				// If the highest level has been raised, it usually means that many goroutines
				// are inserting items. Hopefully we can find a better path in the next loop.
				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
				// but this strategy's performance is almost the same as the existing method.
				continue
			}
		}
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // this pred may already be locked from a previous layer
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid checks whether another node has been inserted into this layer during the search.
			// The insertion is valid if:
			// 1. Neither the previous node nor the next node is marked.
			// 2. The previous node's next node in this layer is still succ.
			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
		}
		if !valid {
			unlockByte(preds, highestLocked)
			continue
		}
		value := f()
		nn := newByteNode(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		nn.flags.SetTrue(fullyLinked)
		unlockByte(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return value, false
	}
}
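
// LoadOrStoreLazy defers constructing the value until the key is known to be
// absent, which helps when construction is expensive (expensiveInit is an
// illustrative helper):
//
//	v, loaded := m.LoadOrStoreLazy('k', func() interface{} {
//		return expensiveInit() // runs only if 'k' was absent
//	})
//
// Note that f runs while the relevant predecessor nodes are locked, so it
// should be cheap enough not to stall concurrent writers of nearby keys.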

// Delete deletes the value for a key.
func (s *ByteMap) Delete(key byte) bool {
	var (
		nodeToDelete *byteNode
		isMarked     bool // reports whether this operation marked the node
		topLayer     = -1
		preds, succs [maxLevel]*byteNode
	)
	for {
		lFound := s.findNodeDelete(key, &preds, &succs)
		if isMarked || // this goroutine marked the node, or the node was found in the skip list
			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
			if !isMarked { // the node has not been marked yet
				nodeToDelete = succs[lFound]
				topLayer = lFound
				nodeToDelete.mu.Lock()
				if nodeToDelete.flags.Get(marked) {
					// The node is marked by another goroutine,
					// which will accomplish the physical deletion.
					nodeToDelete.mu.Unlock()
					return false
				}
				nodeToDelete.flags.SetTrue(marked)
				isMarked = true
			}
			// Accomplish the physical deletion.
			var (
				highestLocked        = -1 // the highest layer locked by this goroutine
				valid                = true
				pred, succ, prevPred *byteNode
			)
			for layer := 0; valid && (layer <= topLayer); layer++ {
				pred, succ = preds[layer], succs[layer]
				if pred != prevPred { // this pred may already be locked from a previous layer
					pred.mu.Lock()
					highestLocked = layer
					prevPred = pred
				}
				// valid checks whether another node has been inserted into this layer during the search,
				// or whether the previous node has been deleted by another goroutine.
				// The deletion is valid if:
				// 1. The previous node is not marked.
				// 2. No other node has been inserted into this layer (pred's next is still succ).
				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
			}
			if !valid {
				unlockByte(preds, highestLocked)
				continue
			}
			for i := topLayer; i >= 0; i-- {
				// Now we own `nodeToDelete`; no other goroutine will modify it,
				// so the non-atomic loadNext is sufficient here.
				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
			}
			nodeToDelete.mu.Unlock()
			unlockByte(preds, highestLocked)
			atomic.AddInt64(&s.length, -1)
			return true
		}
		return false
	}
}

// Range calls f sequentially for each key and value present in the skipmap.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
func (s *ByteMap) Range(f func(key byte, value interface{}) bool) {
	x := s.header.atomicLoadNext(0)
	for x != nil {
		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
			x = x.atomicLoadNext(0)
			continue
		}
		if !f(x.key, x.loadVal()) {
			break
		}
		x = x.atomicLoadNext(0)
	}
}
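
// Range usage sketch; iteration sees keys in the map's ascending order
// (values are illustrative):
//
//	m.Store('a', 1)
//	m.Store('b', 2)
//	m.Range(func(key byte, value interface{}) bool {
//		fmt.Printf("%c=%v\n", key, value) // a=1, then b=2
//		return true // return false to stop early
//	})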

// Len returns the number of elements in the skipmap.
func (s *ByteMap) Len() int {
	return int(atomic.LoadInt64(&s.length))
}

// ByteMapDesc represents a map based on a skip list, in descending key order.
type ByteMapDesc struct {
	header       *byteNodeDesc
	length       int64
	highestLevel int64 // highest level for now
}

type byteNodeDesc struct {
	key   byte
	value unsafe.Pointer // *interface{}
	next  optionalArray  // [level]*byteNodeDesc
	mu    sync.Mutex
	flags bitflag
	level uint32
}

func newByteNodeDesc(key byte, value interface{}, level int) *byteNodeDesc {
	node := &byteNodeDesc{
		key:   key,
		level: uint32(level),
	}
	node.storeVal(value)
	if level > op1 {
		node.next.extra = new([op2]unsafe.Pointer)
	}
	return node
}

func (n *byteNodeDesc) storeVal(value interface{}) {
	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
}

func (n *byteNodeDesc) loadVal() interface{} {
	return *(*interface{})(atomic.LoadPointer(&n.value))
}

func (n *byteNodeDesc) loadNext(i int) *byteNodeDesc {
	return (*byteNodeDesc)(n.next.load(i))
}

func (n *byteNodeDesc) storeNext(i int, node *byteNodeDesc) {
	n.next.store(i, unsafe.Pointer(node))
}

func (n *byteNodeDesc) atomicLoadNext(i int) *byteNodeDesc {
	return (*byteNodeDesc)(n.next.atomicLoad(i))
}

func (n *byteNodeDesc) atomicStoreNext(i int, node *byteNodeDesc) {
	n.next.atomicStore(i, unsafe.Pointer(node))
}

func (n *byteNodeDesc) lessthan(key byte) bool {
	return n.key > key
}

func (n *byteNodeDesc) equal(key byte) bool {
	return n.key == key
}

// NewByteDesc returns an empty byte skipmap in descending order.
func NewByteDesc() *ByteMapDesc {
	h := newByteNodeDesc(0, "", maxLevel)
	h.flags.SetTrue(fullyLinked)
	return &ByteMapDesc{
		header:       h,
		highestLevel: defaultHighestLevel,
	}
}
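
// The Desc variant differs only in the inverted comparison (lessthan above),
// so lookups and iteration see keys in descending order. Sketch (illustrative
// values):
//
//	m := skipmap.NewByteDesc()
//	m.Store(1, "one")
//	m.Store(2, "two")
//	m.Range(func(key byte, value interface{}) bool {
//		fmt.Println(key) // 2, then 1
//		return true
//	})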

// findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
// The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
// (It does not record the full search path; if the node is found, it returns immediately.)
func (s *ByteMapDesc) findNode(key byte, preds *[maxLevel]*byteNodeDesc, succs *[maxLevel]*byteNodeDesc) *byteNodeDesc {
	x := s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		succ := x.atomicLoadNext(i)
		for succ != nil && succ.lessthan(key) {
			x = succ
			succ = x.atomicLoadNext(i)
		}
		preds[i] = x
		succs[i] = succ

		// Check if the key is already in the skipmap.
		if succ != nil && succ.equal(key) {
			return succ
		}
	}
	return nil
}

// findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip list.
// The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
func (s *ByteMapDesc) findNodeDelete(key byte, preds *[maxLevel]*byteNodeDesc, succs *[maxLevel]*byteNodeDesc) int {
	// lFound represents the index of the first layer at which the node was found.
	lFound, x := -1, s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		succ := x.atomicLoadNext(i)
		for succ != nil && succ.lessthan(key) {
			x = succ
			succ = x.atomicLoadNext(i)
		}
		preds[i] = x
		succs[i] = succ

		// Check if the key is already in the skip list.
		if lFound == -1 && succ != nil && succ.equal(key) {
			lFound = i
		}
	}
	return lFound
}

func unlockByteDesc(preds [maxLevel]*byteNodeDesc, highestLevel int) {
	var prevPred *byteNodeDesc
	for i := highestLevel; i >= 0; i-- {
		if preds[i] != prevPred { // the same pred may guard several layers; unlock it only once
			preds[i].mu.Unlock()
			prevPred = preds[i]
		}
	}
}

// Store sets the value for a key.
func (s *ByteMapDesc) Store(key byte, value interface{}) {
	level := s.randomlevel()
	var preds, succs [maxLevel]*byteNodeDesc
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just replace the value.
				nodeFound.storeVal(value)
				return
			}
			// If the node is marked, some other goroutine is in the process of deleting it,
			// so we retry and insert the node in the next loop.
			continue
		}

		// Add this node to the skip list.
		var (
			highestLocked        = -1 // the highest layer locked by this goroutine
			valid                = true
			pred, succ, prevPred *byteNodeDesc
		)
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // this pred may already be locked from a previous layer
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid checks whether another node has been inserted into this layer during the search.
			// The insertion is valid if:
			// 1. Neither the previous node nor the next node is marked.
			// 2. The previous node's next node in this layer is still succ.
			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
		}
		if !valid {
			unlockByteDesc(preds, highestLocked)
			continue
		}

		nn := newByteNodeDesc(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		nn.flags.SetTrue(fullyLinked)
		unlockByteDesc(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return
	}
}

func (s *ByteMapDesc) randomlevel() int {
	// Generate random level.
	level := randomLevel()
	// Update highest level if possible.
	for {
		hl := atomic.LoadInt64(&s.highestLevel)
		if int64(level) <= hl {
			break
		}
		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
			break
		}
	}
	return level
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether the value was found in the map.
func (s *ByteMapDesc) Load(key byte) (value interface{}, ok bool) {
	x := s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		nex := x.atomicLoadNext(i)
		for nex != nil && nex.lessthan(key) {
			x = nex
			nex = x.atomicLoadNext(i)
		}

		// Check if the key is already in the skip list.
		if nex != nil && nex.equal(key) {
			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
				return nex.loadVal(), true
			}
			return nil, false
		}
	}
	return nil, false
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
// (Modified from Delete)
func (s *ByteMapDesc) LoadAndDelete(key byte) (value interface{}, loaded bool) {
	var (
		nodeToDelete *byteNodeDesc
		isMarked     bool // reports whether this operation marked the node
		topLayer     = -1
		preds, succs [maxLevel]*byteNodeDesc
	)
	for {
		lFound := s.findNodeDelete(key, &preds, &succs)
		if isMarked || // this goroutine marked the node, or the node was found in the skip list
			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
			if !isMarked { // the node has not been marked yet
				nodeToDelete = succs[lFound]
				topLayer = lFound
				nodeToDelete.mu.Lock()
				if nodeToDelete.flags.Get(marked) {
					// The node is marked by another goroutine,
					// which will accomplish the physical deletion.
					nodeToDelete.mu.Unlock()
					return nil, false
				}
				nodeToDelete.flags.SetTrue(marked)
				isMarked = true
			}
			// Accomplish the physical deletion.
			var (
				highestLocked        = -1 // the highest layer locked by this goroutine
				valid                = true
				pred, succ, prevPred *byteNodeDesc
			)
			for layer := 0; valid && (layer <= topLayer); layer++ {
				pred, succ = preds[layer], succs[layer]
				if pred != prevPred { // this pred may already be locked from a previous layer
					pred.mu.Lock()
					highestLocked = layer
					prevPred = pred
				}
				// valid checks whether another node has been inserted into this layer during the search,
				// or whether the previous node has been deleted by another goroutine.
				// The deletion is valid if:
				// 1. The previous node is not marked.
				// 2. No other node has been inserted into this layer (pred's next is still succ).
				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
			}
			if !valid {
				unlockByteDesc(preds, highestLocked)
				continue
			}
			for i := topLayer; i >= 0; i-- {
				// Now we own `nodeToDelete`; no other goroutine will modify it,
				// so the non-atomic loadNext is sufficient here.
				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
			}
			nodeToDelete.mu.Unlock()
			unlockByteDesc(preds, highestLocked)
			atomic.AddInt64(&s.length, -1)
			return nodeToDelete.loadVal(), true
		}
		return nil, false
	}
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
// (Modified from Store)
func (s *ByteMapDesc) LoadOrStore(key byte, value interface{}) (actual interface{}, loaded bool) {
	var (
		level        int
		preds, succs [maxLevel]*byteNodeDesc
		hl           = int(atomic.LoadInt64(&s.highestLevel))
	)
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just return the value.
				return nodeFound.loadVal(), true
			}
			// If the node is marked, some other goroutine is in the process of deleting it,
			// so we retry and insert the node in the next loop.
			continue
		}

		// Add this node to the skip list.
		var (
			highestLocked        = -1 // the highest layer locked by this goroutine
			valid                = true
			pred, succ, prevPred *byteNodeDesc
		)
		if level == 0 {
			level = s.randomlevel()
			if level > hl {
				// If the highest level has been raised, it usually means that many goroutines
				// are inserting items. Hopefully we can find a better path in the next loop.
				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
				// but this strategy's performance is almost the same as the existing method.
				continue
			}
		}
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // this pred may already be locked from a previous layer
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid checks whether another node has been inserted into this layer during the search.
			// The insertion is valid if:
			// 1. Neither the previous node nor the next node is marked.
			// 2. The previous node's next node in this layer is still succ.
			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
		}
		if !valid {
			unlockByteDesc(preds, highestLocked)
			continue
		}

		nn := newByteNodeDesc(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		nn.flags.SetTrue(fullyLinked)
		unlockByteDesc(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return value, false
	}
}

// LoadOrStoreLazy returns the existing value for the key if present.
// Otherwise, it stores and returns the value computed by f; f is called only once.
// The loaded result is true if the value was loaded, false if stored.
// (Modified from LoadOrStore)
func (s *ByteMapDesc) LoadOrStoreLazy(key byte, f func() interface{}) (actual interface{}, loaded bool) {
	var (
		level        int
		preds, succs [maxLevel]*byteNodeDesc
		hl           = int(atomic.LoadInt64(&s.highestLevel))
	)
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just return the value.
				return nodeFound.loadVal(), true
			}
			// If the node is marked, some other goroutine is in the process of deleting it,
			// so we retry and insert the node in the next loop.
			continue
		}

		// Add this node to the skip list.
		var (
			highestLocked        = -1 // the highest layer locked by this goroutine
			valid                = true
			pred, succ, prevPred *byteNodeDesc
		)
		if level == 0 {
			level = s.randomlevel()
			if level > hl {
				// If the highest level has been raised, it usually means that many goroutines
				// are inserting items. Hopefully we can find a better path in the next loop.
				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
				// but this strategy's performance is almost the same as the existing method.
				continue
			}
		}
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // this pred may already be locked from a previous layer
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid checks whether another node has been inserted into this layer during the search.
			// The insertion is valid if:
			// 1. Neither the previous node nor the next node is marked.
			// 2. The previous node's next node in this layer is still succ.
			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
		}
		if !valid {
			unlockByteDesc(preds, highestLocked)
			continue
		}
		value := f()
		nn := newByteNodeDesc(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		nn.flags.SetTrue(fullyLinked)
		unlockByteDesc(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return value, false
	}
}

// Delete deletes the value for a key.
func (s *ByteMapDesc) Delete(key byte) bool {
	var (
		nodeToDelete *byteNodeDesc
		isMarked     bool // reports whether this operation marked the node
		topLayer     = -1
		preds, succs [maxLevel]*byteNodeDesc
	)
	for {
		lFound := s.findNodeDelete(key, &preds, &succs)
		if isMarked || // this goroutine marked the node, or the node was found in the skip list
			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
			if !isMarked { // the node has not been marked yet
				nodeToDelete = succs[lFound]
				topLayer = lFound
				nodeToDelete.mu.Lock()
				if nodeToDelete.flags.Get(marked) {
					// The node is marked by another goroutine,
					// which will accomplish the physical deletion.
					nodeToDelete.mu.Unlock()
					return false
				}
				nodeToDelete.flags.SetTrue(marked)
				isMarked = true
			}
			// Accomplish the physical deletion.
			var (
				highestLocked        = -1 // the highest layer locked by this goroutine
				valid                = true
				pred, succ, prevPred *byteNodeDesc
			)
			for layer := 0; valid && (layer <= topLayer); layer++ {
				pred, succ = preds[layer], succs[layer]
				if pred != prevPred { // this pred may already be locked from a previous layer
					pred.mu.Lock()
					highestLocked = layer
					prevPred = pred
				}
				// valid checks whether another node has been inserted into this layer during the search,
				// or whether the previous node has been deleted by another goroutine.
				// The deletion is valid if:
				// 1. The previous node is not marked.
				// 2. No other node has been inserted into this layer (pred's next is still succ).
				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
			}
			if !valid {
				unlockByteDesc(preds, highestLocked)
				continue
			}
			for i := topLayer; i >= 0; i-- {
				// Now we own `nodeToDelete`; no other goroutine will modify it,
				// so the non-atomic loadNext is sufficient here.
				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
			}
			nodeToDelete.mu.Unlock()
			unlockByteDesc(preds, highestLocked)
			atomic.AddInt64(&s.length, -1)
			return true
		}
		return false
	}
}

// Range calls f sequentially for each key and value present in the skipmap.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
func (s *ByteMapDesc) Range(f func(key byte, value interface{}) bool) {
	x := s.header.atomicLoadNext(0)
	for x != nil {
		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
			x = x.atomicLoadNext(0)
			continue
		}
		if !f(x.key, x.loadVal()) {
			break
		}
		x = x.atomicLoadNext(0)
	}
}

// Len returns the number of elements in the skipmap.
func (s *ByteMapDesc) Len() int {
	return int(atomic.LoadInt64(&s.length))
}

// Float32Map represents a map based on a skip list, in ascending key order.
type Float32Map struct {
	header       *float32Node
	length       int64
	highestLevel int64 // highest level for now
}

type float32Node struct {
	key   float32
	value unsafe.Pointer // *interface{}
	next  optionalArray  // [level]*float32Node
	mu    sync.Mutex
	flags bitflag
	level uint32
}

func newFloat32Node(key float32, value interface{}, level int) *float32Node {
	node := &float32Node{
		key:   key,
		level: uint32(level),
	}
	node.storeVal(value)
	if level > op1 {
		node.next.extra = new([op2]unsafe.Pointer)
	}
	return node
}

func (n *float32Node) storeVal(value interface{}) {
	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
}

func (n *float32Node) loadVal() interface{} {
	return *(*interface{})(atomic.LoadPointer(&n.value))
}

func (n *float32Node) loadNext(i int) *float32Node {
	return (*float32Node)(n.next.load(i))
}

func (n *float32Node) storeNext(i int, node *float32Node) {
	n.next.store(i, unsafe.Pointer(node))
}

func (n *float32Node) atomicLoadNext(i int) *float32Node {
	return (*float32Node)(n.next.atomicLoad(i))
}

func (n *float32Node) atomicStoreNext(i int, node *float32Node) {
	n.next.atomicStore(i, unsafe.Pointer(node))
}

func (n *float32Node) lessthan(key float32) bool {
	return n.key < key
}

func (n *float32Node) equal(key float32) bool {
	return n.key == key
}

// NewFloat32 returns an empty float32 skipmap.
func NewFloat32() *Float32Map {
	h := newFloat32Node(0, "", maxLevel)
	h.flags.SetTrue(fullyLinked)
	return &Float32Map{
		header:       h,
		highestLevel: defaultHighestLevel,
	}
}
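
// A caveat for float keys: the generated comparators use < and ==, and both
// report false for NaN, so NaN keys would break ordering and lookup; avoid
// them (this is an observation about the code above, not a documented
// guarantee). Sketch:
//
//	m := skipmap.NewFloat32()
//	m.Store(1.5, "a")
//	if v, ok := m.Load(1.5); ok {
//		fmt.Println(v) // a
//	}
//	// m.Store(float32(math.NaN()), "bad") // avoid: NaN != NaN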

// findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
// The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
// (It does not record the full search path; if the node is found, it returns immediately.)
func (s *Float32Map) findNode(key float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) *float32Node {
	x := s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		succ := x.atomicLoadNext(i)
		for succ != nil && succ.lessthan(key) {
			x = succ
			succ = x.atomicLoadNext(i)
		}
		preds[i] = x
		succs[i] = succ

		// Check if the key is already in the skipmap.
		if succ != nil && succ.equal(key) {
			return succ
		}
	}
	return nil
}

// findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip list.
// The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
func (s *Float32Map) findNodeDelete(key float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) int {
	// lFound represents the index of the first layer at which the node was found.
	lFound, x := -1, s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		succ := x.atomicLoadNext(i)
		for succ != nil && succ.lessthan(key) {
			x = succ
			succ = x.atomicLoadNext(i)
		}
		preds[i] = x
		succs[i] = succ

		// Check if the key is already in the skip list.
		if lFound == -1 && succ != nil && succ.equal(key) {
			lFound = i
		}
	}
	return lFound
}

func unlockFloat32(preds [maxLevel]*float32Node, highestLevel int) {
	var prevPred *float32Node
	for i := highestLevel; i >= 0; i-- {
		if preds[i] != prevPred { // the same pred may guard several layers; unlock it only once
			preds[i].mu.Unlock()
			prevPred = preds[i]
		}
	}
}

// Store sets the value for a key.
func (s *Float32Map) Store(key float32, value interface{}) {
	level := s.randomlevel()
	var preds, succs [maxLevel]*float32Node
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just replace the value.
				nodeFound.storeVal(value)
				return
			}
			// If the node is marked, some other goroutine is in the process of deleting it,
			// so we retry and insert the node in the next loop.
			continue
		}

		// Add this node to the skip list.
		var (
			highestLocked        = -1 // the highest layer locked by this goroutine
			valid                = true
			pred, succ, prevPred *float32Node
		)
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // this pred may already be locked from a previous layer
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid checks whether another node has been inserted into this layer during the search.
			// The insertion is valid if:
			// 1. Neither the previous node nor the next node is marked.
			// 2. The previous node's next node in this layer is still succ.
			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
		}
		if !valid {
			unlockFloat32(preds, highestLocked)
			continue
		}

		nn := newFloat32Node(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		nn.flags.SetTrue(fullyLinked)
		unlockFloat32(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return
	}
}

func (s *Float32Map) randomlevel() int {
	// Generate random level.
	level := randomLevel()
	// Update highest level if possible.
	for {
		hl := atomic.LoadInt64(&s.highestLevel)
		if int64(level) <= hl {
			break
		}
		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
			break
		}
	}
	return level
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether the value was found in the map.
func (s *Float32Map) Load(key float32) (value interface{}, ok bool) {
	x := s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		nex := x.atomicLoadNext(i)
		for nex != nil && nex.lessthan(key) {
			x = nex
			nex = x.atomicLoadNext(i)
		}

		// Check if the key is already in the skip list.
		if nex != nil && nex.equal(key) {
			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
				return nex.loadVal(), true
			}
			return nil, false
		}
	}
	return nil, false
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
// (Modified from Delete)
func (s *Float32Map) LoadAndDelete(key float32) (value interface{}, loaded bool) {
	var (
		nodeToDelete *float32Node
		isMarked     bool // reports whether this operation marked the node
		topLayer     = -1
		preds, succs [maxLevel]*float32Node
	)
	for {
		lFound := s.findNodeDelete(key, &preds, &succs)
		if isMarked || // this goroutine marked the node, or the node was found in the skip list
			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
			if !isMarked { // the node has not been marked yet
				nodeToDelete = succs[lFound]
				topLayer = lFound
				nodeToDelete.mu.Lock()
				if nodeToDelete.flags.Get(marked) {
					// The node is marked by another goroutine,
					// which will accomplish the physical deletion.
					nodeToDelete.mu.Unlock()
					return nil, false
				}
				nodeToDelete.flags.SetTrue(marked)
				isMarked = true
			}
			// Accomplish the physical deletion.
			var (
				highestLocked        = -1 // the highest layer locked by this goroutine
				valid                = true
				pred, succ, prevPred *float32Node
			)
			for layer := 0; valid && (layer <= topLayer); layer++ {
				pred, succ = preds[layer], succs[layer]
				if pred != prevPred { // this pred may already be locked from a previous layer
					pred.mu.Lock()
					highestLocked = layer
					prevPred = pred
				}
				// valid checks whether another node has been inserted into this layer during the search,
				// or whether the previous node has been deleted by another goroutine.
				// The deletion is valid if:
				// 1. The previous node is not marked.
				// 2. No other node has been inserted into this layer (pred's next is still succ).
				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
			}
			if !valid {
				unlockFloat32(preds, highestLocked)
				continue
			}
			for i := topLayer; i >= 0; i-- {
				// Now we own `nodeToDelete`; no other goroutine will modify it,
				// so the non-atomic loadNext is sufficient here.
				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
			}
			nodeToDelete.mu.Unlock()
			unlockFloat32(preds, highestLocked)
			atomic.AddInt64(&s.length, -1)
			return nodeToDelete.loadVal(), true
		}
		return nil, false
	}
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
// (Modified from Store)
func (s *Float32Map) LoadOrStore(key float32, value interface{}) (actual interface{}, loaded bool) {
	var (
		level        int
		preds, succs [maxLevel]*float32Node
		hl           = int(atomic.LoadInt64(&s.highestLevel))
	)
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just return the value.
				return nodeFound.loadVal(), true
			}
			// If the node is marked, some other goroutine is in the process of deleting it,
			// so we retry and insert the node in the next loop.
			continue
		}

		// Add this node to the skip list.
		var (
			highestLocked        = -1 // the highest layer locked by this goroutine
			valid                = true
			pred, succ, prevPred *float32Node
		)
		if level == 0 {
			level = s.randomlevel()
			if level > hl {
				// If the highest level has been raised, it usually means that many goroutines
				// are inserting items. Hopefully we can find a better path in the next loop.
				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
				// but this strategy's performance is almost the same as the existing method.
				continue
			}
		}
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // this pred may already be locked from a previous layer
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid checks whether another node has been inserted into this layer during the search.
			// The insertion is valid if:
			// 1. Neither the previous node nor the next node is marked.
			// 2. The previous node's next node in this layer is still succ.
			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
		}
		if !valid {
			unlockFloat32(preds, highestLocked)
			continue
		}

		nn := newFloat32Node(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		nn.flags.SetTrue(fullyLinked)
		unlockFloat32(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return value, false
	}
}
  1400  
  1401  // LoadOrStoreLazy returns the existing value for the key if present.
  1402  // Otherwise, it stores and returns the given value from f, f will only be called once.
  1403  // The loaded result is true if the value was loaded, false if stored.
  1404  // (Modified from LoadOrStore)
  1405  func (s *Float32Map) LoadOrStoreLazy(key float32, f func() interface{}) (actual interface{}, loaded bool) {
  1406  	var (
  1407  		level        int
  1408  		preds, succs [maxLevel]*float32Node
  1409  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  1410  	)
  1411  	for {
  1412  		nodeFound := s.findNode(key, &preds, &succs)
  1413  		if nodeFound != nil { // indicating the key is already in the skip-list
  1414  			if !nodeFound.flags.Get(marked) {
  1415  				// We don't need to care about whether or not the node is fully linked,
  1416  				// just return the value.
  1417  				return nodeFound.loadVal(), true
  1418  			}
  1419  			// If the node is marked, it means some other goroutine is in the process of deleting this node,
  1420  			// so we retry in the next loop.
  1421  			continue
  1422  		}
  1423  
  1424  		// Add this node into the skip list.
  1425  		var (
  1426  			highestLocked        = -1 // the highest level being locked by this process
  1427  			valid                = true
  1428  			pred, succ, prevPred *float32Node
  1429  		)
  1430  		if level == 0 {
  1431  			level = s.randomlevel()
  1432  			if level > hl {
  1433  				// If the highest level is updated, it usually means that many goroutines
  1434  				// are inserting items. Hopefully we can find a better path in the next loop.
  1435  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  1436  				// but this strategy's performance is almost the same as the existing method.
  1437  				continue
  1438  			}
  1439  		}
  1440  		for layer := 0; valid && layer < level; layer++ {
  1441  			pred = preds[layer]   // target node's previous node
  1442  			succ = succs[layer]   // target node's next node
  1443  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1444  				pred.mu.Lock()
  1445  				highestLocked = layer
  1446  				prevPred = pred
  1447  			}
  1448  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  1449  			// It is valid if:
  1450  			// 1. The previous node and next node both are not marked.
  1451  			// 2. The previous node's next node is succ in this layer.
  1452  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  1453  		}
  1454  		if !valid {
  1455  			unlockFloat32(preds, highestLocked)
  1456  			continue
  1457  		}
  1458  		value := f()
  1459  		nn := newFloat32Node(key, value, level)
  1460  		for layer := 0; layer < level; layer++ {
  1461  			nn.storeNext(layer, succs[layer])
  1462  			preds[layer].atomicStoreNext(layer, nn)
  1463  		}
  1464  		nn.flags.SetTrue(fullyLinked)
  1465  		unlockFloat32(preds, highestLocked)
  1466  		atomic.AddInt64(&s.length, 1)
  1467  		return value, false
  1468  	}
  1469  }
  1470  
  1471  // Delete deletes the value for a key.
  1472  func (s *Float32Map) Delete(key float32) bool {
  1473  	var (
  1474  		nodeToDelete *float32Node
  1475  		isMarked     bool // reports whether this operation marked the node
  1476  		topLayer     = -1
  1477  		preds, succs [maxLevel]*float32Node
  1478  	)
  1479  	for {
  1480  		lFound := s.findNodeDelete(key, &preds, &succs)
  1481  		if isMarked || // this process marked this node, or we found this node in the skip list
  1482  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1483  			if !isMarked { // we haven't marked this node yet
  1484  				nodeToDelete = succs[lFound]
  1485  				topLayer = lFound
  1486  				nodeToDelete.mu.Lock()
  1487  				if nodeToDelete.flags.Get(marked) {
  1488  					// The node is marked by another process,
  1489  					// the physical deletion will be accomplished by another process.
  1490  					nodeToDelete.mu.Unlock()
  1491  					return false
  1492  				}
  1493  				nodeToDelete.flags.SetTrue(marked)
  1494  				isMarked = true
  1495  			}
  1496  			// Accomplish the physical deletion.
  1497  			var (
  1498  				highestLocked        = -1 // the highest level being locked by this process
  1499  				valid                = true
  1500  				pred, succ, prevPred *float32Node
  1501  			)
  1502  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1503  				pred, succ = preds[layer], succs[layer]
  1504  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1505  					pred.mu.Lock()
  1506  					highestLocked = layer
  1507  					prevPred = pred
  1508  				}
  1509  				// valid checks whether another node has been inserted into the skip list in this layer
  1510  				// during this process, or whether the previous node has been deleted by another process.
  1511  				// It is valid if:
  1512  				// 1. the previous node exists (it is not marked).
  1513  				// 2. no other node has been inserted into the skip list in this layer.
  1514  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  1515  			}
  1516  			if !valid {
  1517  				unlockFloat32(preds, highestLocked)
  1518  				continue
  1519  			}
  1520  			for i := topLayer; i >= 0; i-- {
  1521  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  1522  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
  1523  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  1524  			}
  1525  			nodeToDelete.mu.Unlock()
  1526  			unlockFloat32(preds, highestLocked)
  1527  			atomic.AddInt64(&s.length, -1)
  1528  			return true
  1529  		}
  1530  		return false
  1531  	}
  1532  }
  1533  
  1534  // Range calls f sequentially for each key and value present in the skipmap.
  1535  // If f returns false, range stops the iteration.
  1536  //
  1537  // Range does not necessarily correspond to any consistent snapshot of the Map's
  1538  // contents: no key will be visited more than once, but if the value for any key
  1539  // is stored or deleted concurrently, Range may reflect any mapping for that key
  1540  // from any point during the Range call.
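        // Illustrative use, assuming a populated *Float32Map m and an imported fmt
        // package; keys are visited in ascending order for this map type:
        //
        //	m.Range(func(key float32, value interface{}) bool {
        //		fmt.Println(key, value)
        //		return true // return false to stop the iteration early
        //	})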
  1541  func (s *Float32Map) Range(f func(key float32, value interface{}) bool) {
  1542  	x := s.header.atomicLoadNext(0)
  1543  	for x != nil {
  1544  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  1545  			x = x.atomicLoadNext(0)
  1546  			continue
  1547  		}
  1548  		if !f(x.key, x.loadVal()) {
  1549  			break
  1550  		}
  1551  		x = x.atomicLoadNext(0)
  1552  	}
  1553  }
  1554  
  1555  // Len returns the length of this skipmap.
  1556  func (s *Float32Map) Len() int {
  1557  	return int(atomic.LoadInt64(&s.length))
  1558  }
  1559  
  1560  // Float32MapDesc represents a map based on a skip list, in descending key order.
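        // A small sketch of the ordering difference (illustrative; fmt import assumed):
        //
        //	m := skipmap.NewFloat32Desc()
        //	m.Store(1, "a")
        //	m.Store(2, "b")
        //	m.Store(3, "c")
        //	m.Range(func(k float32, _ interface{}) bool {
        //		fmt.Print(k, " ") // prints: 3 2 1
        //		return true
        //	})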
  1561  type Float32MapDesc struct {
  1562  	header       *float32NodeDesc
  1563  	length       int64
  1564  	highestLevel int64 // highest level for now
  1565  }
  1566  
  1567  type float32NodeDesc struct {
  1568  	key   float32
  1569  	value unsafe.Pointer // *interface{}
  1570  	next  optionalArray  // [level]*float32NodeDesc
  1571  	mu    sync.Mutex
  1572  	flags bitflag
  1573  	level uint32
  1574  }
  1575  
  1576  func newFloat32NodeDesc(key float32, value interface{}, level int) *float32NodeDesc {
  1577  	node := &float32NodeDesc{
  1578  		key:   key,
  1579  		level: uint32(level),
  1580  	}
  1581  	node.storeVal(value)
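        	// Only nodes taller than op1 allocate the spill-over array below;
        	// shorter nodes fit entirely in optionalArray's inline storage.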
  1582  	if level > op1 {
  1583  		node.next.extra = new([op2]unsafe.Pointer)
  1584  	}
  1585  	return node
  1586  }
  1587  
  1588  func (n *float32NodeDesc) storeVal(value interface{}) {
  1589  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  1590  }
  1591  
  1592  func (n *float32NodeDesc) loadVal() interface{} {
  1593  	return *(*interface{})(atomic.LoadPointer(&n.value))
  1594  }
  1595  
  1596  func (n *float32NodeDesc) loadNext(i int) *float32NodeDesc {
  1597  	return (*float32NodeDesc)(n.next.load(i))
  1598  }
  1599  
  1600  func (n *float32NodeDesc) storeNext(i int, node *float32NodeDesc) {
  1601  	n.next.store(i, unsafe.Pointer(node))
  1602  }
  1603  
  1604  func (n *float32NodeDesc) atomicLoadNext(i int) *float32NodeDesc {
  1605  	return (*float32NodeDesc)(n.next.atomicLoad(i))
  1606  }
  1607  
  1608  func (n *float32NodeDesc) atomicStoreNext(i int, node *float32NodeDesc) {
  1609  	n.next.atomicStore(i, unsafe.Pointer(node))
  1610  }
  1611  
  1612  func (n *float32NodeDesc) lessthan(key float32) bool {
  1613  	return n.key > key
  1614  }
  1615  
  1616  func (n *float32NodeDesc) equal(key float32) bool {
  1617  	return n.key == key
  1618  }
  1619  
  1620  // NewFloat32Desc returns an empty float32 skipmap in descending order.
  1621  func NewFloat32Desc() *Float32MapDesc {
  1622  	h := newFloat32NodeDesc(0, "", maxLevel)
  1623  	h.flags.SetTrue(fullyLinked)
  1624  	return &Float32MapDesc{
  1625  		header:       h,
  1626  		highestLevel: defaultHighestLevel,
  1627  	}
  1628  }
  1629  
  1630  // findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap.
  1631  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
  1632  // (without the full path; it returns immediately once the node is found)
  1633  func (s *Float32MapDesc) findNode(key float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) *float32NodeDesc {
  1634  	x := s.header
  1635  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1636  		succ := x.atomicLoadNext(i)
  1637  		for succ != nil && succ.lessthan(key) {
  1638  			x = succ
  1639  			succ = x.atomicLoadNext(i)
  1640  		}
  1641  		preds[i] = x
  1642  		succs[i] = succ
  1643  
  1644  		// Check if the key is already in the skipmap.
  1645  		if succ != nil && succ.equal(key) {
  1646  			return succ
  1647  		}
  1648  	}
  1649  	return nil
  1650  }
  1651  
  1652  // findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list.
  1653  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
  1654  func (s *Float32MapDesc) findNodeDelete(key float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) int {
  1655  	// lFound represents the index of the first layer at which it found a node.
  1656  	lFound, x := -1, s.header
  1657  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1658  		succ := x.atomicLoadNext(i)
  1659  		for succ != nil && succ.lessthan(key) {
  1660  			x = succ
  1661  			succ = x.atomicLoadNext(i)
  1662  		}
  1663  		preds[i] = x
  1664  		succs[i] = succ
  1665  
  1666  		// Check if the key is already in the skip list.
  1667  		if lFound == -1 && succ != nil && succ.equal(key) {
  1668  			lFound = i
  1669  		}
  1670  	}
  1671  	return lFound
  1672  }
  1673  
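        // unlockFloat32Desc releases the pred locks taken during an insert or delete,
        // skipping duplicates where the same node serves as the pred on several layers.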
  1674  func unlockFloat32Desc(preds [maxLevel]*float32NodeDesc, highestLevel int) {
  1675  	var prevPred *float32NodeDesc
  1676  	for i := highestLevel; i >= 0; i-- {
  1677  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  1678  			preds[i].mu.Unlock()
  1679  			prevPred = preds[i]
  1680  		}
  1681  	}
  1682  }
  1683  
  1684  // Store sets the value for a key.
  1685  func (s *Float32MapDesc) Store(key float32, value interface{}) {
  1686  	level := s.randomlevel()
  1687  	var preds, succs [maxLevel]*float32NodeDesc
  1688  	for {
  1689  		nodeFound := s.findNode(key, &preds, &succs)
  1690  		if nodeFound != nil { // indicating the key is already in the skip-list
  1691  			if !nodeFound.flags.Get(marked) {
  1692  				// We don't need to care about whether or not the node is fully linked,
  1693  				// just replace the value.
  1694  				nodeFound.storeVal(value)
  1695  				return
  1696  			}
  1697  			// If the node is marked, it means some other goroutine is in the process of deleting this node,
  1698  			// so we retry in the next loop.
  1699  			continue
  1700  		}
  1701  
  1702  		// Add this node into the skip list.
  1703  		var (
  1704  			highestLocked        = -1 // the highest level being locked by this process
  1705  			valid                = true
  1706  			pred, succ, prevPred *float32NodeDesc
  1707  		)
  1708  		for layer := 0; valid && layer < level; layer++ {
  1709  			pred = preds[layer]   // target node's previous node
  1710  			succ = succs[layer]   // target node's next node
  1711  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1712  				pred.mu.Lock()
  1713  				highestLocked = layer
  1714  				prevPred = pred
  1715  			}
  1716  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  1717  			// It is valid if:
  1718  			// 1. The previous node and next node both are not marked.
  1719  			// 2. The previous node's next node is succ in this layer.
  1720  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1721  		}
  1722  		if !valid {
  1723  			unlockFloat32Desc(preds, highestLocked)
  1724  			continue
  1725  		}
  1726  
  1727  		nn := newFloat32NodeDesc(key, value, level)
  1728  		for layer := 0; layer < level; layer++ {
  1729  			nn.storeNext(layer, succs[layer])
  1730  			preds[layer].atomicStoreNext(layer, nn)
  1731  		}
  1732  		nn.flags.SetTrue(fullyLinked)
  1733  		unlockFloat32Desc(preds, highestLocked)
  1734  		atomic.AddInt64(&s.length, 1)
  1735  		return
  1736  	}
  1737  }
  1738  
  1739  func (s *Float32MapDesc) randomlevel() int {
  1740  	// Generate random level.
  1741  	level := randomLevel()
  1742  	// Update highest level if possible.
  1743  	for {
  1744  		hl := atomic.LoadInt64(&s.highestLevel)
  1745  		if int64(level) <= hl {
  1746  			break
  1747  		}
  1748  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1749  			break
  1750  		}
  1751  	}
  1752  	return level
  1753  }
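        // The loop above is a lock-free monotonic-max update: reload the current
        // highest level, stop if it is already at least ours, otherwise try to CAS
        // our value in. The same pattern in isolation (a sketch only, not part of
        // this package's API):
        //
        //	func storeMax(addr *int64, v int64) {
        //		for {
        //			cur := atomic.LoadInt64(addr)
        //			if v <= cur {
        //				return // another goroutine already raised it far enough
        //			}
        //			if atomic.CompareAndSwapInt64(addr, cur, v) {
        //				return // we won the race
        //			}
        //		}
        //	}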
  1754  
  1755  // Load returns the value stored in the map for a key, or nil if no
  1756  // value is present.
  1757  // The ok result indicates whether value was found in the map.
  1758  func (s *Float32MapDesc) Load(key float32) (value interface{}, ok bool) {
  1759  	x := s.header
  1760  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1761  		nex := x.atomicLoadNext(i)
  1762  		for nex != nil && nex.lessthan(key) {
  1763  			x = nex
  1764  			nex = x.atomicLoadNext(i)
  1765  		}
  1766  
  1767  		// Check if the key is already in the skip list.
  1768  		if nex != nil && nex.equal(key) {
  1769  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  1770  				return nex.loadVal(), true
  1771  			}
  1772  			return nil, false
  1773  		}
  1774  	}
  1775  	return nil, false
  1776  }
  1777  
  1778  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  1779  // The loaded result reports whether the key was present.
  1780  // (Modified from Delete)
  1781  func (s *Float32MapDesc) LoadAndDelete(key float32) (value interface{}, loaded bool) {
  1782  	var (
  1783  		nodeToDelete *float32NodeDesc
  1784  		isMarked     bool // reports whether this operation marked the node
  1785  		topLayer     = -1
  1786  		preds, succs [maxLevel]*float32NodeDesc
  1787  	)
  1788  	for {
  1789  		lFound := s.findNodeDelete(key, &preds, &succs)
  1790  		if isMarked || // this process marked this node, or we found this node in the skip list
  1791  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1792  			if !isMarked { // we haven't marked this node yet
  1793  				nodeToDelete = succs[lFound]
  1794  				topLayer = lFound
  1795  				nodeToDelete.mu.Lock()
  1796  				if nodeToDelete.flags.Get(marked) {
  1797  					// The node is marked by another process,
  1798  					// the physical deletion will be accomplished by another process.
  1799  					nodeToDelete.mu.Unlock()
  1800  					return nil, false
  1801  				}
  1802  				nodeToDelete.flags.SetTrue(marked)
  1803  				isMarked = true
  1804  			}
  1805  			// Accomplish the physical deletion.
  1806  			var (
  1807  				highestLocked        = -1 // the highest level being locked by this process
  1808  				valid                = true
  1809  				pred, succ, prevPred *float32NodeDesc
  1810  			)
  1811  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1812  				pred, succ = preds[layer], succs[layer]
  1813  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1814  					pred.mu.Lock()
  1815  					highestLocked = layer
  1816  					prevPred = pred
  1817  				}
  1818  				// valid checks whether another node has been inserted into the skip list in this layer
  1819  				// during this process, or whether the previous node has been deleted by another process.
  1820  				// It is valid if:
  1821  				// 1. the previous node exists (it is not marked).
  1822  				// 2. no other node has been inserted into the skip list in this layer.
  1823  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  1824  			}
  1825  			if !valid {
  1826  				unlockFloat32Desc(preds, highestLocked)
  1827  				continue
  1828  			}
  1829  			for i := topLayer; i >= 0; i-- {
  1830  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  1831  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
  1832  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  1833  			}
  1834  			nodeToDelete.mu.Unlock()
  1835  			unlockFloat32Desc(preds, highestLocked)
  1836  			atomic.AddInt64(&s.length, -1)
  1837  			return nodeToDelete.loadVal(), true
  1838  		}
  1839  		return nil, false
  1840  	}
  1841  }
  1842  
  1843  // LoadOrStore returns the existing value for the key if present.
  1844  // Otherwise, it stores and returns the given value.
  1845  // The loaded result is true if the value was loaded, false if stored.
  1846  // (Modified from Store)
  1847  func (s *Float32MapDesc) LoadOrStore(key float32, value interface{}) (actual interface{}, loaded bool) {
  1848  	var (
  1849  		level        int
  1850  		preds, succs [maxLevel]*float32NodeDesc
  1851  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  1852  	)
  1853  	for {
  1854  		nodeFound := s.findNode(key, &preds, &succs)
  1855  		if nodeFound != nil { // indicating the key is already in the skip-list
  1856  			if !nodeFound.flags.Get(marked) {
  1857  				// We don't need to care about whether or not the node is fully linked,
  1858  				// just return the value.
  1859  				return nodeFound.loadVal(), true
  1860  			}
  1861  			// If the node is marked, it means some other goroutine is in the process of deleting this node,
  1862  			// so we retry in the next loop.
  1863  			continue
  1864  		}
  1865  
  1866  		// Add this node into the skip list.
  1867  		var (
  1868  			highestLocked        = -1 // the highest level being locked by this process
  1869  			valid                = true
  1870  			pred, succ, prevPred *float32NodeDesc
  1871  		)
  1872  		if level == 0 {
  1873  			level = s.randomlevel()
  1874  			if level > hl {
  1875  				// If the highest level is updated, it usually means that many goroutines
  1876  				// are inserting items. Hopefully we can find a better path in the next loop.
  1877  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  1878  				// but this strategy's performance is almost the same as the existing method.
  1879  				continue
  1880  			}
  1881  		}
  1882  		for layer := 0; valid && layer < level; layer++ {
  1883  			pred = preds[layer]   // target node's previous node
  1884  			succ = succs[layer]   // target node's next node
  1885  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1886  				pred.mu.Lock()
  1887  				highestLocked = layer
  1888  				prevPred = pred
  1889  			}
  1890  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  1891  			// It is valid if:
  1892  			// 1. The previous node and next node both are not marked.
  1893  			// 2. The previous node's next node is succ in this layer.
  1894  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1895  		}
  1896  		if !valid {
  1897  			unlockFloat32Desc(preds, highestLocked)
  1898  			continue
  1899  		}
  1900  
  1901  		nn := newFloat32NodeDesc(key, value, level)
  1902  		for layer := 0; layer < level; layer++ {
  1903  			nn.storeNext(layer, succs[layer])
  1904  			preds[layer].atomicStoreNext(layer, nn)
  1905  		}
  1906  		nn.flags.SetTrue(fullyLinked)
  1907  		unlockFloat32Desc(preds, highestLocked)
  1908  		atomic.AddInt64(&s.length, 1)
  1909  		return value, false
  1910  	}
  1911  }
  1912  
  1913  // LoadOrStoreLazy returns the existing value for the key if present.
  1914  // Otherwise, it stores and returns the value returned by f; f is invoked at most once.
  1915  // The loaded result is true if the value was loaded, false if stored.
  1916  // (Modified from LoadOrStore)
  1917  func (s *Float32MapDesc) LoadOrStoreLazy(key float32, f func() interface{}) (actual interface{}, loaded bool) {
  1918  	var (
  1919  		level        int
  1920  		preds, succs [maxLevel]*float32NodeDesc
  1921  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  1922  	)
  1923  	for {
  1924  		nodeFound := s.findNode(key, &preds, &succs)
  1925  		if nodeFound != nil { // indicating the key is already in the skip-list
  1926  			if !nodeFound.flags.Get(marked) {
  1927  				// We don't need to care about whether or not the node is fully linked,
  1928  				// just return the value.
  1929  				return nodeFound.loadVal(), true
  1930  			}
  1931  			// If the node is marked, it means some other goroutine is in the process of deleting this node,
  1932  			// so we retry in the next loop.
  1933  			continue
  1934  		}
  1935  
  1936  		// Add this node into the skip list.
  1937  		var (
  1938  			highestLocked        = -1 // the highest level being locked by this process
  1939  			valid                = true
  1940  			pred, succ, prevPred *float32NodeDesc
  1941  		)
  1942  		if level == 0 {
  1943  			level = s.randomlevel()
  1944  			if level > hl {
  1945  				// If the highest level is updated, it usually means that many goroutines
  1946  				// are inserting items. Hopefully we can find a better path in the next loop.
  1947  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  1948  				// but this strategy's performance is almost the same as the existing method.
  1949  				continue
  1950  			}
  1951  		}
  1952  		for layer := 0; valid && layer < level; layer++ {
  1953  			pred = preds[layer]   // target node's previous node
  1954  			succ = succs[layer]   // target node's next node
  1955  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1956  				pred.mu.Lock()
  1957  				highestLocked = layer
  1958  				prevPred = pred
  1959  			}
  1960  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  1961  			// It is valid if:
  1962  			// 1. The previous node and next node both are not marked.
  1963  			// 2. The previous node's next node is succ in this layer.
  1964  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  1965  		}
  1966  		if !valid {
  1967  			unlockFloat32Desc(preds, highestLocked)
  1968  			continue
  1969  		}
  1970  		value := f()
  1971  		nn := newFloat32NodeDesc(key, value, level)
  1972  		for layer := 0; layer < level; layer++ {
  1973  			nn.storeNext(layer, succs[layer])
  1974  			preds[layer].atomicStoreNext(layer, nn)
  1975  		}
  1976  		nn.flags.SetTrue(fullyLinked)
  1977  		unlockFloat32Desc(preds, highestLocked)
  1978  		atomic.AddInt64(&s.length, 1)
  1979  		return value, false
  1980  	}
  1981  }
  1982  
  1983  // Delete deletes the value for a key.
  1984  func (s *Float32MapDesc) Delete(key float32) bool {
  1985  	var (
  1986  		nodeToDelete *float32NodeDesc
  1987  		isMarked     bool // reports whether this operation marked the node
  1988  		topLayer     = -1
  1989  		preds, succs [maxLevel]*float32NodeDesc
  1990  	)
  1991  	for {
  1992  		lFound := s.findNodeDelete(key, &preds, &succs)
  1993  		if isMarked || // this process marked this node, or we found this node in the skip list
  1994  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1995  			if !isMarked { // we haven't marked this node yet
  1996  				nodeToDelete = succs[lFound]
  1997  				topLayer = lFound
  1998  				nodeToDelete.mu.Lock()
  1999  				if nodeToDelete.flags.Get(marked) {
  2000  					// The node is marked by another process,
  2001  					// the physical deletion will be accomplished by another process.
  2002  					nodeToDelete.mu.Unlock()
  2003  					return false
  2004  				}
  2005  				nodeToDelete.flags.SetTrue(marked)
  2006  				isMarked = true
  2007  			}
  2008  			// Accomplish the physical deletion.
  2009  			var (
  2010  				highestLocked        = -1 // the highest level being locked by this process
  2011  				valid                = true
  2012  				pred, succ, prevPred *float32NodeDesc
  2013  			)
  2014  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2015  				pred, succ = preds[layer], succs[layer]
  2016  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2017  					pred.mu.Lock()
  2018  					highestLocked = layer
  2019  					prevPred = pred
  2020  				}
  2021  				// valid checks whether another node has been inserted into the skip list in this layer
  2022  				// during this process, or whether the previous node has been deleted by another process.
  2023  				// It is valid if:
  2024  				// 1. the previous node exists (it is not marked).
  2025  				// 2. no other node has been inserted into the skip list in this layer.
  2026  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  2027  			}
  2028  			if !valid {
  2029  				unlockFloat32Desc(preds, highestLocked)
  2030  				continue
  2031  			}
  2032  			for i := topLayer; i >= 0; i-- {
  2033  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  2034  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
  2035  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  2036  			}
  2037  			nodeToDelete.mu.Unlock()
  2038  			unlockFloat32Desc(preds, highestLocked)
  2039  			atomic.AddInt64(&s.length, -1)
  2040  			return true
  2041  		}
  2042  		return false
  2043  	}
  2044  }
  2045  
  2046  // Range calls f sequentially for each key and value present in the skipmap.
  2047  // If f returns false, range stops the iteration.
  2048  //
  2049  // Range does not necessarily correspond to any consistent snapshot of the Map's
  2050  // contents: no key will be visited more than once, but if the value for any key
  2051  // is stored or deleted concurrently, Range may reflect any mapping for that key
  2052  // from any point during the Range call.
  2053  func (s *Float32MapDesc) Range(f func(key float32, value interface{}) bool) {
  2054  	x := s.header.atomicLoadNext(0)
  2055  	for x != nil {
  2056  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2057  			x = x.atomicLoadNext(0)
  2058  			continue
  2059  		}
  2060  		if !f(x.key, x.loadVal()) {
  2061  			break
  2062  		}
  2063  		x = x.atomicLoadNext(0)
  2064  	}
  2065  }
  2066  
  2067  // Len returns the length of this skipmap.
  2068  func (s *Float32MapDesc) Len() int {
  2069  	return int(atomic.LoadInt64(&s.length))
  2070  }
  2071  
  2072  // Float64Map represents a map based on a skip list, in ascending key order.
  2073  type Float64Map struct {
  2074  	header       *float64Node
  2075  	length       int64
  2076  	highestLevel int64 // highest level for now
  2077  }
  2078  
  2079  type float64Node struct {
  2080  	key   float64
  2081  	value unsafe.Pointer // *interface{}
  2082  	next  optionalArray  // [level]*float64Node
  2083  	mu    sync.Mutex
  2084  	flags bitflag
  2085  	level uint32
  2086  }
  2087  
  2088  func newFloat64Node(key float64, value interface{}, level int) *float64Node {
  2089  	node := &float64Node{
  2090  		key:   key,
  2091  		level: uint32(level),
  2092  	}
  2093  	node.storeVal(value)
  2094  	if level > op1 {
  2095  		node.next.extra = new([op2]unsafe.Pointer)
  2096  	}
  2097  	return node
  2098  }
  2099  
  2100  func (n *float64Node) storeVal(value interface{}) {
  2101  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  2102  }
  2103  
  2104  func (n *float64Node) loadVal() interface{} {
  2105  	return *(*interface{})(atomic.LoadPointer(&n.value))
  2106  }
  2107  
  2108  func (n *float64Node) loadNext(i int) *float64Node {
  2109  	return (*float64Node)(n.next.load(i))
  2110  }
  2111  
  2112  func (n *float64Node) storeNext(i int, node *float64Node) {
  2113  	n.next.store(i, unsafe.Pointer(node))
  2114  }
  2115  
  2116  func (n *float64Node) atomicLoadNext(i int) *float64Node {
  2117  	return (*float64Node)(n.next.atomicLoad(i))
  2118  }
  2119  
  2120  func (n *float64Node) atomicStoreNext(i int, node *float64Node) {
  2121  	n.next.atomicStore(i, unsafe.Pointer(node))
  2122  }
  2123  
  2124  func (n *float64Node) lessthan(key float64) bool {
  2125  	return n.key < key
  2126  }
  2127  
  2128  func (n *float64Node) equal(key float64) bool {
  2129  	return n.key == key
  2130  }
  2131  
  2132  // NewFloat64 returns an empty float64 skipmap.
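        // A short end-to-end sketch (illustrative only; fmt import assumed):
        //
        //	m := skipmap.NewFloat64()
        //	m.Store(3.14, "pi")
        //	if v, ok := m.Load(3.14); ok {
        //		fmt.Println(v) // prints: pi
        //	}
        //	m.Delete(3.14) // returns true: the key was present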
  2133  func NewFloat64() *Float64Map {
  2134  	h := newFloat64Node(0, "", maxLevel)
  2135  	h.flags.SetTrue(fullyLinked)
  2136  	return &Float64Map{
  2137  		header:       h,
  2138  		highestLevel: defaultHighestLevel,
  2139  	}
  2140  }
  2141  
  2142  // findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap.
  2143  // The returned preds and succs always satisfy preds[i] < key <= succs[i].
  2144  // (without the full path; it returns immediately once the node is found)
  2145  func (s *Float64Map) findNode(key float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) *float64Node {
  2146  	x := s.header
  2147  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2148  		succ := x.atomicLoadNext(i)
  2149  		for succ != nil && succ.lessthan(key) {
  2150  			x = succ
  2151  			succ = x.atomicLoadNext(i)
  2152  		}
  2153  		preds[i] = x
  2154  		succs[i] = succ
  2155  
  2156  		// Check if the key is already in the skipmap.
  2157  		if succ != nil && succ.equal(key) {
  2158  			return succ
  2159  		}
  2160  	}
  2161  	return nil
  2162  }
  2163  
  2164  // findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2165  // The returned preds and succs always satisfy preds[i] < key <= succs[i].
  2166  func (s *Float64Map) findNodeDelete(key float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) int {
  2167  	// lFound represents the index of the first layer at which it found a node.
  2168  	lFound, x := -1, s.header
  2169  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2170  		succ := x.atomicLoadNext(i)
  2171  		for succ != nil && succ.lessthan(key) {
  2172  			x = succ
  2173  			succ = x.atomicLoadNext(i)
  2174  		}
  2175  		preds[i] = x
  2176  		succs[i] = succ
  2177  
  2178  		// Check if the key is already in the skip list.
  2179  		if lFound == -1 && succ != nil && succ.equal(key) {
  2180  			lFound = i
  2181  		}
  2182  	}
  2183  	return lFound
  2184  }
  2185  
  2186  func unlockFloat64(preds [maxLevel]*float64Node, highestLevel int) {
  2187  	var prevPred *float64Node
  2188  	for i := highestLevel; i >= 0; i-- {
  2189  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  2190  			preds[i].mu.Unlock()
  2191  			prevPred = preds[i]
  2192  		}
  2193  	}
  2194  }
  2195  
  2196  // Store sets the value for a key.
  2197  func (s *Float64Map) Store(key float64, value interface{}) {
  2198  	level := s.randomlevel()
  2199  	var preds, succs [maxLevel]*float64Node
  2200  	for {
  2201  		nodeFound := s.findNode(key, &preds, &succs)
  2202  		if nodeFound != nil { // indicating the key is already in the skip-list
  2203  			if !nodeFound.flags.Get(marked) {
  2204  				// We don't need to care about whether or not the node is fully linked,
  2205  				// just replace the value.
  2206  				nodeFound.storeVal(value)
  2207  				return
  2208  			}
  2209  			// If the node is marked, it means some other goroutine is in the process of deleting this node,
  2210  			// so we retry in the next loop.
  2211  			continue
  2212  		}
  2213  
  2214  		// Add this node into the skip list.
  2215  		var (
  2216  			highestLocked        = -1 // the highest level being locked by this process
  2217  			valid                = true
  2218  			pred, succ, prevPred *float64Node
  2219  		)
  2220  		for layer := 0; valid && layer < level; layer++ {
  2221  			pred = preds[layer]   // target node's previous node
  2222  			succ = succs[layer]   // target node's next node
  2223  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2224  				pred.mu.Lock()
  2225  				highestLocked = layer
  2226  				prevPred = pred
  2227  			}
  2228  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  2229  			// It is valid if:
  2230  			// 1. The previous node and next node both are not marked.
  2231  			// 2. The previous node's next node is succ in this layer.
  2232  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2233  		}
  2234  		if !valid {
  2235  			unlockFloat64(preds, highestLocked)
  2236  			continue
  2237  		}
  2238  
  2239  		nn := newFloat64Node(key, value, level)
  2240  		for layer := 0; layer < level; layer++ {
  2241  			nn.storeNext(layer, succs[layer])
  2242  			preds[layer].atomicStoreNext(layer, nn)
  2243  		}
  2244  		nn.flags.SetTrue(fullyLinked)
  2245  		unlockFloat64(preds, highestLocked)
  2246  		atomic.AddInt64(&s.length, 1)
  2247  		return
  2248  	}
  2249  }
  2250  
  2251  func (s *Float64Map) randomlevel() int {
  2252  	// Generate random level.
  2253  	level := randomLevel()
  2254  	// Update highest level if possible.
  2255  	for {
  2256  		hl := atomic.LoadInt64(&s.highestLevel)
  2257  		if int64(level) <= hl {
  2258  			break
  2259  		}
  2260  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  2261  			break
  2262  		}
  2263  	}
  2264  	return level
  2265  }
  2266  
  2267  // Load returns the value stored in the map for a key, or nil if no
  2268  // value is present.
  2269  // The ok result indicates whether value was found in the map.
  2270  func (s *Float64Map) Load(key float64) (value interface{}, ok bool) {
  2271  	x := s.header
  2272  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2273  		nex := x.atomicLoadNext(i)
  2274  		for nex != nil && nex.lessthan(key) {
  2275  			x = nex
  2276  			nex = x.atomicLoadNext(i)
  2277  		}
  2278  
  2279  		// Check if the key is already in the skip list.
  2280  		if nex != nil && nex.equal(key) {
  2281  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  2282  				return nex.loadVal(), true
  2283  			}
  2284  			return nil, false
  2285  		}
  2286  	}
  2287  	return nil, false
  2288  }
  2289  
  2290  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  2291  // The loaded result reports whether the key was present.
  2292  // (Modified from Delete)
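        // An illustrative sketch of the difference from Delete: the previous value
        // comes back to the caller (given some populated *Float64Map m):
        //
        //	if v, loaded := m.LoadAndDelete(3.14); loaded {
        //		fmt.Println("removed:", v)
        //	}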
  2293  func (s *Float64Map) LoadAndDelete(key float64) (value interface{}, loaded bool) {
  2294  	var (
  2295  		nodeToDelete *float64Node
  2296  		isMarked     bool // reports whether this operation marked the node
  2297  		topLayer     = -1
  2298  		preds, succs [maxLevel]*float64Node
  2299  	)
  2300  	for {
  2301  		lFound := s.findNodeDelete(key, &preds, &succs)
  2302  		if isMarked || // this process marked this node, or we found this node in the skip list
  2303  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2304  			if !isMarked { // we haven't marked this node yet
  2305  				nodeToDelete = succs[lFound]
  2306  				topLayer = lFound
  2307  				nodeToDelete.mu.Lock()
  2308  				if nodeToDelete.flags.Get(marked) {
  2309  					// The node is marked by another process,
  2310  					// the physical deletion will be accomplished by another process.
  2311  					nodeToDelete.mu.Unlock()
  2312  					return nil, false
  2313  				}
  2314  				nodeToDelete.flags.SetTrue(marked)
  2315  				isMarked = true
  2316  			}
  2317  			// Accomplish the physical deletion.
  2318  			var (
  2319  				highestLocked        = -1 // the highest level being locked by this process
  2320  				valid                = true
  2321  				pred, succ, prevPred *float64Node
  2322  			)
  2323  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2324  				pred, succ = preds[layer], succs[layer]
  2325  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2326  					pred.mu.Lock()
  2327  					highestLocked = layer
  2328  					prevPred = pred
  2329  				}
  2330  				// valid checks whether another node has been inserted into the skip list in this layer
  2331  				// during this process, or whether the previous node has been deleted by another process.
  2332  				// It is valid if:
  2333  				// 1. the previous node exists (it is not marked).
  2334  				// 2. no other node has been inserted into the skip list in this layer.
  2335  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2336  			}
  2337  			if !valid {
  2338  				unlockFloat64(preds, highestLocked)
  2339  				continue
  2340  			}
  2341  			for i := topLayer; i >= 0; i-- {
  2342  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  2343  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
  2344  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  2345  			}
  2346  			nodeToDelete.mu.Unlock()
  2347  			unlockFloat64(preds, highestLocked)
  2348  			atomic.AddInt64(&s.length, -1)
  2349  			return nodeToDelete.loadVal(), true
  2350  		}
  2351  		return nil, false
  2352  	}
  2353  }
  2354  
  2355  // LoadOrStore returns the existing value for the key if present.
  2356  // Otherwise, it stores and returns the given value.
  2357  // The loaded result is true if the value was loaded, false if stored.
  2358  // (Modified from Store)
  2359  func (s *Float64Map) LoadOrStore(key float64, value interface{}) (actual interface{}, loaded bool) {
  2360  	var (
  2361  		level        int
  2362  		preds, succs [maxLevel]*float64Node
  2363  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  2364  	)
  2365  	for {
  2366  		nodeFound := s.findNode(key, &preds, &succs)
  2367  		if nodeFound != nil { // indicating the key is already in the skip-list
  2368  			if !nodeFound.flags.Get(marked) {
  2369  				// We don't need to care about whether or not the node is fully linked,
  2370  				// just return the value.
  2371  				return nodeFound.loadVal(), true
  2372  			}
  2373  			// If the node is marked, it means some other goroutine is in the process of deleting this node,
  2374  			// so we retry in the next loop.
  2375  			continue
  2376  		}
  2377  
  2378  		// Add this node into the skip list.
  2379  		var (
  2380  			highestLocked        = -1 // the highest level being locked by this process
  2381  			valid                = true
  2382  			pred, succ, prevPred *float64Node
  2383  		)
  2384  		if level == 0 {
  2385  			level = s.randomlevel()
  2386  			if level > hl {
  2387  				// If the highest level is updated, it usually means that many goroutines
  2388  				// are inserting items. Hopefully we can find a better path in the next loop.
  2389  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  2390  				// but this strategy's performance is almost the same as the existing method.
  2391  				continue
  2392  			}
  2393  		}
  2394  		for layer := 0; valid && layer < level; layer++ {
  2395  			pred = preds[layer]   // target node's previous node
  2396  			succ = succs[layer]   // target node's next node
  2397  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2398  				pred.mu.Lock()
  2399  				highestLocked = layer
  2400  				prevPred = pred
  2401  			}
  2402  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  2403  			// It is valid if:
  2404  			// 1. The previous node and next node both are not marked.
  2405  			// 2. The previous node's next node is succ in this layer.
  2406  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2407  		}
  2408  		if !valid {
  2409  			unlockFloat64(preds, highestLocked)
  2410  			continue
  2411  		}
  2412  
  2413  		nn := newFloat64Node(key, value, level)
  2414  		for layer := 0; layer < level; layer++ {
  2415  			nn.storeNext(layer, succs[layer])
  2416  			preds[layer].atomicStoreNext(layer, nn)
  2417  		}
  2418  		nn.flags.SetTrue(fullyLinked)
  2419  		unlockFloat64(preds, highestLocked)
  2420  		atomic.AddInt64(&s.length, 1)
  2421  		return value, false
  2422  	}
  2423  }
  2424  
  2425  // LoadOrStoreLazy returns the existing value for the key if present.
  2426  // Otherwise, it stores and returns the value returned by f; f is invoked at most once.
  2427  // The loaded result is true if the value was loaded, false if stored.
  2428  // (Modified from LoadOrStore)
  2429  func (s *Float64Map) LoadOrStoreLazy(key float64, f func() interface{}) (actual interface{}, loaded bool) {
  2430  	var (
  2431  		level        int
  2432  		preds, succs [maxLevel]*float64Node
  2433  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  2434  	)
  2435  	for {
  2436  		nodeFound := s.findNode(key, &preds, &succs)
  2437  		if nodeFound != nil { // indicating the key is already in the skip-list
  2438  			if !nodeFound.flags.Get(marked) {
  2439  				// We don't need to care about whether or not the node is fully linked,
  2440  				// just return the value.
  2441  				return nodeFound.loadVal(), true
  2442  			}
  2443  			// If the node is marked, it means some other goroutine is in the process of deleting this node,
  2444  			// so we retry in the next loop.
  2445  			continue
  2446  		}
  2447  
  2448  		// Add this node into the skip list.
  2449  		var (
  2450  			highestLocked        = -1 // the highest level being locked by this process
  2451  			valid                = true
  2452  			pred, succ, prevPred *float64Node
  2453  		)
  2454  		if level == 0 {
  2455  			level = s.randomlevel()
  2456  			if level > hl {
  2457  				// If the highest level is updated, it usually means that many goroutines
  2458  				// are inserting items. Hopefully we can find a better path in the next loop.
  2459  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  2460  				// but this strategy's performance is almost the same as the existing method.
  2461  				continue
  2462  			}
  2463  		}
  2464  		for layer := 0; valid && layer < level; layer++ {
  2465  			pred = preds[layer]   // target node's previous node
  2466  			succ = succs[layer]   // target node's next node
  2467  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2468  				pred.mu.Lock()
  2469  				highestLocked = layer
  2470  				prevPred = pred
  2471  			}
  2472  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  2473  			// It is valid if:
  2474  			// 1. The previous node and next node both are not marked.
  2475  			// 2. The previous node's next node is succ in this layer.
  2476  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  2477  		}
  2478  		if !valid {
  2479  			unlockFloat64(preds, highestLocked)
  2480  			continue
  2481  		}
  2482  		value := f()
  2483  		nn := newFloat64Node(key, value, level)
  2484  		for layer := 0; layer < level; layer++ {
  2485  			nn.storeNext(layer, succs[layer])
  2486  			preds[layer].atomicStoreNext(layer, nn)
  2487  		}
  2488  		nn.flags.SetTrue(fullyLinked)
  2489  		unlockFloat64(preds, highestLocked)
  2490  		atomic.AddInt64(&s.length, 1)
  2491  		return value, false
  2492  	}
  2493  }
  2494  
  2495  // Delete deletes the value for a key.
  2496  func (s *Float64Map) Delete(key float64) bool {
  2497  	var (
  2498  		nodeToDelete *float64Node
  2499  		isMarked     bool // reports whether this operation marked the node
  2500  		topLayer     = -1
  2501  		preds, succs [maxLevel]*float64Node
  2502  	)
  2503  	for {
  2504  		lFound := s.findNodeDelete(key, &preds, &succs)
  2505  		if isMarked || // this process marked this node, or we found this node in the skip list
  2506  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2507  			if !isMarked { // we haven't marked this node yet
  2508  				nodeToDelete = succs[lFound]
  2509  				topLayer = lFound
  2510  				nodeToDelete.mu.Lock()
  2511  				if nodeToDelete.flags.Get(marked) {
  2512  					// The node is marked by another process,
  2513  					// the physical deletion will be accomplished by another process.
  2514  					nodeToDelete.mu.Unlock()
  2515  					return false
  2516  				}
  2517  				nodeToDelete.flags.SetTrue(marked)
  2518  				isMarked = true
  2519  			}
  2520  			// Accomplish the physical deletion.
  2521  			var (
  2522  				highestLocked        = -1 // the highest level being locked by this process
  2523  				valid                = true
  2524  				pred, succ, prevPred *float64Node
  2525  			)
  2526  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2527  				pred, succ = preds[layer], succs[layer]
  2528  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2529  					pred.mu.Lock()
  2530  					highestLocked = layer
  2531  					prevPred = pred
  2532  				}
  2533  				// valid checks whether another node has been inserted into the skip list in this layer
  2534  				// during this process, or whether the previous node has been deleted by another process.
  2535  				// It is valid if:
  2536  				// 1. the previous node exists (it is not marked).
  2537  				// 2. no other node has been inserted into the skip list in this layer.
  2538  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  2539  			}
  2540  			if !valid {
  2541  				unlockFloat64(preds, highestLocked)
  2542  				continue
  2543  			}
  2544  			for i := topLayer; i >= 0; i-- {
  2545  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  2546  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
  2547  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  2548  			}
  2549  			nodeToDelete.mu.Unlock()
  2550  			unlockFloat64(preds, highestLocked)
  2551  			atomic.AddInt64(&s.length, -1)
  2552  			return true
  2553  		}
  2554  		return false
  2555  	}
  2556  }
  2557  
  2558  // Range calls f sequentially for each key and value present in the skipmap.
  2559  // If f returns false, range stops the iteration.
  2560  //
  2561  // Range does not necessarily correspond to any consistent snapshot of the Map's
  2562  // contents: no key will be visited more than once, but if the value for any key
  2563  // is stored or deleted concurrently, Range may reflect any mapping for that key
  2564  // from any point during the Range call.
  2565  func (s *Float64Map) Range(f func(key float64, value interface{}) bool) {
  2566  	x := s.header.atomicLoadNext(0)
  2567  	for x != nil {
  2568  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2569  			x = x.atomicLoadNext(0)
  2570  			continue
  2571  		}
  2572  		if !f(x.key, x.loadVal()) {
  2573  			break
  2574  		}
  2575  		x = x.atomicLoadNext(0)
  2576  	}
  2577  }
  2578  
  2579  // Len returns the length of this skipmap.
  2580  func (s *Float64Map) Len() int {
  2581  	return int(atomic.LoadInt64(&s.length))
  2582  }
  2583  
  2584  // Float64MapDesc represents a map based on a skip list, in descending key order.
  2585  type Float64MapDesc struct {
  2586  	header       *float64NodeDesc
  2587  	length       int64
  2588  	highestLevel int64 // highest level for now
  2589  }
  2590  
  2591  type float64NodeDesc struct {
  2592  	key   float64
  2593  	value unsafe.Pointer // *interface{}
  2594  	next  optionalArray  // [level]*float64NodeDesc
  2595  	mu    sync.Mutex
  2596  	flags bitflag
  2597  	level uint32
  2598  }
  2599  
  2600  func newFloat64NodeDesc(key float64, value interface{}, level int) *float64NodeDesc {
  2601  	node := &float64NodeDesc{
  2602  		key:   key,
  2603  		level: uint32(level),
  2604  	}
  2605  	node.storeVal(value)
  2606  	if level > op1 {
  2607  		node.next.extra = new([op2]unsafe.Pointer)
  2608  	}
  2609  	return node
  2610  }
  2611  
  2612  func (n *float64NodeDesc) storeVal(value interface{}) {
  2613  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  2614  }
  2615  
  2616  func (n *float64NodeDesc) loadVal() interface{} {
  2617  	return *(*interface{})(atomic.LoadPointer(&n.value))
  2618  }
  2619  
  2620  func (n *float64NodeDesc) loadNext(i int) *float64NodeDesc {
  2621  	return (*float64NodeDesc)(n.next.load(i))
  2622  }
  2623  
  2624  func (n *float64NodeDesc) storeNext(i int, node *float64NodeDesc) {
  2625  	n.next.store(i, unsafe.Pointer(node))
  2626  }
  2627  
  2628  func (n *float64NodeDesc) atomicLoadNext(i int) *float64NodeDesc {
  2629  	return (*float64NodeDesc)(n.next.atomicLoad(i))
  2630  }
  2631  
  2632  func (n *float64NodeDesc) atomicStoreNext(i int, node *float64NodeDesc) {
  2633  	n.next.atomicStore(i, unsafe.Pointer(node))
  2634  }
  2635  
  2636  func (n *float64NodeDesc) lessthan(key float64) bool {
  2637  	return n.key > key
  2638  }
  2639  
  2640  func (n *float64NodeDesc) equal(key float64) bool {
  2641  	return n.key == key
  2642  }
  2643  
  2644  // NewFloat64Desc returns an empty float64 skipmap in descending order.
  2645  func NewFloat64Desc() *Float64MapDesc {
  2646  	h := newFloat64NodeDesc(0, "", maxLevel)
  2647  	h.flags.SetTrue(fullyLinked)
  2648  	return &Float64MapDesc{
  2649  		header:       h,
  2650  		highestLevel: defaultHighestLevel,
  2651  	}
  2652  }
  2653  
  2654  // findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap.
  2655  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
  2656  // (without the full path; it returns immediately once the node is found)
  2657  func (s *Float64MapDesc) findNode(key float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) *float64NodeDesc {
  2658  	x := s.header
  2659  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2660  		succ := x.atomicLoadNext(i)
  2661  		for succ != nil && succ.lessthan(key) {
  2662  			x = succ
  2663  			succ = x.atomicLoadNext(i)
  2664  		}
  2665  		preds[i] = x
  2666  		succs[i] = succ
  2667  
  2668  		// Check if the key is already in the skipmap.
  2669  		if succ != nil && succ.equal(key) {
  2670  			return succ
  2671  		}
  2672  	}
  2673  	return nil
  2674  }
  2675  
  2676  // findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2677  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
  2678  func (s *Float64MapDesc) findNodeDelete(key float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) int {
  2679  	// lFound represents the index of the first layer at which it found a node.
  2680  	lFound, x := -1, s.header
  2681  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2682  		succ := x.atomicLoadNext(i)
  2683  		for succ != nil && succ.lessthan(key) {
  2684  			x = succ
  2685  			succ = x.atomicLoadNext(i)
  2686  		}
  2687  		preds[i] = x
  2688  		succs[i] = succ
  2689  
  2690  		// Check if the key is already in the skip list.
  2691  		if lFound == -1 && succ != nil && succ.equal(key) {
  2692  			lFound = i
  2693  		}
  2694  	}
  2695  	return lFound
  2696  }
  2697  
  2698  func unlockFloat64Desc(preds [maxLevel]*float64NodeDesc, highestLevel int) {
  2699  	var prevPred *float64NodeDesc
  2700  	for i := highestLevel; i >= 0; i-- {
  2701  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  2702  			preds[i].mu.Unlock()
  2703  			prevPred = preds[i]
  2704  		}
  2705  	}
  2706  }
  2707  
  2708  // Store sets the value for a key.
  2709  func (s *Float64MapDesc) Store(key float64, value interface{}) {
  2710  	level := s.randomlevel()
  2711  	var preds, succs [maxLevel]*float64NodeDesc
  2712  	for {
  2713  		nodeFound := s.findNode(key, &preds, &succs)
  2714  		if nodeFound != nil { // indicating the key is already in the skip-list
  2715  			if !nodeFound.flags.Get(marked) {
  2716  				// We don't need to care about whether or not the node is fully linked,
  2717  				// just replace the value.
  2718  				nodeFound.storeVal(value)
  2719  				return
  2720  			}
  2721  			// If the node is marked, it means some other goroutine is in the process of deleting this node,
  2722  			// so we retry in the next loop.
  2723  			continue
  2724  		}
  2725  
  2726  		// Add this node into the skip list.
  2727  		var (
  2728  			highestLocked        = -1 // the highest level being locked by this process
  2729  			valid                = true
  2730  			pred, succ, prevPred *float64NodeDesc
  2731  		)
  2732  		for layer := 0; valid && layer < level; layer++ {
  2733  			pred = preds[layer]   // target node's previous node
  2734  			succ = succs[layer]   // target node's next node
  2735  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2736  				pred.mu.Lock()
  2737  				highestLocked = layer
  2738  				prevPred = pred
  2739  			}
  2740  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  2741  			// It is valid if:
  2742  			// 1. The previous node and next node both are not marked.
  2743  			// 2. The previous node's next node is succ in this layer.
  2744  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2745  		}
  2746  		if !valid {
  2747  			unlockFloat64Desc(preds, highestLocked)
  2748  			continue
  2749  		}
  2750  
  2751  		nn := newFloat64NodeDesc(key, value, level)
  2752  		for layer := 0; layer < level; layer++ {
  2753  			nn.storeNext(layer, succs[layer])
  2754  			preds[layer].atomicStoreNext(layer, nn)
  2755  		}
  2756  		nn.flags.SetTrue(fullyLinked)
  2757  		unlockFloat64Desc(preds, highestLocked)
  2758  		atomic.AddInt64(&s.length, 1)
  2759  		return
  2760  	}
  2761  }
  2762  
  2763  func (s *Float64MapDesc) randomlevel() int {
  2764  	// Generate random level.
  2765  	level := randomLevel()
  2766  	// Update highest level if possible.
  2767  	for {
  2768  		hl := atomic.LoadInt64(&s.highestLevel)
  2769  		if int64(level) <= hl {
  2770  			break
  2771  		}
  2772  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  2773  			break
  2774  		}
  2775  	}
  2776  	return level
  2777  }
  2778  
  2779  // Load returns the value stored in the map for a key, or nil if no
  2780  // value is present.
  2781  // The ok result indicates whether value was found in the map.
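        // Note that a node still being inserted (not yet fully linked) or already
        // marked for deletion is treated as absent, so Load can report ok == false
        // for a key whose concurrent Store has not completed yet.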
  2782  func (s *Float64MapDesc) Load(key float64) (value interface{}, ok bool) {
  2783  	x := s.header
  2784  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2785  		nex := x.atomicLoadNext(i)
  2786  		for nex != nil && nex.lessthan(key) {
  2787  			x = nex
  2788  			nex = x.atomicLoadNext(i)
  2789  		}
  2790  
  2791  		// Check if the key is already in the skip list.
  2792  		if nex != nil && nex.equal(key) {
  2793  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  2794  				return nex.loadVal(), true
  2795  			}
  2796  			return nil, false
  2797  		}
  2798  	}
  2799  	return nil, false
  2800  }
  2801  
  2802  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  2803  // The loaded result reports whether the key was present.
  2804  // (Modified from Delete)
  2805  func (s *Float64MapDesc) LoadAndDelete(key float64) (value interface{}, loaded bool) {
  2806  	var (
  2807  		nodeToDelete *float64NodeDesc
  2808  		isMarked     bool // records whether this operation marked the node
  2809  		topLayer     = -1
  2810  		preds, succs [maxLevel]*float64NodeDesc
  2811  	)
  2812  	for {
  2813  		lFound := s.findNodeDelete(key, &preds, &succs)
  2814  		if isMarked || // this goroutine marked the node, or the node was found in the skip list
  2815  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2816  			if !isMarked { // the node has not been marked yet; mark it now
  2817  				nodeToDelete = succs[lFound]
  2818  				topLayer = lFound
  2819  				nodeToDelete.mu.Lock()
  2820  				if nodeToDelete.flags.Get(marked) {
  2821  					// The node is marked by another process,
  2822  					// the physical deletion will be accomplished by another process.
  2823  					nodeToDelete.mu.Unlock()
  2824  					return nil, false
  2825  				}
  2826  				nodeToDelete.flags.SetTrue(marked)
  2827  				isMarked = true
  2828  			}
  2829  			// Accomplish the physical deletion.
  2830  			var (
  2831  				highestLocked        = -1 // the highest level being locked by this process
  2832  				valid                = true
  2833  				pred, succ, prevPred *float64NodeDesc
  2834  			)
  2835  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2836  				pred, succ = preds[layer], succs[layer]
  2837  				if pred != prevPred { // the same predecessor can span several layers; lock it only once
  2838  					pred.mu.Lock()
  2839  					highestLocked = layer
  2840  					prevPred = pred
  2841  				}
  2842  				// valid checks whether another node has been inserted into this layer during
  2843  				// this process, or whether the predecessor has been deleted by another goroutine.
  2844  				// It is valid if:
  2845  				// 1. the previous node exists (it is not marked), and
  2846  				// 2. no other node has been inserted into this layer (pred still points to succ).
  2847  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2848  			}
  2849  			if !valid {
  2850  				unlockFloat64Desc(preds, highestLocked)
  2851  				continue
  2852  			}
  2853  			for i := topLayer; i >= 0; i-- {
  2854  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  2855  				// so the non-atomic loadNext is sufficient here.
  2856  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  2857  			}
  2858  			nodeToDelete.mu.Unlock()
  2859  			unlockFloat64Desc(preds, highestLocked)
  2860  			atomic.AddInt64(&s.length, -1)
  2861  			return nodeToDelete.loadVal(), true
  2862  		}
  2863  		return nil, false
  2864  	}
  2865  }
  2866  
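        // A usage sketch for LoadAndDelete (illustrative only; it assumes a caller
        // that imports this package as skipmap, and NewFloat64Desc is assumed from
        // this file's naming pattern):
        //
        //	m := skipmap.NewFloat64Desc()
        //	m.Store(3.14, "pi")
        //	v, loaded := m.LoadAndDelete(3.14)
        //	fmt.Println(v, loaded) // "pi" true; the key is now gone
        //	_, loaded = m.LoadAndDelete(3.14)
        //	fmt.Println(loaded) // false: the key was already deleted
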
  2867  // LoadOrStore returns the existing value for the key if present.
  2868  // Otherwise, it stores and returns the given value.
  2869  // The loaded result is true if the value was loaded, false if stored.
  2870  // (Modified from Store)
  2871  func (s *Float64MapDesc) LoadOrStore(key float64, value interface{}) (actual interface{}, loaded bool) {
  2872  	var (
  2873  		level        int
  2874  		preds, succs [maxLevel]*float64NodeDesc
  2875  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  2876  	)
  2877  	for {
  2878  		nodeFound := s.findNode(key, &preds, &succs)
  2879  		if nodeFound != nil { // indicating the key is already in the skip-list
  2880  			if !nodeFound.flags.Get(marked) {
  2881  				// We don't need to care about whether or not the node is fully linked,
  2882  				// just return the value.
  2883  				return nodeFound.loadVal(), true
  2884  			}
  2885  			// If the node is marked, some other goroutine is in the process of deleting it,
  2886  			// so retry and add the node in the next loop.
  2887  			continue
  2888  		}
  2889  
  2890  		// Add this node into skip list.
  2891  		var (
  2892  			highestLocked        = -1 // the highest level being locked by this process
  2893  			valid                = true
  2894  			pred, succ, prevPred *float64NodeDesc
  2895  		)
  2896  		if level == 0 {
  2897  			level = s.randomlevel()
  2898  			if level > hl {
  2899  				// If the highest level has been raised, it usually means that many goroutines
  2900  				// are inserting items. Hopefully we can find a better path in the next loop.
  2901  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  2902  				// but this strategy's performance is almost the same as the existing method.
  2903  				continue
  2904  			}
  2905  		}
  2906  		for layer := 0; valid && layer < level; layer++ {
  2907  			pred = preds[layer]   // target node's previous node
  2908  			succ = succs[layer]   // target node's next node
  2909  			if pred != prevPred { // the same predecessor can span several layers; lock it only once
  2910  				pred.mu.Lock()
  2911  				highestLocked = layer
  2912  				prevPred = pred
  2913  			}
  2914  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  2915  			// It is valid if:
  2916  			// 1. The previous node and next node both are not marked.
  2917  			// 2. The previous node's next node is succ in this layer.
  2918  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2919  		}
  2920  		if !valid {
  2921  			unlockFloat64Desc(preds, highestLocked)
  2922  			continue
  2923  		}
  2924  
  2925  		nn := newFloat64NodeDesc(key, value, level)
  2926  		for layer := 0; layer < level; layer++ {
  2927  			nn.storeNext(layer, succs[layer])
  2928  			preds[layer].atomicStoreNext(layer, nn)
  2929  		}
  2930  		nn.flags.SetTrue(fullyLinked)
  2931  		unlockFloat64Desc(preds, highestLocked)
  2932  		atomic.AddInt64(&s.length, 1)
  2933  		return value, false
  2934  	}
  2935  }
  2936  
  2937  // LoadOrStoreLazy returns the existing value for the key if present.
  2938  // Otherwise, it stores and returns the value computed by f; f is called only once.
  2939  // The loaded result is true if the value was loaded, false if stored.
  2940  // (Modified from LoadOrStore)
  2941  func (s *Float64MapDesc) LoadOrStoreLazy(key float64, f func() interface{}) (actual interface{}, loaded bool) {
  2942  	var (
  2943  		level        int
  2944  		preds, succs [maxLevel]*float64NodeDesc
  2945  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  2946  	)
  2947  	for {
  2948  		nodeFound := s.findNode(key, &preds, &succs)
  2949  		if nodeFound != nil { // indicating the key is already in the skip-list
  2950  			if !nodeFound.flags.Get(marked) {
  2951  				// We don't need to care about whether or not the node is fully linked,
  2952  				// just return the value.
  2953  				return nodeFound.loadVal(), true
  2954  			}
  2955  			// If the node is marked, some other goroutine is in the process of deleting it,
  2956  			// so retry and add the node in the next loop.
  2957  			continue
  2958  		}
  2959  
  2960  		// Add this node into skip list.
  2961  		var (
  2962  			highestLocked        = -1 // the highest level being locked by this process
  2963  			valid                = true
  2964  			pred, succ, prevPred *float64NodeDesc
  2965  		)
  2966  		if level == 0 {
  2967  			level = s.randomlevel()
  2968  			if level > hl {
  2969  				// If the highest level has been raised, it usually means that many goroutines
  2970  				// are inserting items. Hopefully we can find a better path in the next loop.
  2971  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  2972  				// but this strategy's performance is almost the same as the existing method.
  2973  				continue
  2974  			}
  2975  		}
  2976  		for layer := 0; valid && layer < level; layer++ {
  2977  			pred = preds[layer]   // target node's previous node
  2978  			succ = succs[layer]   // target node's next node
  2979  			if pred != prevPred { // the same predecessor can span several layers; lock it only once
  2980  				pred.mu.Lock()
  2981  				highestLocked = layer
  2982  				prevPred = pred
  2983  			}
  2984  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  2985  			// It is valid if:
  2986  			// 1. The previous node and next node both are not marked.
  2987  			// 2. The previous node's next node is succ in this layer.
  2988  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  2989  		}
  2990  		if !valid {
  2991  			unlockFloat64Desc(preds, highestLocked)
  2992  			continue
  2993  		}
  2994  		value := f()
  2995  		nn := newFloat64NodeDesc(key, value, level)
  2996  		for layer := 0; layer < level; layer++ {
  2997  			nn.storeNext(layer, succs[layer])
  2998  			preds[layer].atomicStoreNext(layer, nn)
  2999  		}
  3000  		nn.flags.SetTrue(fullyLinked)
  3001  		unlockFloat64Desc(preds, highestLocked)
  3002  		atomic.AddInt64(&s.length, 1)
  3003  		return value, false
  3004  	}
  3005  }
  3006  
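        // A sketch of the "lazy" contract: f runs only when the key is absent, and
        // only after the insertion position has been locked and validated, so it is
        // called at most once per stored key (illustrative only; NewFloat64Desc is
        // assumed from this file's naming pattern):
        //
        //	m := skipmap.NewFloat64Desc()
        //	calls := 0
        //	expensive := func() interface{} { calls++; return "computed" }
        //	v, loaded := m.LoadOrStoreLazy(1.0, expensive) // stored: f is called
        //	v, loaded = m.LoadOrStoreLazy(1.0, expensive)  // loaded: f is not called
        //	fmt.Println(v, loaded, calls)                  // "computed" true 1
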
  3007  // Delete deletes the value for a key.
  3008  func (s *Float64MapDesc) Delete(key float64) bool {
  3009  	var (
  3010  		nodeToDelete *float64NodeDesc
  3011  		isMarked     bool // records whether this operation marked the node
  3012  		topLayer     = -1
  3013  		preds, succs [maxLevel]*float64NodeDesc
  3014  	)
  3015  	for {
  3016  		lFound := s.findNodeDelete(key, &preds, &succs)
  3017  		if isMarked || // this goroutine marked the node, or the node was found in the skip list
  3018  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3019  			if !isMarked { // the node has not been marked yet; mark it now
  3020  				nodeToDelete = succs[lFound]
  3021  				topLayer = lFound
  3022  				nodeToDelete.mu.Lock()
  3023  				if nodeToDelete.flags.Get(marked) {
  3024  					// The node is marked by another process,
  3025  					// the physical deletion will be accomplished by another process.
  3026  					nodeToDelete.mu.Unlock()
  3027  					return false
  3028  				}
  3029  				nodeToDelete.flags.SetTrue(marked)
  3030  				isMarked = true
  3031  			}
  3032  			// Accomplish the physical deletion.
  3033  			var (
  3034  				highestLocked        = -1 // the highest level being locked by this process
  3035  				valid                = true
  3036  				pred, succ, prevPred *float64NodeDesc
  3037  			)
  3038  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3039  				pred, succ = preds[layer], succs[layer]
  3040  				if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3041  					pred.mu.Lock()
  3042  					highestLocked = layer
  3043  					prevPred = pred
  3044  				}
  3045  				// valid checks whether another node has been inserted into this layer during
  3046  				// this process, or whether the predecessor has been deleted by another goroutine.
  3047  				// It is valid if:
  3048  				// 1. the previous node exists (it is not marked), and
  3049  				// 2. no other node has been inserted into this layer (pred still points to succ).
  3050  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  3051  			}
  3052  			if !valid {
  3053  				unlockFloat64Desc(preds, highestLocked)
  3054  				continue
  3055  			}
  3056  			for i := topLayer; i >= 0; i-- {
  3057  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  3058  				// so the non-atomic loadNext is sufficient here.
  3059  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  3060  			}
  3061  			nodeToDelete.mu.Unlock()
  3062  			unlockFloat64Desc(preds, highestLocked)
  3063  			atomic.AddInt64(&s.length, -1)
  3064  			return true
  3065  		}
  3066  		return false
  3067  	}
  3068  }
  3069  
  3070  // Range calls f sequentially for each key and value present in the skipmap.
  3071  // If f returns false, range stops the iteration.
  3072  //
  3073  // Range does not necessarily correspond to any consistent snapshot of the Map's
  3074  // contents: no key will be visited more than once, but if the value for any key
  3075  // is stored or deleted concurrently, Range may reflect any mapping for that key
  3076  // from any point during the Range call.
  3077  func (s *Float64MapDesc) Range(f func(key float64, value interface{}) bool) {
  3078  	x := s.header.atomicLoadNext(0)
  3079  	for x != nil {
  3080  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  3081  			x = x.atomicLoadNext(0)
  3082  			continue
  3083  		}
  3084  		if !f(x.key, x.loadVal()) {
  3085  			break
  3086  		}
  3087  		x = x.atomicLoadNext(0)
  3088  	}
  3089  }
  3090  
  3091  // Len returns the length of this skipmap.
  3092  func (s *Float64MapDesc) Len() int {
  3093  	return int(atomic.LoadInt64(&s.length))
  3094  }
  3095  
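        // An end-to-end sketch for the descending float64 map (illustrative only;
        // NewFloat64Desc is assumed from this file's naming pattern):
        //
        //	m := skipmap.NewFloat64Desc()
        //	m.Store(1.0, "a")
        //	m.Store(2.0, "b")
        //	m.Store(3.0, "c")
        //	m.Range(func(key float64, value interface{}) bool {
        //		fmt.Println(key, value) // 3 c, then 2 b, then 1 a: keys in descending order
        //		return true             // returning false would stop the iteration early
        //	})
        //	fmt.Println(m.Len()) // 3
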
  3096  // IntMap represents a map based on skip list in ascending order.
  3097  type IntMap struct {
  3098  	header       *intNode
  3099  	length       int64
  3100  	highestLevel int64 // highest level for now
  3101  }
  3102  
  3103  type intNode struct {
  3104  	key   int
  3105  	value unsafe.Pointer // *interface{}
  3106  	next  optionalArray  // [level]*intNode
  3107  	mu    sync.Mutex
  3108  	flags bitflag
  3109  	level uint32
  3110  }
  3111  
  3112  func newIntNode(key int, value interface{}, level int) *intNode {
  3113  	node := &intNode{
  3114  		key:   key,
  3115  		level: uint32(level),
  3116  	}
  3117  	node.storeVal(value)
  3118  	if level > op1 {
  3119  		node.next.extra = new([op2]unsafe.Pointer)
  3120  	}
  3121  	return node
  3122  }
  3123  
  3124  func (n *intNode) storeVal(value interface{}) {
  3125  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  3126  }
  3127  
  3128  func (n *intNode) loadVal() interface{} {
  3129  	return *(*interface{})(atomic.LoadPointer(&n.value))
  3130  }
  3131  
  3132  func (n *intNode) loadNext(i int) *intNode {
  3133  	return (*intNode)(n.next.load(i))
  3134  }
  3135  
  3136  func (n *intNode) storeNext(i int, node *intNode) {
  3137  	n.next.store(i, unsafe.Pointer(node))
  3138  }
  3139  
  3140  func (n *intNode) atomicLoadNext(i int) *intNode {
  3141  	return (*intNode)(n.next.atomicLoad(i))
  3142  }
  3143  
  3144  func (n *intNode) atomicStoreNext(i int, node *intNode) {
  3145  	n.next.atomicStore(i, unsafe.Pointer(node))
  3146  }
  3147  
  3148  func (n *intNode) lessthan(key int) bool {
  3149  	return n.key < key
  3150  }
  3151  
  3152  func (n *intNode) equal(key int) bool {
  3153  	return n.key == key
  3154  }
  3155  
  3156  // NewInt returns an empty int skipmap.
  3157  func NewInt() *IntMap {
  3158  	h := newIntNode(0, "", maxLevel)
  3159  	h.flags.SetTrue(fullyLinked)
  3160  	return &IntMap{
  3161  		header:       h,
  3162  		highestLevel: defaultHighestLevel,
  3163  	}
  3164  }
  3165  
  3166  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  3167  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  3168  // (Unlike findNodeDelete, it does not record the full path: it returns as soon as the node is found.)
  3169  func (s *IntMap) findNode(key int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) *intNode {
  3170  	x := s.header
  3171  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3172  		succ := x.atomicLoadNext(i)
  3173  		for succ != nil && succ.lessthan(key) {
  3174  			x = succ
  3175  			succ = x.atomicLoadNext(i)
  3176  		}
  3177  		preds[i] = x
  3178  		succs[i] = succ
  3179  
  3180  		// Check if the key is already in the skipmap.
  3181  		if succ != nil && succ.equal(key) {
  3182  			return succ
  3183  		}
  3184  	}
  3185  	return nil
  3186  }
  3187  
  3188  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  3189  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  3190  func (s *IntMap) findNodeDelete(key int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) int {
  3191  	// lFound represents the index of the first layer at which it found a node.
  3192  	lFound, x := -1, s.header
  3193  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3194  		succ := x.atomicLoadNext(i)
  3195  		for succ != nil && succ.lessthan(key) {
  3196  			x = succ
  3197  			succ = x.atomicLoadNext(i)
  3198  		}
  3199  		preds[i] = x
  3200  		succs[i] = succ
  3201  
  3202  		// Check if the key is already in the skip list.
  3203  		if lFound == -1 && succ != nil && succ.equal(key) {
  3204  			lFound = i
  3205  		}
  3206  	}
  3207  	return lFound
  3208  }
  3209  
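        // For intuition, a small worked trace of findNodeDelete (illustrative only).
        // Take an ascending IntMap whose highest level is 2, holding keys 1, 3 and 7
        // on level 0, with nodes 3 and 7 tall enough to appear on level 1 as well:
        //
        //	i=1: header -> 3 (3 < 7), stop before 7; preds[1]=3, succs[1]=7, lFound=1
        //	i=0: continue from 3, stop before 7;     preds[0]=3, succs[0]=7
        //	return 1 // equals int(node.level)-1, the node's top layer
        //
        // Delete and LoadAndDelete use lFound both to locate the node and to check
        // that it is fully linked up to its own top layer before marking it.
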
  3210  func unlockInt(preds [maxLevel]*intNode, highestLevel int) {
  3211  	var prevPred *intNode
  3212  	for i := highestLevel; i >= 0; i-- {
  3213  		if preds[i] != prevPred { // the same predecessor can span several layers; unlock it only once
  3214  			preds[i].mu.Unlock()
  3215  			prevPred = preds[i]
  3216  		}
  3217  	}
  3218  }
  3219  
  3220  // Store sets the value for a key.
  3221  func (s *IntMap) Store(key int, value interface{}) {
  3222  	level := s.randomlevel()
  3223  	var preds, succs [maxLevel]*intNode
  3224  	for {
  3225  		nodeFound := s.findNode(key, &preds, &succs)
  3226  		if nodeFound != nil { // indicating the key is already in the skip-list
  3227  			if !nodeFound.flags.Get(marked) {
  3228  				// We don't need to care about whether or not the node is fully linked,
  3229  				// just replace the value.
  3230  				nodeFound.storeVal(value)
  3231  				return
  3232  			}
  3233  			// If the node is marked, some other goroutine is in the process of deleting it,
  3234  			// so retry and add the node in the next loop.
  3235  			continue
  3236  		}
  3237  
  3238  		// Add this node into skip list.
  3239  		var (
  3240  			highestLocked        = -1 // the highest level being locked by this process
  3241  			valid                = true
  3242  			pred, succ, prevPred *intNode
  3243  		)
  3244  		for layer := 0; valid && layer < level; layer++ {
  3245  			pred = preds[layer]   // target node's previous node
  3246  			succ = succs[layer]   // target node's next node
  3247  			if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3248  				pred.mu.Lock()
  3249  				highestLocked = layer
  3250  				prevPred = pred
  3251  			}
  3252  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  3253  			// It is valid if:
  3254  			// 1. The previous node and next node both are not marked.
  3255  			// 2. The previous node's next node is succ in this layer.
  3256  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3257  		}
  3258  		if !valid {
  3259  			unlockInt(preds, highestLocked)
  3260  			continue
  3261  		}
  3262  
  3263  		nn := newIntNode(key, value, level)
  3264  		for layer := 0; layer < level; layer++ {
  3265  			nn.storeNext(layer, succs[layer])
  3266  			preds[layer].atomicStoreNext(layer, nn)
  3267  		}
  3268  		nn.flags.SetTrue(fullyLinked)
  3269  		unlockInt(preds, highestLocked)
  3270  		atomic.AddInt64(&s.length, 1)
  3271  		return
  3272  	}
  3273  }
  3274  
  3275  func (s *IntMap) randomlevel() int {
  3276  	// Generate random level.
  3277  	level := randomLevel()
  3278  	// Update highest level if possible.
  3279  	for {
  3280  		hl := atomic.LoadInt64(&s.highestLevel)
  3281  		if int64(level) <= hl {
  3282  			break
  3283  		}
  3284  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3285  			break
  3286  		}
  3287  	}
  3288  	return level
  3289  }
  3290  
  3291  // Load returns the value stored in the map for a key, or nil if no
  3292  // value is present.
  3293  // The ok result indicates whether value was found in the map.
  3294  func (s *IntMap) Load(key int) (value interface{}, ok bool) {
  3295  	x := s.header
  3296  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3297  		nex := x.atomicLoadNext(i)
  3298  		for nex != nil && nex.lessthan(key) {
  3299  			x = nex
  3300  			nex = x.atomicLoadNext(i)
  3301  		}
  3302  
  3303  		// Check if the key is already in the skip list.
  3304  		if nex != nil && nex.equal(key) {
  3305  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  3306  				return nex.loadVal(), true
  3307  			}
  3308  			return nil, false
  3309  		}
  3310  	}
  3311  	return nil, false
  3312  }
  3313  
  3314  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  3315  // The loaded result reports whether the key was present.
  3316  // (Modified from Delete)
  3317  func (s *IntMap) LoadAndDelete(key int) (value interface{}, loaded bool) {
  3318  	var (
  3319  		nodeToDelete *intNode
  3320  		isMarked     bool // records whether this operation marked the node
  3321  		topLayer     = -1
  3322  		preds, succs [maxLevel]*intNode
  3323  	)
  3324  	for {
  3325  		lFound := s.findNodeDelete(key, &preds, &succs)
  3326  		if isMarked || // this goroutine marked the node, or the node was found in the skip list
  3327  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3328  			if !isMarked { // the node has not been marked yet; mark it now
  3329  				nodeToDelete = succs[lFound]
  3330  				topLayer = lFound
  3331  				nodeToDelete.mu.Lock()
  3332  				if nodeToDelete.flags.Get(marked) {
  3333  					// The node is marked by another process,
  3334  					// the physical deletion will be accomplished by another process.
  3335  					nodeToDelete.mu.Unlock()
  3336  					return nil, false
  3337  				}
  3338  				nodeToDelete.flags.SetTrue(marked)
  3339  				isMarked = true
  3340  			}
  3341  			// Accomplish the physical deletion.
  3342  			var (
  3343  				highestLocked        = -1 // the highest level being locked by this process
  3344  				valid                = true
  3345  				pred, succ, prevPred *intNode
  3346  			)
  3347  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3348  				pred, succ = preds[layer], succs[layer]
  3349  				if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3350  					pred.mu.Lock()
  3351  					highestLocked = layer
  3352  					prevPred = pred
  3353  				}
  3354  				// valid checks whether another node has been inserted into this layer during
  3355  				// this process, or whether the predecessor has been deleted by another goroutine.
  3356  				// It is valid if:
  3357  				// 1. the previous node exists (it is not marked), and
  3358  				// 2. no other node has been inserted into this layer (pred still points to succ).
  3359  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  3360  			}
  3361  			if !valid {
  3362  				unlockInt(preds, highestLocked)
  3363  				continue
  3364  			}
  3365  			for i := topLayer; i >= 0; i-- {
  3366  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  3367  				// so the non-atomic loadNext is sufficient here.
  3368  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  3369  			}
  3370  			nodeToDelete.mu.Unlock()
  3371  			unlockInt(preds, highestLocked)
  3372  			atomic.AddInt64(&s.length, -1)
  3373  			return nodeToDelete.loadVal(), true
  3374  		}
  3375  		return nil, false
  3376  	}
  3377  }
  3378  
  3379  // LoadOrStore returns the existing value for the key if present.
  3380  // Otherwise, it stores and returns the given value.
  3381  // The loaded result is true if the value was loaded, false if stored.
  3382  // (Modified from Store)
  3383  func (s *IntMap) LoadOrStore(key int, value interface{}) (actual interface{}, loaded bool) {
  3384  	var (
  3385  		level        int
  3386  		preds, succs [maxLevel]*intNode
  3387  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  3388  	)
  3389  	for {
  3390  		nodeFound := s.findNode(key, &preds, &succs)
  3391  		if nodeFound != nil { // indicating the key is already in the skip-list
  3392  			if !nodeFound.flags.Get(marked) {
  3393  				// We don't need to care about whether or not the node is fully linked,
  3394  				// just return the value.
  3395  				return nodeFound.loadVal(), true
  3396  			}
  3397  			// If the node is marked, some other goroutine is in the process of deleting it,
  3398  			// so retry and add the node in the next loop.
  3399  			continue
  3400  		}
  3401  
  3402  		// Add this node into skip list.
  3403  		var (
  3404  			highestLocked        = -1 // the highest level being locked by this process
  3405  			valid                = true
  3406  			pred, succ, prevPred *intNode
  3407  		)
  3408  		if level == 0 {
  3409  			level = s.randomlevel()
  3410  			if level > hl {
  3411  				// If the highest level has been raised, it usually means that many goroutines
  3412  				// are inserting items. Hopefully we can find a better path in the next loop.
  3413  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  3414  				// but this strategy's performance is almost the same as the existing method.
  3415  				continue
  3416  			}
  3417  		}
  3418  		for layer := 0; valid && layer < level; layer++ {
  3419  			pred = preds[layer]   // target node's previous node
  3420  			succ = succs[layer]   // target node's next node
  3421  			if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3422  				pred.mu.Lock()
  3423  				highestLocked = layer
  3424  				prevPred = pred
  3425  			}
  3426  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  3427  			// It is valid if:
  3428  			// 1. The previous node and next node both are not marked.
  3429  			// 2. The previous node's next node is succ in this layer.
  3430  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3431  		}
  3432  		if !valid {
  3433  			unlockInt(preds, highestLocked)
  3434  			continue
  3435  		}
  3436  
  3437  		nn := newIntNode(key, value, level)
  3438  		for layer := 0; layer < level; layer++ {
  3439  			nn.storeNext(layer, succs[layer])
  3440  			preds[layer].atomicStoreNext(layer, nn)
  3441  		}
  3442  		nn.flags.SetTrue(fullyLinked)
  3443  		unlockInt(preds, highestLocked)
  3444  		atomic.AddInt64(&s.length, 1)
  3445  		return value, false
  3446  	}
  3447  }
  3448  
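        // A usage sketch contrasting the two outcomes of LoadOrStore (illustrative
        // only, from a caller importing this package as skipmap):
        //
        //	m := skipmap.NewInt()
        //	actual, loaded := m.LoadOrStore(10, "first")
        //	fmt.Println(actual, loaded) // "first" false: the value was stored
        //	actual, loaded = m.LoadOrStore(10, "second")
        //	fmt.Println(actual, loaded) // "first" true: the existing value was kept
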
  3449  // LoadOrStoreLazy returns the existing value for the key if present.
  3450  // Otherwise, it stores and returns the value computed by f; f is called only once.
  3451  // The loaded result is true if the value was loaded, false if stored.
  3452  // (Modified from LoadOrStore)
  3453  func (s *IntMap) LoadOrStoreLazy(key int, f func() interface{}) (actual interface{}, loaded bool) {
  3454  	var (
  3455  		level        int
  3456  		preds, succs [maxLevel]*intNode
  3457  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  3458  	)
  3459  	for {
  3460  		nodeFound := s.findNode(key, &preds, &succs)
  3461  		if nodeFound != nil { // indicating the key is already in the skip-list
  3462  			if !nodeFound.flags.Get(marked) {
  3463  				// We don't need to care about whether or not the node is fully linked,
  3464  				// just return the value.
  3465  				return nodeFound.loadVal(), true
  3466  			}
  3467  			// If the node is marked, some other goroutine is in the process of deleting it,
  3468  			// so retry and add the node in the next loop.
  3469  			continue
  3470  		}
  3471  
  3472  		// Add this node into skip list.
  3473  		var (
  3474  			highestLocked        = -1 // the highest level being locked by this process
  3475  			valid                = true
  3476  			pred, succ, prevPred *intNode
  3477  		)
  3478  		if level == 0 {
  3479  			level = s.randomlevel()
  3480  			if level > hl {
  3481  				// If the highest level has been raised, it usually means that many goroutines
  3482  				// are inserting items. Hopefully we can find a better path in the next loop.
  3483  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  3484  				// but this strategy's performance is almost the same as the existing method.
  3485  				continue
  3486  			}
  3487  		}
  3488  		for layer := 0; valid && layer < level; layer++ {
  3489  			pred = preds[layer]   // target node's previous node
  3490  			succ = succs[layer]   // target node's next node
  3491  			if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3492  				pred.mu.Lock()
  3493  				highestLocked = layer
  3494  				prevPred = pred
  3495  			}
  3496  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  3497  			// It is valid if:
  3498  			// 1. The previous node and next node both are not marked.
  3499  			// 2. The previous node's next node is succ in this layer.
  3500  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  3501  		}
  3502  		if !valid {
  3503  			unlockInt(preds, highestLocked)
  3504  			continue
  3505  		}
  3506  		value := f()
  3507  		nn := newIntNode(key, value, level)
  3508  		for layer := 0; layer < level; layer++ {
  3509  			nn.storeNext(layer, succs[layer])
  3510  			preds[layer].atomicStoreNext(layer, nn)
  3511  		}
  3512  		nn.flags.SetTrue(fullyLinked)
  3513  		unlockInt(preds, highestLocked)
  3514  		atomic.AddInt64(&s.length, 1)
  3515  		return value, false
  3516  	}
  3517  }
  3518  
  3519  // Delete deletes the value for a key.
  3520  func (s *IntMap) Delete(key int) bool {
  3521  	var (
  3522  		nodeToDelete *intNode
  3523  		isMarked     bool // records whether this operation marked the node
  3524  		topLayer     = -1
  3525  		preds, succs [maxLevel]*intNode
  3526  	)
  3527  	for {
  3528  		lFound := s.findNodeDelete(key, &preds, &succs)
  3529  		if isMarked || // this goroutine marked the node, or the node was found in the skip list
  3530  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3531  			if !isMarked { // the node has not been marked yet; mark it now
  3532  				nodeToDelete = succs[lFound]
  3533  				topLayer = lFound
  3534  				nodeToDelete.mu.Lock()
  3535  				if nodeToDelete.flags.Get(marked) {
  3536  					// The node is marked by another process,
  3537  					// the physical deletion will be accomplished by another process.
  3538  					nodeToDelete.mu.Unlock()
  3539  					return false
  3540  				}
  3541  				nodeToDelete.flags.SetTrue(marked)
  3542  				isMarked = true
  3543  			}
  3544  			// Accomplish the physical deletion.
  3545  			var (
  3546  				highestLocked        = -1 // the highest level being locked by this process
  3547  				valid                = true
  3548  				pred, succ, prevPred *intNode
  3549  			)
  3550  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3551  				pred, succ = preds[layer], succs[layer]
  3552  				if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3553  					pred.mu.Lock()
  3554  					highestLocked = layer
  3555  					prevPred = pred
  3556  				}
  3557  				// valid checks whether another node has been inserted into this layer during
  3558  				// this process, or whether the predecessor has been deleted by another goroutine.
  3559  				// It is valid if:
  3560  				// 1. the previous node exists (it is not marked), and
  3561  				// 2. no other node has been inserted into this layer (pred still points to succ).
  3562  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  3563  			}
  3564  			if !valid {
  3565  				unlockInt(preds, highestLocked)
  3566  				continue
  3567  			}
  3568  			for i := topLayer; i >= 0; i-- {
  3569  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  3570  				// so the non-atomic loadNext is sufficient here.
  3571  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  3572  			}
  3573  			nodeToDelete.mu.Unlock()
  3574  			unlockInt(preds, highestLocked)
  3575  			atomic.AddInt64(&s.length, -1)
  3576  			return true
  3577  		}
  3578  		return false
  3579  	}
  3580  }
  3581  
  3582  // Range calls f sequentially for each key and value present in the skipmap.
  3583  // If f returns false, range stops the iteration.
  3584  //
  3585  // Range does not necessarily correspond to any consistent snapshot of the Map's
  3586  // contents: no key will be visited more than once, but if the value for any key
  3587  // is stored or deleted concurrently, Range may reflect any mapping for that key
  3588  // from any point during the Range call.
  3589  func (s *IntMap) Range(f func(key int, value interface{}) bool) {
  3590  	x := s.header.atomicLoadNext(0)
  3591  	for x != nil {
  3592  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  3593  			x = x.atomicLoadNext(0)
  3594  			continue
  3595  		}
  3596  		if !f(x.key, x.loadVal()) {
  3597  			break
  3598  		}
  3599  		x = x.atomicLoadNext(0)
  3600  	}
  3601  }
  3602  
  3603  // Len returns the length of this skipmap.
  3604  func (s *IntMap) Len() int {
  3605  	return int(atomic.LoadInt64(&s.length))
  3606  }
  3607  
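        // Because Store, Load and Delete are safe for concurrent use, an IntMap can
        // stand in for a mutex-guarded map[int]interface{}. A minimal sketch
        // (illustrative only):
        //
        //	m := skipmap.NewInt()
        //	var wg sync.WaitGroup
        //	for i := 0; i < 100; i++ {
        //		wg.Add(1)
        //		go func(i int) {
        //			defer wg.Done()
        //			m.Store(i, i*i) // concurrent writers need no external locking
        //		}(i)
        //	}
        //	wg.Wait()
        //	fmt.Println(m.Len()) // 100
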
  3608  // IntMapDesc represents a map based on skip list in descending order.
  3609  type IntMapDesc struct {
  3610  	header       *intNodeDesc
  3611  	length       int64
  3612  	highestLevel int64 // highest level for now
  3613  }
  3614  
  3615  type intNodeDesc struct {
  3616  	key   int
  3617  	value unsafe.Pointer // *interface{}
  3618  	next  optionalArray  // [level]*intNodeDesc
  3619  	mu    sync.Mutex
  3620  	flags bitflag
  3621  	level uint32
  3622  }
  3623  
  3624  func newIntNodeDesc(key int, value interface{}, level int) *intNodeDesc {
  3625  	node := &intNodeDesc{
  3626  		key:   key,
  3627  		level: uint32(level),
  3628  	}
  3629  	node.storeVal(value)
  3630  	if level > op1 {
  3631  		node.next.extra = new([op2]unsafe.Pointer)
  3632  	}
  3633  	return node
  3634  }
  3635  
  3636  func (n *intNodeDesc) storeVal(value interface{}) {
  3637  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  3638  }
  3639  
  3640  func (n *intNodeDesc) loadVal() interface{} {
  3641  	return *(*interface{})(atomic.LoadPointer(&n.value))
  3642  }
  3643  
  3644  func (n *intNodeDesc) loadNext(i int) *intNodeDesc {
  3645  	return (*intNodeDesc)(n.next.load(i))
  3646  }
  3647  
  3648  func (n *intNodeDesc) storeNext(i int, node *intNodeDesc) {
  3649  	n.next.store(i, unsafe.Pointer(node))
  3650  }
  3651  
  3652  func (n *intNodeDesc) atomicLoadNext(i int) *intNodeDesc {
  3653  	return (*intNodeDesc)(n.next.atomicLoad(i))
  3654  }
  3655  
  3656  func (n *intNodeDesc) atomicStoreNext(i int, node *intNodeDesc) {
  3657  	n.next.atomicStore(i, unsafe.Pointer(node))
  3658  }
  3659  
  3660  func (n *intNodeDesc) lessthan(key int) bool {
  3661  	return n.key > key
  3662  }
  3663  
  3664  func (n *intNodeDesc) equal(key int) bool {
  3665  	return n.key == key
  3666  }
  3667  
  3668  // NewIntDesc returns an empty int skipmap in descending order.
  3669  func NewIntDesc() *IntMapDesc {
  3670  	h := newIntNodeDesc(0, "", maxLevel)
  3671  	h.flags.SetTrue(fullyLinked)
  3672  	return &IntMapDesc{
  3673  		header:       h,
  3674  		highestLevel: defaultHighestLevel,
  3675  	}
  3676  }
  3677  
  3678  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  3679  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
  3680  // (Unlike findNodeDelete, it does not record the full path: it returns as soon as the node is found.)
  3681  func (s *IntMapDesc) findNode(key int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) *intNodeDesc {
  3682  	x := s.header
  3683  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3684  		succ := x.atomicLoadNext(i)
  3685  		for succ != nil && succ.lessthan(key) {
  3686  			x = succ
  3687  			succ = x.atomicLoadNext(i)
  3688  		}
  3689  		preds[i] = x
  3690  		succs[i] = succ
  3691  
  3692  		// Check if the key is already in the skipmap.
  3693  		if succ != nil && succ.equal(key) {
  3694  			return succ
  3695  		}
  3696  	}
  3697  	return nil
  3698  }
  3699  
  3700  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  3701  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
  3702  func (s *IntMapDesc) findNodeDelete(key int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) int {
  3703  	// lFound represents the index of the first layer at which it found a node.
  3704  	lFound, x := -1, s.header
  3705  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3706  		succ := x.atomicLoadNext(i)
  3707  		for succ != nil && succ.lessthan(key) {
  3708  			x = succ
  3709  			succ = x.atomicLoadNext(i)
  3710  		}
  3711  		preds[i] = x
  3712  		succs[i] = succ
  3713  
  3714  		// Check if the key is already in the skip list.
  3715  		if lFound == -1 && succ != nil && succ.equal(key) {
  3716  			lFound = i
  3717  		}
  3718  	}
  3719  	return lFound
  3720  }
  3721  
  3722  func unlockIntDesc(preds [maxLevel]*intNodeDesc, highestLevel int) {
  3723  	var prevPred *intNodeDesc
  3724  	for i := highestLevel; i >= 0; i-- {
  3725  		if preds[i] != prevPred { // the same predecessor can span several layers; unlock it only once
  3726  			preds[i].mu.Unlock()
  3727  			prevPred = preds[i]
  3728  		}
  3729  	}
  3730  }
  3731  
  3732  // Store sets the value for a key.
  3733  func (s *IntMapDesc) Store(key int, value interface{}) {
  3734  	level := s.randomlevel()
  3735  	var preds, succs [maxLevel]*intNodeDesc
  3736  	for {
  3737  		nodeFound := s.findNode(key, &preds, &succs)
  3738  		if nodeFound != nil { // indicating the key is already in the skip-list
  3739  			if !nodeFound.flags.Get(marked) {
  3740  				// We don't need to care about whether or not the node is fully linked,
  3741  				// just replace the value.
  3742  				nodeFound.storeVal(value)
  3743  				return
  3744  			}
  3745  			// If the node is marked, some other goroutine is in the process of deleting it,
  3746  			// so retry and add the node in the next loop.
  3747  			continue
  3748  		}
  3749  
  3750  		// Add this node into skip list.
  3751  		var (
  3752  			highestLocked        = -1 // the highest level being locked by this process
  3753  			valid                = true
  3754  			pred, succ, prevPred *intNodeDesc
  3755  		)
  3756  		for layer := 0; valid && layer < level; layer++ {
  3757  			pred = preds[layer]   // target node's previous node
  3758  			succ = succs[layer]   // target node's next node
  3759  			if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3760  				pred.mu.Lock()
  3761  				highestLocked = layer
  3762  				prevPred = pred
  3763  			}
  3764  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  3765  			// It is valid if:
  3766  			// 1. The previous node and next node both are not marked.
  3767  			// 2. The previous node's next node is succ in this layer.
  3768  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3769  		}
  3770  		if !valid {
  3771  			unlockIntDesc(preds, highestLocked)
  3772  			continue
  3773  		}
  3774  
  3775  		nn := newIntNodeDesc(key, value, level)
  3776  		for layer := 0; layer < level; layer++ {
  3777  			nn.storeNext(layer, succs[layer])
  3778  			preds[layer].atomicStoreNext(layer, nn)
  3779  		}
  3780  		nn.flags.SetTrue(fullyLinked)
  3781  		unlockIntDesc(preds, highestLocked)
  3782  		atomic.AddInt64(&s.length, 1)
  3783  		return
  3784  	}
  3785  }
  3786  
  3787  func (s *IntMapDesc) randomlevel() int {
  3788  	// Generate random level.
  3789  	level := randomLevel()
  3790  	// Update highest level if possible.
  3791  	for {
  3792  		hl := atomic.LoadInt64(&s.highestLevel)
  3793  		if int64(level) <= hl {
  3794  			break
  3795  		}
  3796  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3797  			break
  3798  		}
  3799  	}
  3800  	return level
  3801  }
  3802  
  3803  // Load returns the value stored in the map for a key, or nil if no
  3804  // value is present.
  3805  // The ok result indicates whether value was found in the map.
  3806  func (s *IntMapDesc) Load(key int) (value interface{}, ok bool) {
  3807  	x := s.header
  3808  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3809  		nex := x.atomicLoadNext(i)
  3810  		for nex != nil && nex.lessthan(key) {
  3811  			x = nex
  3812  			nex = x.atomicLoadNext(i)
  3813  		}
  3814  
  3815  		// Check if the key is already in the skip list.
  3816  		if nex != nil && nex.equal(key) {
  3817  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  3818  				return nex.loadVal(), true
  3819  			}
  3820  			return nil, false
  3821  		}
  3822  	}
  3823  	return nil, false
  3824  }
  3825  
  3826  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  3827  // The loaded result reports whether the key was present.
  3828  // (Modified from Delete)
  3829  func (s *IntMapDesc) LoadAndDelete(key int) (value interface{}, loaded bool) {
  3830  		isMarked     bool // records whether this operation marked the node
  3831  		nodeToDelete *intNodeDesc
  3832  		isMarked     bool // represents if this operation mark the node
  3833  		topLayer     = -1
  3834  		preds, succs [maxLevel]*intNodeDesc
  3835  	)
  3836  	for {
  3837  		lFound := s.findNodeDelete(key, &preds, &succs)
  3838  		if isMarked || // this goroutine marked the node, or the node was found in the skip list
  3839  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3840  			if !isMarked { // the node has not been marked yet; mark it now
  3841  				nodeToDelete = succs[lFound]
  3842  				topLayer = lFound
  3843  				nodeToDelete.mu.Lock()
  3844  				if nodeToDelete.flags.Get(marked) {
  3845  					// The node is marked by another process,
  3846  					// the physical deletion will be accomplished by another process.
  3847  					nodeToDelete.mu.Unlock()
  3848  					return nil, false
  3849  				}
  3850  				nodeToDelete.flags.SetTrue(marked)
  3851  				isMarked = true
  3852  			}
  3853  			// Accomplish the physical deletion.
  3854  			var (
  3855  				highestLocked        = -1 // the highest level being locked by this process
  3856  				valid                = true
  3857  				pred, succ, prevPred *intNodeDesc
  3858  			)
  3859  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3860  				pred, succ = preds[layer], succs[layer]
  3861  				if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3862  					pred.mu.Lock()
  3863  					highestLocked = layer
  3864  					prevPred = pred
  3865  				}
  3866  				// valid checks whether another node has been inserted into this layer during
  3867  				// this process, or whether the predecessor has been deleted by another goroutine.
  3868  				// It is valid if:
  3869  				// 1. the previous node exists (it is not marked), and
  3870  				// 2. no other node has been inserted into this layer (pred still points to succ).
  3871  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  3872  			}
  3873  			if !valid {
  3874  				unlockIntDesc(preds, highestLocked)
  3875  				continue
  3876  			}
  3877  			for i := topLayer; i >= 0; i-- {
  3878  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  3879  				// so the non-atomic loadNext is sufficient here.
  3880  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  3881  			}
  3882  			nodeToDelete.mu.Unlock()
  3883  			unlockIntDesc(preds, highestLocked)
  3884  			atomic.AddInt64(&s.length, -1)
  3885  			return nodeToDelete.loadVal(), true
  3886  		}
  3887  		return nil, false
  3888  	}
  3889  }
  3890  
  3891  // LoadOrStore returns the existing value for the key if present.
  3892  // Otherwise, it stores and returns the given value.
  3893  // The loaded result is true if the value was loaded, false if stored.
  3894  // (Modified from Store)
  3895  func (s *IntMapDesc) LoadOrStore(key int, value interface{}) (actual interface{}, loaded bool) {
  3896  	var (
  3897  		level        int
  3898  		preds, succs [maxLevel]*intNodeDesc
  3899  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  3900  	)
  3901  	for {
  3902  		nodeFound := s.findNode(key, &preds, &succs)
  3903  		if nodeFound != nil { // indicating the key is already in the skip-list
  3904  			if !nodeFound.flags.Get(marked) {
  3905  				// We don't need to care about whether or not the node is fully linked,
  3906  				// just return the value.
  3907  				return nodeFound.loadVal(), true
  3908  			}
  3909  			// If the node is marked, some other goroutine is in the process of deleting it,
  3910  			// so retry and add the node in the next loop.
  3911  			continue
  3912  		}
  3913  
  3914  		// Add this node into skip list.
  3915  		var (
  3916  			highestLocked        = -1 // the highest level being locked by this process
  3917  			valid                = true
  3918  			pred, succ, prevPred *intNodeDesc
  3919  		)
  3920  		if level == 0 {
  3921  			level = s.randomlevel()
  3922  			if level > hl {
  3923  				// If the highest level has been raised, it usually means that many goroutines
  3924  				// are inserting items. Hopefully we can find a better path in the next loop.
  3925  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  3926  				// but this strategy's performance is almost the same as the existing method.
  3927  				continue
  3928  			}
  3929  		}
  3930  		for layer := 0; valid && layer < level; layer++ {
  3931  			pred = preds[layer]   // target node's previous node
  3932  			succ = succs[layer]   // target node's next node
  3933  			if pred != prevPred { // the same predecessor can span several layers; lock it only once
  3934  				pred.mu.Lock()
  3935  				highestLocked = layer
  3936  				prevPred = pred
  3937  			}
  3938  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  3939  			// It is valid if:
  3940  			// 1. The previous node and next node both are not marked.
  3941  			// 2. The previous node's next node is succ in this layer.
  3942  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3943  		}
  3944  		if !valid {
  3945  			unlockIntDesc(preds, highestLocked)
  3946  			continue
  3947  		}
  3948  
  3949  		nn := newIntNodeDesc(key, value, level)
  3950  		for layer := 0; layer < level; layer++ {
  3951  			nn.storeNext(layer, succs[layer])
  3952  			preds[layer].atomicStoreNext(layer, nn)
  3953  		}
  3954  		nn.flags.SetTrue(fullyLinked)
  3955  		unlockIntDesc(preds, highestLocked)
  3956  		atomic.AddInt64(&s.length, 1)
  3957  		return value, false
  3958  	}
  3959  }
  3960  
  3961  // LoadOrStoreLazy returns the existing value for the key if present.
  3962  // Otherwise, it stores and returns the value computed by f; f is called only once.
  3963  // The loaded result is true if the value was loaded, false if stored.
  3964  // (Modified from LoadOrStore)
  3965  func (s *IntMapDesc) LoadOrStoreLazy(key int, f func() interface{}) (actual interface{}, loaded bool) {
  3966  	var (
  3967  		level        int
  3968  		preds, succs [maxLevel]*intNodeDesc
  3969  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  3970  	)
  3971  	for {
  3972  		nodeFound := s.findNode(key, &preds, &succs)
  3973  		if nodeFound != nil { // indicating the key is already in the skip-list
  3974  			if !nodeFound.flags.Get(marked) {
  3975  				// We don't need to care about whether or not the node is fully linked,
  3976  				// just return the value.
  3977  				return nodeFound.loadVal(), true
  3978  			}
  3979  			// If the node is marked, some other goroutine is in the process of deleting it,
  3980  			// so retry and add the node in the next loop.
  3981  			continue
  3982  		}
  3983  
  3984  		// Add this node into skip list.
  3985  		var (
  3986  			highestLocked        = -1 // the highest level being locked by this process
  3987  			valid                = true
  3988  			pred, succ, prevPred *intNodeDesc
  3989  		)
  3990  		if level == 0 {
  3991  			level = s.randomlevel()
  3992  			if level > hl {
  3993  				// If the highest level has been raised, it usually means that many goroutines
  3994  				// are inserting items. Hopefully we can find a better path in the next loop.
  3995  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  3996  				// but this strategy's performance is almost the same as the existing method.
  3997  				continue
  3998  			}
  3999  		}
  4000  		for layer := 0; valid && layer < level; layer++ {
  4001  			pred = preds[layer]   // target node's previous node
  4002  			succ = succs[layer]   // target node's next node
  4003  			if pred != prevPred { // the same predecessor can span several layers; lock it only once
  4004  				pred.mu.Lock()
  4005  				highestLocked = layer
  4006  				prevPred = pred
  4007  			}
  4008  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  4009  			// It is valid if:
  4010  			// 1. The previous node and next node both are not marked.
  4011  			// 2. The previous node's next node is succ in this layer.
  4012  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  4013  		}
  4014  		if !valid {
  4015  			unlockIntDesc(preds, highestLocked)
  4016  			continue
  4017  		}
  4018  		value := f()
  4019  		nn := newIntNodeDesc(key, value, level)
  4020  		for layer := 0; layer < level; layer++ {
  4021  			nn.storeNext(layer, succs[layer])
  4022  			preds[layer].atomicStoreNext(layer, nn)
  4023  		}
  4024  		nn.flags.SetTrue(fullyLinked)
  4025  		unlockIntDesc(preds, highestLocked)
  4026  		atomic.AddInt64(&s.length, 1)
  4027  		return value, false
  4028  	}
  4029  }
  4030  
  4031  // Delete deletes the value for a key.
  4032  func (s *IntMapDesc) Delete(key int) bool {
  4033  	var (
  4034  		nodeToDelete *intNodeDesc
  4035  		isMarked     bool // records whether this operation marked the node
  4036  		topLayer     = -1
  4037  		preds, succs [maxLevel]*intNodeDesc
  4038  	)
  4039  	for {
  4040  		lFound := s.findNodeDelete(key, &preds, &succs)
  4041  		if isMarked || // this goroutine marked the node, or the node was found in the skip list
  4042  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4043  			if !isMarked { // the node has not been marked yet; mark it now
  4044  				nodeToDelete = succs[lFound]
  4045  				topLayer = lFound
  4046  				nodeToDelete.mu.Lock()
  4047  				if nodeToDelete.flags.Get(marked) {
  4048  					// The node is marked by another process,
  4049  					// the physical deletion will be accomplished by another process.
  4050  					nodeToDelete.mu.Unlock()
  4051  					return false
  4052  				}
  4053  				nodeToDelete.flags.SetTrue(marked)
  4054  				isMarked = true
  4055  			}
  4056  			// Accomplish the physical deletion.
  4057  			var (
  4058  				highestLocked        = -1 // the highest level being locked by this process
  4059  				valid                = true
  4060  				pred, succ, prevPred *intNodeDesc
  4061  			)
  4062  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4063  				pred, succ = preds[layer], succs[layer]
  4064  				if pred != prevPred { // the same predecessor can span several layers; lock it only once
  4065  					pred.mu.Lock()
  4066  					highestLocked = layer
  4067  					prevPred = pred
  4068  				}
  4069  				// valid checks whether another node has been inserted into this layer during
  4070  				// this process, or whether the predecessor has been deleted by another goroutine.
  4071  				// It is valid if:
  4072  				// 1. the previous node exists (it is not marked), and
  4073  				// 2. no other node has been inserted into this layer (pred still points to succ).
  4074  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  4075  			}
  4076  			if !valid {
  4077  				unlockIntDesc(preds, highestLocked)
  4078  				continue
  4079  			}
  4080  			for i := topLayer; i >= 0; i-- {
  4081  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  4082  				// so the non-atomic loadNext is sufficient here.
  4083  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  4084  			}
  4085  			nodeToDelete.mu.Unlock()
  4086  			unlockIntDesc(preds, highestLocked)
  4087  			atomic.AddInt64(&s.length, -1)
  4088  			return true
  4089  		}
  4090  		return false
  4091  	}
  4092  }
  4093  
  4094  // Range calls f sequentially for each key and value present in the skipmap.
  4095  // If f returns false, range stops the iteration.
  4096  //
  4097  // Range does not necessarily correspond to any consistent snapshot of the Map's
  4098  // contents: no key will be visited more than once, but if the value for any key
  4099  // is stored or deleted concurrently, Range may reflect any mapping for that key
  4100  // from any point during the Range call.
  4101  func (s *IntMapDesc) Range(f func(key int, value interface{}) bool) {
  4102  	x := s.header.atomicLoadNext(0)
  4103  	for x != nil {
  4104  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4105  			x = x.atomicLoadNext(0)
  4106  			continue
  4107  		}
  4108  		if !f(x.key, x.loadVal()) {
  4109  			break
  4110  		}
  4111  		x = x.atomicLoadNext(0)
  4112  	}
  4113  }
  4114  
  4115  // Len returns the length of this skipmap.
  4116  func (s *IntMapDesc) Len() int {
  4117  	return int(atomic.LoadInt64(&s.length))
  4118  }
  4119  
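        // IntMap and IntMapDesc differ only in iteration order, which follows from
        // the inverted lessthan above. A sketch of the contrast (illustrative only):
        //
        //	asc, desc := skipmap.NewInt(), skipmap.NewIntDesc()
        //	for _, k := range []int{2, 1, 3} {
        //		asc.Store(k, nil)
        //		desc.Store(k, nil)
        //	}
        //	asc.Range(func(k int, _ interface{}) bool { fmt.Print(k, " "); return true })  // 1 2 3
        //	desc.Range(func(k int, _ interface{}) bool { fmt.Print(k, " "); return true }) // 3 2 1
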
  4120  // Int8Map represents a map based on skip list in ascending order.
  4121  type Int8Map struct {
  4122  	header       *int8Node
  4123  	length       int64
  4124  	highestLevel int64 // highest level for now
  4125  }
  4126  
  4127  type int8Node struct {
  4128  	key   int8
  4129  	value unsafe.Pointer // *interface{}
  4130  	next  optionalArray  // [level]*int8Node
  4131  	mu    sync.Mutex
  4132  	flags bitflag
  4133  	level uint32
  4134  }
  4135  
  4136  func newInt8Node(key int8, value interface{}, level int) *int8Node {
  4137  	node := &int8Node{
  4138  		key:   key,
  4139  		level: uint32(level),
  4140  	}
  4141  	node.storeVal(value)
  4142  	if level > op1 {
  4143  		node.next.extra = new([op2]unsafe.Pointer)
  4144  	}
  4145  	return node
  4146  }
  4147  
  4148  func (n *int8Node) storeVal(value interface{}) {
  4149  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  4150  }
  4151  
  4152  func (n *int8Node) loadVal() interface{} {
  4153  	return *(*interface{})(atomic.LoadPointer(&n.value))
  4154  }
  4155  
  4156  func (n *int8Node) loadNext(i int) *int8Node {
  4157  	return (*int8Node)(n.next.load(i))
  4158  }
  4159  
  4160  func (n *int8Node) storeNext(i int, node *int8Node) {
  4161  	n.next.store(i, unsafe.Pointer(node))
  4162  }
  4163  
  4164  func (n *int8Node) atomicLoadNext(i int) *int8Node {
  4165  	return (*int8Node)(n.next.atomicLoad(i))
  4166  }
  4167  
  4168  func (n *int8Node) atomicStoreNext(i int, node *int8Node) {
  4169  	n.next.atomicStore(i, unsafe.Pointer(node))
  4170  }
  4171  
  4172  func (n *int8Node) lessthan(key int8) bool {
  4173  	return n.key < key
  4174  }
  4175  
  4176  func (n *int8Node) equal(key int8) bool {
  4177  	return n.key == key
  4178  }
  4179  
  4180  // NewInt8 returns an empty int8 skipmap.
  4181  func NewInt8() *Int8Map {
  4182  	h := newInt8Node(0, "", maxLevel)
  4183  	h.flags.SetTrue(fullyLinked)
  4184  	return &Int8Map{
  4185  		header:       h,
  4186  		highestLevel: defaultHighestLevel,
  4187  	}
  4188  }
  4189  
  4190  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  4191  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  4192  // (Unlike findNodeDelete, it does not record the full path: it returns as soon as the node is found.)
  4193  func (s *Int8Map) findNode(key int8, preds *[maxLevel]*int8Node, succs *[maxLevel]*int8Node) *int8Node {
  4194  	x := s.header
  4195  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4196  		succ := x.atomicLoadNext(i)
  4197  		for succ != nil && succ.lessthan(key) {
  4198  			x = succ
  4199  			succ = x.atomicLoadNext(i)
  4200  		}
  4201  		preds[i] = x
  4202  		succs[i] = succ
  4203  
  4204  		// Check if the key is already in the skipmap.
  4205  		if succ != nil && succ.equal(key) {
  4206  			return succ
  4207  		}
  4208  	}
  4209  	return nil
  4210  }
  4211  
  4212  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  4213  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  4214  func (s *Int8Map) findNodeDelete(key int8, preds *[maxLevel]*int8Node, succs *[maxLevel]*int8Node) int {
  4215  	// lFound represents the index of the first layer at which it found a node.
  4216  	lFound, x := -1, s.header
  4217  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4218  		succ := x.atomicLoadNext(i)
  4219  		for succ != nil && succ.lessthan(key) {
  4220  			x = succ
  4221  			succ = x.atomicLoadNext(i)
  4222  		}
  4223  		preds[i] = x
  4224  		succs[i] = succ
  4225  
  4226  		// Check if the key is already in the skip list.
  4227  		if lFound == -1 && succ != nil && succ.equal(key) {
  4228  			lFound = i
  4229  		}
  4230  	}
  4231  	return lFound
  4232  }
  4233  
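        // unlockInt8 releases the locks taken on preds[0] through preds[highestLevel].
        // The same predecessor node may guard several consecutive layers, so each
        // distinct node is unlocked exactly once.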
  4234  func unlockInt8(preds [maxLevel]*int8Node, highestLevel int) {
  4235  	var prevPred *int8Node
  4236  	for i := highestLevel; i >= 0; i-- {
  4237  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  4238  			preds[i].mu.Unlock()
  4239  			prevPred = preds[i]
  4240  		}
  4241  	}
  4242  }
  4243  
  4244  // Store sets the value for a key.
  4245  func (s *Int8Map) Store(key int8, value interface{}) {
  4246  	level := s.randomlevel()
  4247  	var preds, succs [maxLevel]*int8Node
  4248  	for {
  4249  		nodeFound := s.findNode(key, &preds, &succs)
  4250  		if nodeFound != nil { // indicating the key is already in the skip-list
  4251  			if !nodeFound.flags.Get(marked) {
  4252  				// We don't need to care about whether or not the node is fully linked,
  4253  				// just replace the value.
  4254  				nodeFound.storeVal(value)
  4255  				return
  4256  			}
  4257  			// If the node is marked, another goroutine is in the process of deleting it,
  4258  			// so retry the insertion in the next iteration.
  4259  			continue
  4260  		}
  4261  
  4262  		// Add this node into the skip list.
  4263  		var (
  4264  			highestLocked        = -1 // the highest level being locked by this process
  4265  			valid                = true
  4266  			pred, succ, prevPred *int8Node
  4267  		)
  4268  		for layer := 0; valid && layer < level; layer++ {
  4269  			pred = preds[layer]   // target node's previous node
  4270  			succ = succs[layer]   // target node's next node
  4271  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4272  				pred.mu.Lock()
  4273  				highestLocked = layer
  4274  				prevPred = pred
  4275  			}
  4276  			// valid checks whether another node was inserted into this layer during this operation.
  4277  			// The layer is valid if:
  4278  			// 1. Neither the previous node nor the next node is marked.
  4279  			// 2. The previous node's next node in this layer is succ.
  4280  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4281  		}
  4282  		if !valid {
  4283  			unlockInt8(preds, highestLocked)
  4284  			continue
  4285  		}
  4286  
  4287  		nn := newInt8Node(key, value, level)
  4288  		for layer := 0; layer < level; layer++ {
  4289  			nn.storeNext(layer, succs[layer])
  4290  			preds[layer].atomicStoreNext(layer, nn)
  4291  		}
  4292  		nn.flags.SetTrue(fullyLinked)
  4293  		unlockInt8(preds, highestLocked)
  4294  		atomic.AddInt64(&s.length, 1)
  4295  		return
  4296  	}
  4297  }
  4298  
  4299  func (s *Int8Map) randomlevel() int {
  4300  	// Generate random level.
  4301  	level := randomLevel()
  4302  	// Update highest level if possible.
  4303  	for {
  4304  		hl := atomic.LoadInt64(&s.highestLevel)
  4305  		if int64(level) <= hl {
  4306  			break
  4307  		}
  4308  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  4309  			break
  4310  		}
  4311  	}
  4312  	return level
  4313  }
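        // randomLevel itself is defined elsewhere in this package and is not shown
        // here. A minimal sketch of the usual technique (growing the level with a
        // fixed probability per step, capped at maxLevel) might look like the
        // following; this is an illustrative assumption using math/rand, not this
        // package's actual implementation:
        //
        //	func randomLevelSketch() int { // hypothetical helper
        //		level := 1
        //		for level < maxLevel && rand.Int63n(4) == 0 { // p = 1/4 per extra level
        //			level++
        //		}
        //		return level
        //	}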
  4314  
  4315  // Load returns the value stored in the map for a key, or nil if no
  4316  // value is present.
  4317  // The ok result indicates whether value was found in the map.
  4318  func (s *Int8Map) Load(key int8) (value interface{}, ok bool) {
  4319  	x := s.header
  4320  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4321  		nex := x.atomicLoadNext(i)
  4322  		for nex != nil && nex.lessthan(key) {
  4323  			x = nex
  4324  			nex = x.atomicLoadNext(i)
  4325  		}
  4326  
  4327  		// Check if the key is already in the skip list.
  4328  		if nex != nil && nex.equal(key) {
  4329  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  4330  				return nex.loadVal(), true
  4331  			}
  4332  			return nil, false
  4333  		}
  4334  	}
  4335  	return nil, false
  4336  }
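        // Editorial sketch: storing and loading a value on an Int8Map (fmt is
        // assumed available at the call site):
        //
        //	m := NewInt8()
        //	m.Store(7, "seven")
        //	if v, ok := m.Load(7); ok {
        //		fmt.Println(v) // seven
        //	}
        //	if _, ok := m.Load(8); !ok {
        //		fmt.Println("key 8 is absent")
        //	}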
  4337  
  4338  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  4339  // The loaded result reports whether the key was present.
  4340  // (Modified from Delete)
  4341  func (s *Int8Map) LoadAndDelete(key int8) (value interface{}, loaded bool) {
  4342  	var (
  4343  		nodeToDelete *int8Node
  4344  		isMarked     bool // reports whether this operation marked the node
  4345  		topLayer     = -1
  4346  		preds, succs [maxLevel]*int8Node
  4347  	)
  4348  	for {
  4349  		lFound := s.findNodeDelete(key, &preds, &succs)
  4350  		if isMarked || // this operation marked the node, or the node was found in the skip list
  4351  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4352  			if !isMarked { // the node has not been marked yet
  4353  				nodeToDelete = succs[lFound]
  4354  				topLayer = lFound
  4355  				nodeToDelete.mu.Lock()
  4356  				if nodeToDelete.flags.Get(marked) {
  4357  					// The node is marked by another process,
  4358  					// the physical deletion will be accomplished by another process.
  4359  					nodeToDelete.mu.Unlock()
  4360  					return nil, false
  4361  				}
  4362  				nodeToDelete.flags.SetTrue(marked)
  4363  				isMarked = true
  4364  			}
  4365  			// Accomplish the physical deletion.
  4366  			var (
  4367  				highestLocked        = -1 // the highest level being locked by this process
  4368  				valid                = true
  4369  				pred, succ, prevPred *int8Node
  4370  			)
  4371  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4372  				pred, succ = preds[layer], succs[layer]
  4373  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4374  					pred.mu.Lock()
  4375  					highestLocked = layer
  4376  					prevPred = pred
  4377  				}
  4378  				// valid checks whether another node was inserted into this layer during this
  4379  				// operation, or whether the predecessor was deleted by another operation.
  4380  				// The layer is valid if:
  4381  				// 1. The previous node still exists (it is not marked).
  4382  				// 2. No other node was inserted into this layer.
  4383  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4384  			}
  4385  			if !valid {
  4386  				unlockInt8(preds, highestLocked)
  4387  				continue
  4388  			}
  4389  			for i := topLayer; i >= 0; i-- {
  4390  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  4391  				// so the non-atomic `loadNext` is sufficient here.
  4392  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  4393  			}
  4394  			nodeToDelete.mu.Unlock()
  4395  			unlockInt8(preds, highestLocked)
  4396  			atomic.AddInt64(&s.length, -1)
  4397  			return nodeToDelete.loadVal(), true
  4398  		}
  4399  		return nil, false
  4400  	}
  4401  }
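        // Editorial sketch: because LoadAndDelete is atomic, at most one of several
        // competing goroutines observes loaded == true for a given key, making it a
        // natural "claim and remove" primitive (handle is a hypothetical consumer):
        //
        //	if v, loaded := m.LoadAndDelete(7); loaded {
        //		handle(v)
        //	}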
  4402  
  4403  // LoadOrStore returns the existing value for the key if present.
  4404  // Otherwise, it stores and returns the given value.
  4405  // The loaded result is true if the value was loaded, false if stored.
  4406  // (Modified from Store)
  4407  func (s *Int8Map) LoadOrStore(key int8, value interface{}) (actual interface{}, loaded bool) {
  4408  	var (
  4409  		level        int
  4410  		preds, succs [maxLevel]*int8Node
  4411  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  4412  	)
  4413  	for {
  4414  		nodeFound := s.findNode(key, &preds, &succs)
  4415  		if nodeFound != nil { // indicating the key is already in the skip-list
  4416  			if !nodeFound.flags.Get(marked) {
  4417  				// We don't need to care about whether or not the node is fully linked,
  4418  				// just return the value.
  4419  				return nodeFound.loadVal(), true
  4420  			}
  4421  			// If the node is marked, another goroutine is in the process of deleting it,
  4422  			// so retry the insertion in the next iteration.
  4423  			continue
  4424  		}
  4425  
  4426  		// Add this node into the skip list.
  4427  		var (
  4428  			highestLocked        = -1 // the highest level being locked by this process
  4429  			valid                = true
  4430  			pred, succ, prevPred *int8Node
  4431  		)
  4432  		if level == 0 {
  4433  			level = s.randomlevel()
  4434  			if level > hl {
  4435  				// If the highest level was updated, it usually means that many goroutines
  4436  				// are inserting items; hopefully we can find a better path in the next iteration.
  4437  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  4438  				// but this strategy's performance is almost the same as the existing method.
  4439  				continue
  4440  			}
  4441  		}
  4442  		for layer := 0; valid && layer < level; layer++ {
  4443  			pred = preds[layer]   // target node's previous node
  4444  			succ = succs[layer]   // target node's next node
  4445  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4446  				pred.mu.Lock()
  4447  				highestLocked = layer
  4448  				prevPred = pred
  4449  			}
  4450  			// valid checks whether another node was inserted into this layer during this operation.
  4451  			// The layer is valid if:
  4452  			// 1. Neither the previous node nor the next node is marked.
  4453  			// 2. The previous node's next node in this layer is succ.
  4454  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4455  		}
  4456  		if !valid {
  4457  			unlockInt8(preds, highestLocked)
  4458  			continue
  4459  		}
  4460  
  4461  		nn := newInt8Node(key, value, level)
  4462  		for layer := 0; layer < level; layer++ {
  4463  			nn.storeNext(layer, succs[layer])
  4464  			preds[layer].atomicStoreNext(layer, nn)
  4465  		}
  4466  		nn.flags.SetTrue(fullyLinked)
  4467  		unlockInt8(preds, highestLocked)
  4468  		atomic.AddInt64(&s.length, 1)
  4469  		return value, false
  4470  	}
  4471  }
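        // Editorial sketch: LoadOrStore behaves as an atomic insert-if-absent; the
        // first store for a key wins, and later callers receive the existing value:
        //
        //	actual, loaded := m.LoadOrStore(3, "first")  // stores: actual == "first", loaded == false
        //	actual, loaded = m.LoadOrStore(3, "second")  // loads:  actual == "first", loaded == true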
  4472  
  4473  // LoadOrStoreLazy returns the existing value for the key if present.
  4474  // Otherwise, it computes the value with f, stores it, and returns it; f is called at most once.
  4475  // The loaded result is true if the value was loaded, false if stored.
  4476  // (Modified from LoadOrStore)
  4477  func (s *Int8Map) LoadOrStoreLazy(key int8, f func() interface{}) (actual interface{}, loaded bool) {
  4478  	var (
  4479  		level        int
  4480  		preds, succs [maxLevel]*int8Node
  4481  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  4482  	)
  4483  	for {
  4484  		nodeFound := s.findNode(key, &preds, &succs)
  4485  		if nodeFound != nil { // indicating the key is already in the skip-list
  4486  			if !nodeFound.flags.Get(marked) {
  4487  				// We don't need to care about whether or not the node is fully linked,
  4488  				// just return the value.
  4489  				return nodeFound.loadVal(), true
  4490  			}
  4491  			// If the node is marked, another goroutine is in the process of deleting it,
  4492  			// so retry the insertion in the next iteration.
  4493  			continue
  4494  		}
  4495  
  4496  		// Add this node into the skip list.
  4497  		var (
  4498  			highestLocked        = -1 // the highest level being locked by this process
  4499  			valid                = true
  4500  			pred, succ, prevPred *int8Node
  4501  		)
  4502  		if level == 0 {
  4503  			level = s.randomlevel()
  4504  			if level > hl {
  4505  				// If the highest level was updated, it usually means that many goroutines
  4506  				// are inserting items; hopefully we can find a better path in the next iteration.
  4507  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  4508  				// but this strategy's performance is almost the same as the existing method.
  4509  				continue
  4510  			}
  4511  		}
  4512  		for layer := 0; valid && layer < level; layer++ {
  4513  			pred = preds[layer]   // target node's previous node
  4514  			succ = succs[layer]   // target node's next node
  4515  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4516  				pred.mu.Lock()
  4517  				highestLocked = layer
  4518  				prevPred = pred
  4519  			}
  4520  			// valid checks whether another node was inserted into this layer during this operation.
  4521  			// The layer is valid if:
  4522  			// 1. Neither the previous node nor the next node is marked.
  4523  			// 2. The previous node's next node in this layer is succ.
  4524  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  4525  		}
  4526  		if !valid {
  4527  			unlockInt8(preds, highestLocked)
  4528  			continue
  4529  		}
  4530  		value := f()
  4531  		nn := newInt8Node(key, value, level)
  4532  		for layer := 0; layer < level; layer++ {
  4533  			nn.storeNext(layer, succs[layer])
  4534  			preds[layer].atomicStoreNext(layer, nn)
  4535  		}
  4536  		nn.flags.SetTrue(fullyLinked)
  4537  		unlockInt8(preds, highestLocked)
  4538  		atomic.AddInt64(&s.length, 1)
  4539  		return value, false
  4540  	}
  4541  }
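        // Editorial sketch: LoadOrStoreLazy defers building the value until the key
        // is known to be absent and the insertion point is locked, so an expensive
        // constructor runs at most once per key (buildValue is hypothetical):
        //
        //	v, loaded := m.LoadOrStoreLazy(5, func() interface{} {
        //		return buildValue()
        //	})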
  4542  
  4543  // Delete deletes the value for a key.
  4544  func (s *Int8Map) Delete(key int8) bool {
  4545  	var (
  4546  		nodeToDelete *int8Node
  4547  		isMarked     bool // reports whether this operation marked the node
  4548  		topLayer     = -1
  4549  		preds, succs [maxLevel]*int8Node
  4550  	)
  4551  	for {
  4552  		lFound := s.findNodeDelete(key, &preds, &succs)
  4553  		if isMarked || // this operation marked the node, or the node was found in the skip list
  4554  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4555  			if !isMarked { // the node has not been marked yet
  4556  				nodeToDelete = succs[lFound]
  4557  				topLayer = lFound
  4558  				nodeToDelete.mu.Lock()
  4559  				if nodeToDelete.flags.Get(marked) {
  4560  					// The node is marked by another process,
  4561  					// the physical deletion will be accomplished by another process.
  4562  					nodeToDelete.mu.Unlock()
  4563  					return false
  4564  				}
  4565  				nodeToDelete.flags.SetTrue(marked)
  4566  				isMarked = true
  4567  			}
  4568  			// Accomplish the physical deletion.
  4569  			var (
  4570  				highestLocked        = -1 // the highest level being locked by this process
  4571  				valid                = true
  4572  				pred, succ, prevPred *int8Node
  4573  			)
  4574  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4575  				pred, succ = preds[layer], succs[layer]
  4576  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4577  					pred.mu.Lock()
  4578  					highestLocked = layer
  4579  					prevPred = pred
  4580  				}
  4581  				// valid checks whether another node was inserted into this layer during this
  4582  				// operation, or whether the predecessor was deleted by another operation.
  4583  				// The layer is valid if:
  4584  				// 1. The previous node still exists (it is not marked).
  4585  				// 2. No other node was inserted into this layer.
  4586  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  4587  			}
  4588  			if !valid {
  4589  				unlockInt8(preds, highestLocked)
  4590  				continue
  4591  			}
  4592  			for i := topLayer; i >= 0; i-- {
  4593  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  4594  				// so the non-atomic `loadNext` is sufficient here.
  4595  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  4596  			}
  4597  			nodeToDelete.mu.Unlock()
  4598  			unlockInt8(preds, highestLocked)
  4599  			atomic.AddInt64(&s.length, -1)
  4600  			return true
  4601  		}
  4602  		return false
  4603  	}
  4604  }
  4605  
  4606  // Range calls f sequentially for each key and value present in the skipmap.
  4607  // If f returns false, range stops the iteration.
  4608  //
  4609  // Range does not necessarily correspond to any consistent snapshot of the Map's
  4610  // contents: no key will be visited more than once, but if the value for any key
  4611  // is stored or deleted concurrently, Range may reflect any mapping for that key
  4612  // from any point during the Range call.
  4613  func (s *Int8Map) Range(f func(key int8, value interface{}) bool) {
  4614  	x := s.header.atomicLoadNext(0)
  4615  	for x != nil {
  4616  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4617  			x = x.atomicLoadNext(0)
  4618  			continue
  4619  		}
  4620  		if !f(x.key, x.loadVal()) {
  4621  			break
  4622  		}
  4623  		x = x.atomicLoadNext(0)
  4624  	}
  4625  }
  4626  
  4627  // Len returns the length of this skipmap.
  4628  func (s *Int8Map) Len() int {
  4629  	return int(atomic.LoadInt64(&s.length))
  4630  }
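        // Editorial sketch: an Int8Map needs no external locking; concurrent writers
        // and readers are safe. Assumes sync and fmt are available at the call site:
        //
        //	m := NewInt8()
        //	var wg sync.WaitGroup
        //	for i := int8(0); i < 8; i++ {
        //		wg.Add(1)
        //		go func(k int8) {
        //			defer wg.Done()
        //			m.Store(k, int(k)*10)
        //		}(i)
        //	}
        //	wg.Wait()
        //	fmt.Println(m.Len()) // 8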
  4631  
  4632  // Int8MapDesc represents a map based on skip list in descending order.
  4633  type Int8MapDesc struct {
  4634  	header       *int8NodeDesc
  4635  	length       int64
  4636  	highestLevel int64 // highest level for now
  4637  }
  4638  
  4639  type int8NodeDesc struct {
  4640  	key   int8
  4641  	value unsafe.Pointer // *interface{}
  4642  	next  optionalArray  // [level]*int8NodeDesc
  4643  	mu    sync.Mutex
  4644  	flags bitflag
  4645  	level uint32
  4646  }
  4647  
  4648  func newInt8NodeDesc(key int8, value interface{}, level int) *int8NodeDesc {
  4649  	node := &int8NodeDesc{
  4650  		key:   key,
  4651  		level: uint32(level),
  4652  	}
  4653  	node.storeVal(value)
  4654  	if level > op1 {
  4655  		node.next.extra = new([op2]unsafe.Pointer)
  4656  	}
  4657  	return node
  4658  }
  4659  
  4660  func (n *int8NodeDesc) storeVal(value interface{}) {
  4661  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  4662  }
  4663  
  4664  func (n *int8NodeDesc) loadVal() interface{} {
  4665  	return *(*interface{})(atomic.LoadPointer(&n.value))
  4666  }
  4667  
  4668  func (n *int8NodeDesc) loadNext(i int) *int8NodeDesc {
  4669  	return (*int8NodeDesc)(n.next.load(i))
  4670  }
  4671  
  4672  func (n *int8NodeDesc) storeNext(i int, node *int8NodeDesc) {
  4673  	n.next.store(i, unsafe.Pointer(node))
  4674  }
  4675  
  4676  func (n *int8NodeDesc) atomicLoadNext(i int) *int8NodeDesc {
  4677  	return (*int8NodeDesc)(n.next.atomicLoad(i))
  4678  }
  4679  
  4680  func (n *int8NodeDesc) atomicStoreNext(i int, node *int8NodeDesc) {
  4681  	n.next.atomicStore(i, unsafe.Pointer(node))
  4682  }
  4683  
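        // Note: for the descending map, lessthan is deliberately inverted ("less"
        // means closer to the head of the list, i.e. a larger key), so the shared
        // search logic walks keys from high to low without further changes.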
  4684  func (n *int8NodeDesc) lessthan(key int8) bool {
  4685  	return n.key > key
  4686  }
  4687  
  4688  func (n *int8NodeDesc) equal(key int8) bool {
  4689  	return n.key == key
  4690  }
  4691  
  4692  // NewInt8Desc returns an empty int8 skipmap in descending order.
  4693  func NewInt8Desc() *Int8MapDesc {
  4694  	h := newInt8NodeDesc(0, "", maxLevel)
  4695  	h.flags.SetTrue(fullyLinked)
  4696  	return &Int8MapDesc{
  4697  		header:       h,
  4698  		highestLevel: defaultHighestLevel,
  4699  	}
  4700  }
  4701  
  4702  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  4703  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key (descending order).
  4704  // (Unlike findNodeDelete, it does not record the full path: it returns as soon as the node is found.)
  4705  func (s *Int8MapDesc) findNode(key int8, preds *[maxLevel]*int8NodeDesc, succs *[maxLevel]*int8NodeDesc) *int8NodeDesc {
  4706  	x := s.header
  4707  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4708  		succ := x.atomicLoadNext(i)
  4709  		for succ != nil && succ.lessthan(key) {
  4710  			x = succ
  4711  			succ = x.atomicLoadNext(i)
  4712  		}
  4713  		preds[i] = x
  4714  		succs[i] = succ
  4715  
  4716  		// Check if the key is already in the skipmap.
  4717  		if succ != nil && succ.equal(key) {
  4718  			return succ
  4719  		}
  4720  	}
  4721  	return nil
  4722  }
  4723  
  4724  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  4725  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key (descending order).
  4726  func (s *Int8MapDesc) findNodeDelete(key int8, preds *[maxLevel]*int8NodeDesc, succs *[maxLevel]*int8NodeDesc) int {
  4727  	// lFound represents the index of the first layer at which it found a node.
  4728  	lFound, x := -1, s.header
  4729  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4730  		succ := x.atomicLoadNext(i)
  4731  		for succ != nil && succ.lessthan(key) {
  4732  			x = succ
  4733  			succ = x.atomicLoadNext(i)
  4734  		}
  4735  		preds[i] = x
  4736  		succs[i] = succ
  4737  
  4738  		// Check if the key is already in the skip list.
  4739  		if lFound == -1 && succ != nil && succ.equal(key) {
  4740  			lFound = i
  4741  		}
  4742  	}
  4743  	return lFound
  4744  }
  4745  
  4746  func unlockInt8Desc(preds [maxLevel]*int8NodeDesc, highestLevel int) {
  4747  	var prevPred *int8NodeDesc
  4748  	for i := highestLevel; i >= 0; i-- {
  4749  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  4750  			preds[i].mu.Unlock()
  4751  			prevPred = preds[i]
  4752  		}
  4753  	}
  4754  }
  4755  
  4756  // Store sets the value for a key.
  4757  func (s *Int8MapDesc) Store(key int8, value interface{}) {
  4758  	level := s.randomlevel()
  4759  	var preds, succs [maxLevel]*int8NodeDesc
  4760  	for {
  4761  		nodeFound := s.findNode(key, &preds, &succs)
  4762  		if nodeFound != nil { // indicating the key is already in the skip-list
  4763  			if !nodeFound.flags.Get(marked) {
  4764  				// We don't need to care about whether or not the node is fully linked,
  4765  				// just replace the value.
  4766  				nodeFound.storeVal(value)
  4767  				return
  4768  			}
  4769  			// If the node is marked, another goroutine is in the process of deleting it,
  4770  			// so retry the insertion in the next iteration.
  4771  			continue
  4772  		}
  4773  
  4774  		// Add this node into the skip list.
  4775  		var (
  4776  			highestLocked        = -1 // the highest level being locked by this process
  4777  			valid                = true
  4778  			pred, succ, prevPred *int8NodeDesc
  4779  		)
  4780  		for layer := 0; valid && layer < level; layer++ {
  4781  			pred = preds[layer]   // target node's previous node
  4782  			succ = succs[layer]   // target node's next node
  4783  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4784  				pred.mu.Lock()
  4785  				highestLocked = layer
  4786  				prevPred = pred
  4787  			}
  4788  			// valid checks whether another node was inserted into this layer during this operation.
  4789  			// The layer is valid if:
  4790  			// 1. Neither the previous node nor the next node is marked.
  4791  			// 2. The previous node's next node in this layer is succ.
  4792  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4793  		}
  4794  		if !valid {
  4795  			unlockInt8Desc(preds, highestLocked)
  4796  			continue
  4797  		}
  4798  
  4799  		nn := newInt8NodeDesc(key, value, level)
  4800  		for layer := 0; layer < level; layer++ {
  4801  			nn.storeNext(layer, succs[layer])
  4802  			preds[layer].atomicStoreNext(layer, nn)
  4803  		}
  4804  		nn.flags.SetTrue(fullyLinked)
  4805  		unlockInt8Desc(preds, highestLocked)
  4806  		atomic.AddInt64(&s.length, 1)
  4807  		return
  4808  	}
  4809  }
  4810  
  4811  func (s *Int8MapDesc) randomlevel() int {
  4812  	// Generate random level.
  4813  	level := randomLevel()
  4814  	// Update highest level if possible.
  4815  	for {
  4816  		hl := atomic.LoadInt64(&s.highestLevel)
  4817  		if int64(level) <= hl {
  4818  			break
  4819  		}
  4820  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  4821  			break
  4822  		}
  4823  	}
  4824  	return level
  4825  }
  4826  
  4827  // Load returns the value stored in the map for a key, or nil if no
  4828  // value is present.
  4829  // The ok result indicates whether value was found in the map.
  4830  func (s *Int8MapDesc) Load(key int8) (value interface{}, ok bool) {
  4831  	x := s.header
  4832  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4833  		nex := x.atomicLoadNext(i)
  4834  		for nex != nil && nex.lessthan(key) {
  4835  			x = nex
  4836  			nex = x.atomicLoadNext(i)
  4837  		}
  4838  
  4839  		// Check if the key is already in the skip list.
  4840  		if nex != nil && nex.equal(key) {
  4841  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  4842  				return nex.loadVal(), true
  4843  			}
  4844  			return nil, false
  4845  		}
  4846  	}
  4847  	return nil, false
  4848  }
  4849  
  4850  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  4851  // The loaded result reports whether the key was present.
  4852  // (Modified from Delete)
  4853  func (s *Int8MapDesc) LoadAndDelete(key int8) (value interface{}, loaded bool) {
  4854  	var (
  4855  		nodeToDelete *int8NodeDesc
  4856  		isMarked     bool // reports whether this operation marked the node
  4857  		topLayer     = -1
  4858  		preds, succs [maxLevel]*int8NodeDesc
  4859  	)
  4860  	for {
  4861  		lFound := s.findNodeDelete(key, &preds, &succs)
  4862  		if isMarked || // this operation marked the node, or the node was found in the skip list
  4863  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4864  			if !isMarked { // the node has not been marked yet
  4865  				nodeToDelete = succs[lFound]
  4866  				topLayer = lFound
  4867  				nodeToDelete.mu.Lock()
  4868  				if nodeToDelete.flags.Get(marked) {
  4869  					// The node is marked by another process,
  4870  					// the physical deletion will be accomplished by another process.
  4871  					nodeToDelete.mu.Unlock()
  4872  					return nil, false
  4873  				}
  4874  				nodeToDelete.flags.SetTrue(marked)
  4875  				isMarked = true
  4876  			}
  4877  			// Accomplish the physical deletion.
  4878  			var (
  4879  				highestLocked        = -1 // the highest level being locked by this process
  4880  				valid                = true
  4881  				pred, succ, prevPred *int8NodeDesc
  4882  			)
  4883  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4884  				pred, succ = preds[layer], succs[layer]
  4885  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4886  					pred.mu.Lock()
  4887  					highestLocked = layer
  4888  					prevPred = pred
  4889  				}
  4890  				// valid checks whether another node was inserted into this layer during this
  4891  				// operation, or whether the predecessor was deleted by another operation.
  4892  				// The layer is valid if:
  4893  				// 1. The previous node still exists (it is not marked).
  4894  				// 2. No other node was inserted into this layer.
  4895  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4896  			}
  4897  			if !valid {
  4898  				unlockInt8Desc(preds, highestLocked)
  4899  				continue
  4900  			}
  4901  			for i := topLayer; i >= 0; i-- {
  4902  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  4903  				// so the non-atomic `loadNext` is sufficient here.
  4904  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  4905  			}
  4906  			nodeToDelete.mu.Unlock()
  4907  			unlockInt8Desc(preds, highestLocked)
  4908  			atomic.AddInt64(&s.length, -1)
  4909  			return nodeToDelete.loadVal(), true
  4910  		}
  4911  		return nil, false
  4912  	}
  4913  }
  4914  
  4915  // LoadOrStore returns the existing value for the key if present.
  4916  // Otherwise, it stores and returns the given value.
  4917  // The loaded result is true if the value was loaded, false if stored.
  4918  // (Modified from Store)
  4919  func (s *Int8MapDesc) LoadOrStore(key int8, value interface{}) (actual interface{}, loaded bool) {
  4920  	var (
  4921  		level        int
  4922  		preds, succs [maxLevel]*int8NodeDesc
  4923  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  4924  	)
  4925  	for {
  4926  		nodeFound := s.findNode(key, &preds, &succs)
  4927  		if nodeFound != nil { // indicating the key is already in the skip-list
  4928  			if !nodeFound.flags.Get(marked) {
  4929  				// We don't need to care about whether or not the node is fully linked,
  4930  				// just return the value.
  4931  				return nodeFound.loadVal(), true
  4932  			}
  4933  			// If the node is marked, another goroutine is in the process of deleting it,
  4934  			// so retry the insertion in the next iteration.
  4935  			continue
  4936  		}
  4937  
  4938  		// Add this node into the skip list.
  4939  		var (
  4940  			highestLocked        = -1 // the highest level being locked by this process
  4941  			valid                = true
  4942  			pred, succ, prevPred *int8NodeDesc
  4943  		)
  4944  		if level == 0 {
  4945  			level = s.randomlevel()
  4946  			if level > hl {
  4947  				// If the highest level was updated, it usually means that many goroutines
  4948  				// are inserting items; hopefully we can find a better path in the next iteration.
  4949  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  4950  				// but this strategy's performance is almost the same as the existing method.
  4951  				continue
  4952  			}
  4953  		}
  4954  		for layer := 0; valid && layer < level; layer++ {
  4955  			pred = preds[layer]   // target node's previous node
  4956  			succ = succs[layer]   // target node's next node
  4957  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4958  				pred.mu.Lock()
  4959  				highestLocked = layer
  4960  				prevPred = pred
  4961  			}
  4962  			// valid checks whether another node was inserted into this layer during this operation.
  4963  			// The layer is valid if:
  4964  			// 1. Neither the previous node nor the next node is marked.
  4965  			// 2. The previous node's next node in this layer is succ.
  4966  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4967  		}
  4968  		if !valid {
  4969  			unlockInt8Desc(preds, highestLocked)
  4970  			continue
  4971  		}
  4972  
  4973  		nn := newInt8NodeDesc(key, value, level)
  4974  		for layer := 0; layer < level; layer++ {
  4975  			nn.storeNext(layer, succs[layer])
  4976  			preds[layer].atomicStoreNext(layer, nn)
  4977  		}
  4978  		nn.flags.SetTrue(fullyLinked)
  4979  		unlockInt8Desc(preds, highestLocked)
  4980  		atomic.AddInt64(&s.length, 1)
  4981  		return value, false
  4982  	}
  4983  }
  4984  
  4985  // LoadOrStoreLazy returns the existing value for the key if present.
  4986  // Otherwise, it computes the value with f, stores it, and returns it; f is called at most once.
  4987  // The loaded result is true if the value was loaded, false if stored.
  4988  // (Modified from LoadOrStore)
  4989  func (s *Int8MapDesc) LoadOrStoreLazy(key int8, f func() interface{}) (actual interface{}, loaded bool) {
  4990  	var (
  4991  		level        int
  4992  		preds, succs [maxLevel]*int8NodeDesc
  4993  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  4994  	)
  4995  	for {
  4996  		nodeFound := s.findNode(key, &preds, &succs)
  4997  		if nodeFound != nil { // indicating the key is already in the skip-list
  4998  			if !nodeFound.flags.Get(marked) {
  4999  				// We don't need to care about whether or not the node is fully linked,
  5000  				// just return the value.
  5001  				return nodeFound.loadVal(), true
  5002  			}
  5003  			// If the node is marked, another goroutine is in the process of deleting it,
  5004  			// so retry the insertion in the next iteration.
  5005  			continue
  5006  		}
  5007  
  5008  		// Add this node into the skip list.
  5009  		var (
  5010  			highestLocked        = -1 // the highest level being locked by this process
  5011  			valid                = true
  5012  			pred, succ, prevPred *int8NodeDesc
  5013  		)
  5014  		if level == 0 {
  5015  			level = s.randomlevel()
  5016  			if level > hl {
  5017  				// If the highest level was updated, it usually means that many goroutines
  5018  				// are inserting items; hopefully we can find a better path in the next iteration.
  5019  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  5020  				// but this strategy's performance is almost the same as the existing method.
  5021  				continue
  5022  			}
  5023  		}
  5024  		for layer := 0; valid && layer < level; layer++ {
  5025  			pred = preds[layer]   // target node's previous node
  5026  			succ = succs[layer]   // target node's next node
  5027  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5028  				pred.mu.Lock()
  5029  				highestLocked = layer
  5030  				prevPred = pred
  5031  			}
  5032  			// valid checks whether another node was inserted into this layer during this operation.
  5033  			// The layer is valid if:
  5034  			// 1. Neither the previous node nor the next node is marked.
  5035  			// 2. The previous node's next node in this layer is succ.
  5036  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  5037  		}
  5038  		if !valid {
  5039  			unlockInt8Desc(preds, highestLocked)
  5040  			continue
  5041  		}
  5042  		value := f()
  5043  		nn := newInt8NodeDesc(key, value, level)
  5044  		for layer := 0; layer < level; layer++ {
  5045  			nn.storeNext(layer, succs[layer])
  5046  			preds[layer].atomicStoreNext(layer, nn)
  5047  		}
  5048  		nn.flags.SetTrue(fullyLinked)
  5049  		unlockInt8Desc(preds, highestLocked)
  5050  		atomic.AddInt64(&s.length, 1)
  5051  		return value, false
  5052  	}
  5053  }
  5054  
  5055  // Delete deletes the value for a key.
  5056  func (s *Int8MapDesc) Delete(key int8) bool {
  5057  	var (
  5058  		nodeToDelete *int8NodeDesc
  5059  		isMarked     bool // reports whether this operation marked the node
  5060  		topLayer     = -1
  5061  		preds, succs [maxLevel]*int8NodeDesc
  5062  	)
  5063  	for {
  5064  		lFound := s.findNodeDelete(key, &preds, &succs)
  5065  		if isMarked || // this operation marked the node, or the node was found in the skip list
  5066  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5067  			if !isMarked { // the node has not been marked yet
  5068  				nodeToDelete = succs[lFound]
  5069  				topLayer = lFound
  5070  				nodeToDelete.mu.Lock()
  5071  				if nodeToDelete.flags.Get(marked) {
  5072  					// The node is marked by another process,
  5073  					// the physical deletion will be accomplished by another process.
  5074  					nodeToDelete.mu.Unlock()
  5075  					return false
  5076  				}
  5077  				nodeToDelete.flags.SetTrue(marked)
  5078  				isMarked = true
  5079  			}
  5080  			// Accomplish the physical deletion.
  5081  			var (
  5082  				highestLocked        = -1 // the highest level being locked by this process
  5083  				valid                = true
  5084  				pred, succ, prevPred *int8NodeDesc
  5085  			)
  5086  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5087  				pred, succ = preds[layer], succs[layer]
  5088  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5089  					pred.mu.Lock()
  5090  					highestLocked = layer
  5091  					prevPred = pred
  5092  				}
  5093  				// valid checks whether another node was inserted into this layer during this
  5094  				// operation, or whether the predecessor was deleted by another operation.
  5095  				// The layer is valid if:
  5096  				// 1. The previous node still exists (it is not marked).
  5097  				// 2. No other node was inserted into this layer.
  5098  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  5099  			}
  5100  			if !valid {
  5101  				unlockInt8Desc(preds, highestLocked)
  5102  				continue
  5103  			}
  5104  			for i := topLayer; i >= 0; i-- {
  5105  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  5106  				// so the non-atomic `loadNext` is sufficient here.
  5107  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  5108  			}
  5109  			nodeToDelete.mu.Unlock()
  5110  			unlockInt8Desc(preds, highestLocked)
  5111  			atomic.AddInt64(&s.length, -1)
  5112  			return true
  5113  		}
  5114  		return false
  5115  	}
  5116  }
  5117  
  5118  // Range calls f sequentially for each key and value present in the skipmap.
  5119  // If f returns false, range stops the iteration.
  5120  //
  5121  // Range does not necessarily correspond to any consistent snapshot of the Map's
  5122  // contents: no key will be visited more than once, but if the value for any key
  5123  // is stored or deleted concurrently, Range may reflect any mapping for that key
  5124  // from any point during the Range call.
  5125  func (s *Int8MapDesc) Range(f func(key int8, value interface{}) bool) {
  5126  	x := s.header.atomicLoadNext(0)
  5127  	for x != nil {
  5128  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  5129  			x = x.atomicLoadNext(0)
  5130  			continue
  5131  		}
  5132  		if !f(x.key, x.loadVal()) {
  5133  			break
  5134  		}
  5135  		x = x.atomicLoadNext(0)
  5136  	}
  5137  }
  5138  
  5139  // Len returns the length of this skipmap.
  5140  func (s *Int8MapDesc) Len() int {
  5141  	return int(atomic.LoadInt64(&s.length))
  5142  }
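        // Editorial sketch: Int8MapDesc exposes the same API as Int8Map; the only
        // observable difference is ordering, so Range visits keys from largest to
        // smallest:
        //
        //	m := NewInt8Desc()
        //	m.Store(1, "a")
        //	m.Store(2, "b")
        //	m.Range(func(key int8, value interface{}) bool {
        //		fmt.Println(key) // prints 2, then 1
        //		return true
        //	})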
  5143  
  5144  // Int16Map represents a map based on skip list in ascending order.
  5145  type Int16Map struct {
  5146  	header       *int16Node
  5147  	length       int64
  5148  	highestLevel int64 // highest level for now
  5149  }
  5150  
  5151  type int16Node struct {
  5152  	key   int16
  5153  	value unsafe.Pointer // *interface{}
  5154  	next  optionalArray  // [level]*int16Node
  5155  	mu    sync.Mutex
  5156  	flags bitflag
  5157  	level uint32
  5158  }
  5159  
  5160  func newInt16Node(key int16, value interface{}, level int) *int16Node {
  5161  	node := &int16Node{
  5162  		key:   key,
  5163  		level: uint32(level),
  5164  	}
  5165  	node.storeVal(value)
  5166  	if level > op1 {
  5167  		node.next.extra = new([op2]unsafe.Pointer)
  5168  	}
  5169  	return node
  5170  }
  5171  
  5172  func (n *int16Node) storeVal(value interface{}) {
  5173  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  5174  }
  5175  
  5176  func (n *int16Node) loadVal() interface{} {
  5177  	return *(*interface{})(atomic.LoadPointer(&n.value))
  5178  }
  5179  
  5180  func (n *int16Node) loadNext(i int) *int16Node {
  5181  	return (*int16Node)(n.next.load(i))
  5182  }
  5183  
  5184  func (n *int16Node) storeNext(i int, node *int16Node) {
  5185  	n.next.store(i, unsafe.Pointer(node))
  5186  }
  5187  
  5188  func (n *int16Node) atomicLoadNext(i int) *int16Node {
  5189  	return (*int16Node)(n.next.atomicLoad(i))
  5190  }
  5191  
  5192  func (n *int16Node) atomicStoreNext(i int, node *int16Node) {
  5193  	n.next.atomicStore(i, unsafe.Pointer(node))
  5194  }
  5195  
  5196  func (n *int16Node) lessthan(key int16) bool {
  5197  	return n.key < key
  5198  }
  5199  
  5200  func (n *int16Node) equal(key int16) bool {
  5201  	return n.key == key
  5202  }
  5203  
  5204  // NewInt16 returns an empty int16 skipmap.
  5205  func NewInt16() *Int16Map {
  5206  	h := newInt16Node(0, "", maxLevel)
  5207  	h.flags.SetTrue(fullyLinked)
  5208  	return &Int16Map{
  5209  		header:       h,
  5210  		highestLevel: defaultHighestLevel,
  5211  	}
  5212  }
  5213  
  5214  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  5215  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  5216  // (Unlike findNodeDelete, it does not record the full path: it returns as soon as the node is found.)
  5217  func (s *Int16Map) findNode(key int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) *int16Node {
  5218  	x := s.header
  5219  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5220  		succ := x.atomicLoadNext(i)
  5221  		for succ != nil && succ.lessthan(key) {
  5222  			x = succ
  5223  			succ = x.atomicLoadNext(i)
  5224  		}
  5225  		preds[i] = x
  5226  		succs[i] = succ
  5227  
  5228  		// Check if the key is already in the skipmap.
  5229  		if succ != nil && succ.equal(key) {
  5230  			return succ
  5231  		}
  5232  	}
  5233  	return nil
  5234  }
  5235  
  5236  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  5237  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  5238  func (s *Int16Map) findNodeDelete(key int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) int {
  5239  	// lFound represents the index of the first layer at which it found a node.
  5240  	lFound, x := -1, s.header
  5241  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5242  		succ := x.atomicLoadNext(i)
  5243  		for succ != nil && succ.lessthan(key) {
  5244  			x = succ
  5245  			succ = x.atomicLoadNext(i)
  5246  		}
  5247  		preds[i] = x
  5248  		succs[i] = succ
  5249  
  5250  		// Check if the key is already in the skip list.
  5251  		if lFound == -1 && succ != nil && succ.equal(key) {
  5252  			lFound = i
  5253  		}
  5254  	}
  5255  	return lFound
  5256  }
  5257  
  5258  func unlockInt16(preds [maxLevel]*int16Node, highestLevel int) {
  5259  	var prevPred *int16Node
  5260  	for i := highestLevel; i >= 0; i-- {
  5261  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  5262  			preds[i].mu.Unlock()
  5263  			prevPred = preds[i]
  5264  		}
  5265  	}
  5266  }
  5267  
  5268  // Store sets the value for a key.
  5269  func (s *Int16Map) Store(key int16, value interface{}) {
  5270  	level := s.randomlevel()
  5271  	var preds, succs [maxLevel]*int16Node
  5272  	for {
  5273  		nodeFound := s.findNode(key, &preds, &succs)
  5274  		if nodeFound != nil { // indicating the key is already in the skip-list
  5275  			if !nodeFound.flags.Get(marked) {
  5276  				// We don't need to care about whether or not the node is fully linked,
  5277  				// just replace the value.
  5278  				nodeFound.storeVal(value)
  5279  				return
  5280  			}
  5281  			// If the node is marked, another goroutine is in the process of deleting it,
  5282  			// so retry the insertion in the next iteration.
  5283  			continue
  5284  		}
  5285  
  5286  		// Add this node into the skip list.
  5287  		var (
  5288  			highestLocked        = -1 // the highest level being locked by this process
  5289  			valid                = true
  5290  			pred, succ, prevPred *int16Node
  5291  		)
  5292  		for layer := 0; valid && layer < level; layer++ {
  5293  			pred = preds[layer]   // target node's previous node
  5294  			succ = succs[layer]   // target node's next node
  5295  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5296  				pred.mu.Lock()
  5297  				highestLocked = layer
  5298  				prevPred = pred
  5299  			}
  5300  			// valid checks whether another node was inserted into this layer during this operation.
  5301  			// The layer is valid if:
  5302  			// 1. Neither the previous node nor the next node is marked.
  5303  			// 2. The previous node's next node in this layer is succ.
  5304  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5305  		}
  5306  		if !valid {
  5307  			unlockInt16(preds, highestLocked)
  5308  			continue
  5309  		}
  5310  
  5311  		nn := newInt16Node(key, value, level)
  5312  		for layer := 0; layer < level; layer++ {
  5313  			nn.storeNext(layer, succs[layer])
  5314  			preds[layer].atomicStoreNext(layer, nn)
  5315  		}
  5316  		nn.flags.SetTrue(fullyLinked)
  5317  		unlockInt16(preds, highestLocked)
  5318  		atomic.AddInt64(&s.length, 1)
  5319  		return
  5320  	}
  5321  }
  5322  
  5323  func (s *Int16Map) randomlevel() int {
  5324  	// Generate random level.
  5325  	level := randomLevel()
  5326  	// Update highest level if possible.
  5327  	for {
  5328  		hl := atomic.LoadInt64(&s.highestLevel)
  5329  		if int64(level) <= hl {
  5330  			break
  5331  		}
  5332  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  5333  			break
  5334  		}
  5335  	}
  5336  	return level
  5337  }
  5338  
  5339  // Load returns the value stored in the map for a key, or nil if no
  5340  // value is present.
  5341  // The ok result indicates whether value was found in the map.
  5342  func (s *Int16Map) Load(key int16) (value interface{}, ok bool) {
  5343  	x := s.header
  5344  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5345  		nex := x.atomicLoadNext(i)
  5346  		for nex != nil && nex.lessthan(key) {
  5347  			x = nex
  5348  			nex = x.atomicLoadNext(i)
  5349  		}
  5350  
  5351  		// Check if the key is already in the skip list.
  5352  		if nex != nil && nex.equal(key) {
  5353  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  5354  				return nex.loadVal(), true
  5355  			}
  5356  			return nil, false
  5357  		}
  5358  	}
  5359  	return nil, false
  5360  }
  5361  
  5362  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  5363  // The loaded result reports whether the key was present.
  5364  // (Modified from Delete)
  5365  func (s *Int16Map) LoadAndDelete(key int16) (value interface{}, loaded bool) {
  5366  	var (
  5367  		nodeToDelete *int16Node
  5368  		isMarked     bool // reports whether this operation marked the node
  5369  		topLayer     = -1
  5370  		preds, succs [maxLevel]*int16Node
  5371  	)
  5372  	for {
  5373  		lFound := s.findNodeDelete(key, &preds, &succs)
  5374  		if isMarked || // this operation marked the node, or the node was found in the skip list
  5375  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5376  			if !isMarked { // the node has not been marked yet
  5377  				nodeToDelete = succs[lFound]
  5378  				topLayer = lFound
  5379  				nodeToDelete.mu.Lock()
  5380  				if nodeToDelete.flags.Get(marked) {
  5381  					// The node is marked by another process,
  5382  					// the physical deletion will be accomplished by another process.
  5383  					nodeToDelete.mu.Unlock()
  5384  					return nil, false
  5385  				}
  5386  				nodeToDelete.flags.SetTrue(marked)
  5387  				isMarked = true
  5388  			}
  5389  			// Accomplish the physical deletion.
  5390  			var (
  5391  				highestLocked        = -1 // the highest level being locked by this process
  5392  				valid                = true
  5393  				pred, succ, prevPred *int16Node
  5394  			)
  5395  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5396  				pred, succ = preds[layer], succs[layer]
  5397  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5398  					pred.mu.Lock()
  5399  					highestLocked = layer
  5400  					prevPred = pred
  5401  				}
  5402  				// valid checks whether another node was inserted into this layer during this
  5403  				// operation, or whether the predecessor was deleted by another operation.
  5404  				// The layer is valid if:
  5405  				// 1. The previous node still exists (it is not marked).
  5406  				// 2. No other node was inserted into this layer.
  5407  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  5408  			}
  5409  			if !valid {
  5410  				unlockInt16(preds, highestLocked)
  5411  				continue
  5412  			}
  5413  			for i := topLayer; i >= 0; i-- {
  5414  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  5415  				// so the non-atomic `loadNext` is sufficient here.
  5416  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  5417  			}
  5418  			nodeToDelete.mu.Unlock()
  5419  			unlockInt16(preds, highestLocked)
  5420  			atomic.AddInt64(&s.length, -1)
  5421  			return nodeToDelete.loadVal(), true
  5422  		}
  5423  		return nil, false
  5424  	}
  5425  }
  5426  
  5427  // LoadOrStore returns the existing value for the key if present.
  5428  // Otherwise, it stores and returns the given value.
  5429  // The loaded result is true if the value was loaded, false if stored.
  5430  // (Modified from Store)
  5431  func (s *Int16Map) LoadOrStore(key int16, value interface{}) (actual interface{}, loaded bool) {
  5432  	var (
  5433  		level        int
  5434  		preds, succs [maxLevel]*int16Node
  5435  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  5436  	)
  5437  	for {
  5438  		nodeFound := s.findNode(key, &preds, &succs)
  5439  		if nodeFound != nil { // indicating the key is already in the skip-list
  5440  			if !nodeFound.flags.Get(marked) {
  5441  				// We don't need to care about whether or not the node is fully linked,
  5442  				// just return the value.
  5443  				return nodeFound.loadVal(), true
  5444  			}
  5445  			// If the node is marked, another goroutine is in the process of deleting it,
  5446  			// so retry the insertion in the next iteration.
  5447  			continue
  5448  		}
  5449  
  5450  		// Add this node into the skip list.
  5451  		var (
  5452  			highestLocked        = -1 // the highest level being locked by this process
  5453  			valid                = true
  5454  			pred, succ, prevPred *int16Node
  5455  		)
  5456  		if level == 0 {
  5457  			level = s.randomlevel()
  5458  			if level > hl {
  5459  				// If the highest level was updated, it usually means that many goroutines
  5460  				// are inserting items; hopefully we can find a better path in the next iteration.
  5461  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  5462  				// but this strategy's performance is almost the same as the existing method.
  5463  				continue
  5464  			}
  5465  		}
  5466  		for layer := 0; valid && layer < level; layer++ {
  5467  			pred = preds[layer]   // target node's previous node
  5468  			succ = succs[layer]   // target node's next node
  5469  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5470  				pred.mu.Lock()
  5471  				highestLocked = layer
  5472  				prevPred = pred
  5473  			}
  5474  			// valid checks whether another node was inserted into this layer during this operation.
  5475  			// The layer is valid if:
  5476  			// 1. Neither the previous node nor the next node is marked.
  5477  			// 2. The previous node's next node in this layer is succ.
  5478  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5479  		}
  5480  		if !valid {
  5481  			unlockInt16(preds, highestLocked)
  5482  			continue
  5483  		}
  5484  
  5485  		nn := newInt16Node(key, value, level)
  5486  		for layer := 0; layer < level; layer++ {
  5487  			nn.storeNext(layer, succs[layer])
  5488  			preds[layer].atomicStoreNext(layer, nn)
  5489  		}
  5490  		nn.flags.SetTrue(fullyLinked)
  5491  		unlockInt16(preds, highestLocked)
  5492  		atomic.AddInt64(&s.length, 1)
  5493  		return value, false
  5494  	}
  5495  }
  5496  
  5497  // LoadOrStoreLazy returns the existing value for the key if present.
  5498  // Otherwise, it computes the value with f, stores it, and returns it; f is called at most once.
  5499  // The loaded result is true if the value was loaded, false if stored.
  5500  // (Modified from LoadOrStore)
  5501  func (s *Int16Map) LoadOrStoreLazy(key int16, f func() interface{}) (actual interface{}, loaded bool) {
  5502  	var (
  5503  		level        int
  5504  		preds, succs [maxLevel]*int16Node
  5505  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  5506  	)
  5507  	for {
  5508  		nodeFound := s.findNode(key, &preds, &succs)
  5509  		if nodeFound != nil { // indicating the key is already in the skip-list
  5510  			if !nodeFound.flags.Get(marked) {
  5511  				// We don't need to care about whether or not the node is fully linked,
  5512  				// just return the value.
  5513  				return nodeFound.loadVal(), true
  5514  			}
  5515  			// If the node is marked, another goroutine is in the process of deleting it,
  5516  			// so retry the insertion in the next iteration.
  5517  			continue
  5518  		}
  5519  
  5520  		// Add this node into the skip list.
  5521  		var (
  5522  			highestLocked        = -1 // the highest level being locked by this process
  5523  			valid                = true
  5524  			pred, succ, prevPred *int16Node
  5525  		)
  5526  		if level == 0 {
  5527  			level = s.randomlevel()
  5528  			if level > hl {
  5529  				// If the highest level was updated, it usually means that many goroutines
  5530  				// are inserting items; hopefully we can find a better path in the next iteration.
  5531  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  5532  				// but this strategy's performance is almost the same as the existing method.
  5533  				continue
  5534  			}
  5535  		}
  5536  		for layer := 0; valid && layer < level; layer++ {
  5537  			pred = preds[layer]   // target node's previous node
  5538  			succ = succs[layer]   // target node's next node
  5539  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5540  				pred.mu.Lock()
  5541  				highestLocked = layer
  5542  				prevPred = pred
  5543  			}
  5544  			// valid checks whether another node was inserted into this layer during this operation.
  5545  			// The layer is valid if:
  5546  			// 1. Neither the previous node nor the next node is marked.
  5547  			// 2. The previous node's next node in this layer is succ.
  5548  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  5549  		}
  5550  		if !valid {
  5551  			unlockInt16(preds, highestLocked)
  5552  			continue
  5553  		}
  5554  		value := f()
  5555  		nn := newInt16Node(key, value, level)
  5556  		for layer := 0; layer < level; layer++ {
  5557  			nn.storeNext(layer, succs[layer])
  5558  			preds[layer].atomicStoreNext(layer, nn)
  5559  		}
  5560  		nn.flags.SetTrue(fullyLinked)
  5561  		unlockInt16(preds, highestLocked)
  5562  		atomic.AddInt64(&s.length, 1)
  5563  		return value, false
  5564  	}
  5565  }
  5566  
  5567  // Delete deletes the value for a key.
  5568  func (s *Int16Map) Delete(key int16) bool {
  5569  	var (
  5570  		nodeToDelete *int16Node
  5571  		isMarked     bool // reports whether this operation marked the node
  5572  		topLayer     = -1
  5573  		preds, succs [maxLevel]*int16Node
  5574  	)
  5575  	for {
  5576  		lFound := s.findNodeDelete(key, &preds, &succs)
  5577  		if isMarked || // this operation marked the node, or the node was found in the skip list
  5578  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5579  			if !isMarked { // we don't mark this node for now
  5580  				nodeToDelete = succs[lFound]
  5581  				topLayer = lFound
  5582  				nodeToDelete.mu.Lock()
  5583  				if nodeToDelete.flags.Get(marked) {
  5584  					// The node is already marked by another process;
  5585  					// that process will accomplish the physical deletion.
  5586  					nodeToDelete.mu.Unlock()
  5587  					return false
  5588  				}
  5589  				nodeToDelete.flags.SetTrue(marked)
  5590  				isMarked = true
  5591  			}
  5592  			// Accomplish the physical deletion.
  5593  			var (
  5594  				highestLocked        = -1 // the highest level being locked by this process
  5595  				valid                = true
  5596  				pred, succ, prevPred *int16Node
  5597  			)
  5598  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5599  				pred, succ = preds[layer], succs[layer]
  5600  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5601  					pred.mu.Lock()
  5602  					highestLocked = layer
  5603  					prevPred = pred
  5604  				}
  5605  				// valid checks whether another node has been inserted into this layer of
  5606  				// the skip list during this process, or whether the previous node has been
  5607  				// deleted by another process. The layer is valid if:
  5608  				// 1. The previous node is not marked (it still exists in the list).
  5609  				// 2. No other node has been inserted between pred and succ in this layer.
  5610  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  5611  			}
  5612  			if !valid {
  5613  				unlockInt16(preds, highestLocked)
  5614  				continue
  5615  			}
  5616  			for i := topLayer; i >= 0; i-- {
  5617  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  5618  				// so the plain (non-atomic) `nodeToDelete.loadNext` is safe here.
  5619  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  5620  			}
  5621  			nodeToDelete.mu.Unlock()
  5622  			unlockInt16(preds, highestLocked)
  5623  			atomic.AddInt64(&s.length, -1)
  5624  			return true
  5625  		}
  5626  		return false
  5627  	}
  5628  }
  5629  
  5630  // Range calls f sequentially for each key and value present in the skipmap.
  5631  // If f returns false, range stops the iteration.
  5632  //
  5633  // Range does not necessarily correspond to any consistent snapshot of the Map's
  5634  // contents: no key will be visited more than once, but if the value for any key
  5635  // is stored or deleted concurrently, Range may reflect any mapping for that key
  5636  // from any point during the Range call.
  5637  func (s *Int16Map) Range(f func(key int16, value interface{}) bool) {
  5638  	x := s.header.atomicLoadNext(0)
  5639  	for x != nil {
  5640  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  5641  			x = x.atomicLoadNext(0)
  5642  			continue
  5643  		}
  5644  		if !f(x.key, x.loadVal()) {
  5645  			break
  5646  		}
  5647  		x = x.atomicLoadNext(0)
  5648  	}
  5649  }
  5650  
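        // An illustrative sketch (not produced by types_gen.go): returning false from
        // the callback stops Range early. Here at most three keys are collected:
        //
        //	m := skipmap.NewInt16()
        //	// ... Stores happen elsewhere, possibly concurrently ...
        //	var keys []int16
        //	m.Range(func(key int16, value interface{}) bool {
        //		keys = append(keys, key)
        //		return len(keys) < 3
        //	})
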
  5651  // Len returns the length of this skipmap.
  5652  func (s *Int16Map) Len() int {
  5653  	return int(atomic.LoadInt64(&s.length))
  5654  }
  5655  
  5656  // Int16MapDesc represents a map based on a skip list in descending order.
  5657  type Int16MapDesc struct {
  5658  	header       *int16NodeDesc
  5659  	length       int64
  5660  	highestLevel int64 // highest level for now
  5661  }
  5662  
  5663  type int16NodeDesc struct {
  5664  	key   int16
  5665  	value unsafe.Pointer // *interface{}
  5666  	next  optionalArray  // [level]*int16NodeDesc
  5667  	mu    sync.Mutex
  5668  	flags bitflag
  5669  	level uint32
  5670  }
  5671  
  5672  func newInt16NodeDesc(key int16, value interface{}, level int) *int16NodeDesc {
  5673  	node := &int16NodeDesc{
  5674  		key:   key,
  5675  		level: uint32(level),
  5676  	}
  5677  	node.storeVal(value)
  5678  	if level > op1 {
  5679  		node.next.extra = new([op2]unsafe.Pointer)
  5680  	}
  5681  	return node
  5682  }
  5683  
  5684  func (n *int16NodeDesc) storeVal(value interface{}) {
  5685  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  5686  }
  5687  
  5688  func (n *int16NodeDesc) loadVal() interface{} {
  5689  	return *(*interface{})(atomic.LoadPointer(&n.value))
  5690  }
  5691  
  5692  func (n *int16NodeDesc) loadNext(i int) *int16NodeDesc {
  5693  	return (*int16NodeDesc)(n.next.load(i))
  5694  }
  5695  
  5696  func (n *int16NodeDesc) storeNext(i int, node *int16NodeDesc) {
  5697  	n.next.store(i, unsafe.Pointer(node))
  5698  }
  5699  
  5700  func (n *int16NodeDesc) atomicLoadNext(i int) *int16NodeDesc {
  5701  	return (*int16NodeDesc)(n.next.atomicLoad(i))
  5702  }
  5703  
  5704  func (n *int16NodeDesc) atomicStoreNext(i int, node *int16NodeDesc) {
  5705  	n.next.atomicStore(i, unsafe.Pointer(node))
  5706  }
  5707  
  5708  func (n *int16NodeDesc) lessthan(key int16) bool {
  5709  	return n.key > key
  5710  }
  5711  
  5712  func (n *int16NodeDesc) equal(key int16) bool {
  5713  	return n.key == key
  5714  }
  5715  
  5716  // NewInt16Desc returns an empty int16 skipmap in descending key order.
  5717  func NewInt16Desc() *Int16MapDesc {
  5718  	h := newInt16NodeDesc(0, "", maxLevel)
  5719  	h.flags.SetTrue(fullyLinked)
  5720  	return &Int16MapDesc{
  5721  		header:       h,
  5722  		highestLevel: defaultHighestLevel,
  5723  	}
  5724  }
  5725  
  5726  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  5727  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key (descending order).
  5728  // (It does not record the full search path: it returns immediately once the node is found.)
  5729  func (s *Int16MapDesc) findNode(key int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) *int16NodeDesc {
  5730  	x := s.header
  5731  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5732  		succ := x.atomicLoadNext(i)
  5733  		for succ != nil && succ.lessthan(key) {
  5734  			x = succ
  5735  			succ = x.atomicLoadNext(i)
  5736  		}
  5737  		preds[i] = x
  5738  		succs[i] = succ
  5739  
  5740  		// Check if the key is already in the skipmap.
  5741  		if succ != nil && succ.equal(key) {
  5742  			return succ
  5743  		}
  5744  	}
  5745  	return nil
  5746  }
  5747  
  5748  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  5749  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key (descending order).
  5750  func (s *Int16MapDesc) findNodeDelete(key int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) int {
  5751  	// lFound is the index of the first (highest) layer at which the node was found.
  5752  	lFound, x := -1, s.header
  5753  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5754  		succ := x.atomicLoadNext(i)
  5755  		for succ != nil && succ.lessthan(key) {
  5756  			x = succ
  5757  			succ = x.atomicLoadNext(i)
  5758  		}
  5759  		preds[i] = x
  5760  		succs[i] = succ
  5761  
  5762  		// Check if the key is already in the skip list.
  5763  		if lFound == -1 && succ != nil && succ.equal(key) {
  5764  			lFound = i
  5765  		}
  5766  	}
  5767  	return lFound
  5768  }
  5769  
  5770  func unlockInt16Desc(preds [maxLevel]*int16NodeDesc, highestLevel int) {
  5771  	var prevPred *int16NodeDesc
  5772  	for i := highestLevel; i >= 0; i-- {
  5773  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  5774  			preds[i].mu.Unlock()
  5775  			prevPred = preds[i]
  5776  		}
  5777  	}
  5778  }
  5779  
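        // A worked example of the dedup above (illustrative): adjacent layers often
        // share one predecessor, so with preds[0] == preds[1] == n1, preds[2] == n2,
        // and highestLevel == 2, the loop unlocks n2 once and n1 once, matching the
        // single Lock taken per distinct node on the locking side.
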
  5780  // Store sets the value for a key.
  5781  func (s *Int16MapDesc) Store(key int16, value interface{}) {
  5782  	level := s.randomlevel()
  5783  	var preds, succs [maxLevel]*int16NodeDesc
  5784  	for {
  5785  		nodeFound := s.findNode(key, &preds, &succs)
  5786  		if nodeFound != nil { // indicating the key is already in the skip-list
  5787  			if !nodeFound.flags.Get(marked) {
  5788  				// We don't need to care about whether or not the node is fully linked,
  5789  				// just replace the value.
  5790  				nodeFound.storeVal(value)
  5791  				return
  5792  			}
  5793  			// If the node is marked, some other goroutine is in the process of deleting it,
  5794  			// so we retry and insert the node in the next loop iteration.
  5795  			continue
  5796  		}
  5797  
  5798  		// Add this node into the skip list.
  5799  		var (
  5800  			highestLocked        = -1 // the highest level being locked by this process
  5801  			valid                = true
  5802  			pred, succ, prevPred *int16NodeDesc
  5803  		)
  5804  		for layer := 0; valid && layer < level; layer++ {
  5805  			pred = preds[layer]   // target node's previous node
  5806  			succ = succs[layer]   // target node's next node
  5807  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5808  				pred.mu.Lock()
  5809  				highestLocked = layer
  5810  				prevPred = pred
  5811  			}
  5812  			// valid checks whether another node has been inserted into this layer of
  5813  			// the skip list while this goroutine was searching. The layer is valid if:
  5814  			// 1. Neither the previous node nor the next node is marked.
  5815  			// 2. The previous node's next node in this layer is still succ.
  5816  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5817  		}
  5818  		if !valid {
  5819  			unlockInt16Desc(preds, highestLocked)
  5820  			continue
  5821  		}
  5822  
  5823  		nn := newInt16NodeDesc(key, value, level)
  5824  		for layer := 0; layer < level; layer++ {
  5825  			nn.storeNext(layer, succs[layer])
  5826  			preds[layer].atomicStoreNext(layer, nn)
  5827  		}
  5828  		nn.flags.SetTrue(fullyLinked)
  5829  		unlockInt16Desc(preds, highestLocked)
  5830  		atomic.AddInt64(&s.length, 1)
  5831  		return
  5832  	}
  5833  }
  5834  
  5835  func (s *Int16MapDesc) randomlevel() int {
  5836  	// Generate random level.
  5837  	level := randomLevel()
  5838  	// Update highest level if possible.
  5839  	for {
  5840  		hl := atomic.LoadInt64(&s.highestLevel)
  5841  		if int64(level) <= hl {
  5842  			break
  5843  		}
  5844  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  5845  			break
  5846  		}
  5847  	}
  5848  	return level
  5849  }
  5850  
  5851  // Load returns the value stored in the map for a key, or nil if no
  5852  // value is present.
  5853  // The ok result indicates whether value was found in the map.
  5854  func (s *Int16MapDesc) Load(key int16) (value interface{}, ok bool) {
  5855  	x := s.header
  5856  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5857  		nex := x.atomicLoadNext(i)
  5858  		for nex != nil && nex.lessthan(key) {
  5859  			x = nex
  5860  			nex = x.atomicLoadNext(i)
  5861  		}
  5862  
  5863  		// Check if the key is already in the skip list.
  5864  		if nex != nil && nex.equal(key) {
  5865  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  5866  				return nex.loadVal(), true
  5867  			}
  5868  			return nil, false
  5869  		}
  5870  	}
  5871  	return nil, false
  5872  }
  5873  
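        // A minimal usage sketch (illustrative only, not produced by types_gen.go).
        // Load is lock-free: it never blocks writers, and ok is false both for
        // absent keys and for nodes whose insertion is not yet fully linked:
        //
        //	m := skipmap.NewInt16Desc()
        //	m.Store(3, "three")
        //	if v, ok := m.Load(3); ok {
        //		fmt.Println(v) // "three"
        //	}
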
  5874  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  5875  // The loaded result reports whether the key was present.
  5876  // (Modified from Delete)
  5877  func (s *Int16MapDesc) LoadAndDelete(key int16) (value interface{}, loaded bool) {
  5878  	var (
  5879  		nodeToDelete *int16NodeDesc
  5880  		isMarked     bool // reports whether this operation marked the node
  5881  		topLayer     = -1
  5882  		preds, succs [maxLevel]*int16NodeDesc
  5883  	)
  5884  	for {
  5885  		lFound := s.findNodeDelete(key, &preds, &succs)
  5886  		if isMarked || // this process marked the node, or the node was found in the skip list
  5887  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5888  			if !isMarked { // the node has not been marked by this goroutine yet
  5889  				nodeToDelete = succs[lFound]
  5890  				topLayer = lFound
  5891  				nodeToDelete.mu.Lock()
  5892  				if nodeToDelete.flags.Get(marked) {
  5893  					// The node is already marked by another process;
  5894  					// that process will accomplish the physical deletion.
  5895  					nodeToDelete.mu.Unlock()
  5896  					return nil, false
  5897  				}
  5898  				nodeToDelete.flags.SetTrue(marked)
  5899  				isMarked = true
  5900  			}
  5901  			// Accomplish the physical deletion.
  5902  			var (
  5903  				highestLocked        = -1 // the highest level being locked by this process
  5904  				valid                = true
  5905  				pred, succ, prevPred *int16NodeDesc
  5906  			)
  5907  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5908  				pred, succ = preds[layer], succs[layer]
  5909  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5910  					pred.mu.Lock()
  5911  					highestLocked = layer
  5912  					prevPred = pred
  5913  				}
  5914  				// valid checks whether another node has been inserted into this layer of
  5915  				// the skip list during this process, or whether the previous node has been
  5916  				// deleted by another process. The layer is valid if:
  5917  				// 1. The previous node is not marked (it still exists in the list).
  5918  				// 2. No other node has been inserted between pred and succ in this layer.
  5919  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  5920  			}
  5921  			if !valid {
  5922  				unlockInt16Desc(preds, highestLocked)
  5923  				continue
  5924  			}
  5925  			for i := topLayer; i >= 0; i-- {
  5926  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  5927  				// so the plain (non-atomic) `nodeToDelete.loadNext` is safe here.
  5928  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  5929  			}
  5930  			nodeToDelete.mu.Unlock()
  5931  			unlockInt16Desc(preds, highestLocked)
  5932  			atomic.AddInt64(&s.length, -1)
  5933  			return nodeToDelete.loadVal(), true
  5934  		}
  5935  		return nil, false
  5936  	}
  5937  }
  5938  
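        // An illustrative sketch (not produced by types_gen.go; consume is a
        // hypothetical callback). LoadAndDelete gives atomic "pop" semantics: when
        // several goroutines race on the same key, at most one sees loaded == true:
        //
        //	if v, loaded := m.LoadAndDelete(3); loaded {
        //		consume(v) // exactly one racing goroutine reaches this line
        //	}
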
  5939  // LoadOrStore returns the existing value for the key if present.
  5940  // Otherwise, it stores and returns the given value.
  5941  // The loaded result is true if the value was loaded, false if stored.
  5942  // (Modified from Store)
  5943  func (s *Int16MapDesc) LoadOrStore(key int16, value interface{}) (actual interface{}, loaded bool) {
  5944  	var (
  5945  		level        int
  5946  		preds, succs [maxLevel]*int16NodeDesc
  5947  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  5948  	)
  5949  	for {
  5950  		nodeFound := s.findNode(key, &preds, &succs)
  5951  		if nodeFound != nil { // indicating the key is already in the skip-list
  5952  			if !nodeFound.flags.Get(marked) {
  5953  				// We don't need to care about whether or not the node is fully linked,
  5954  				// just return the value.
  5955  				return nodeFound.loadVal(), true
  5956  			}
  5957  			// If the node is marked, some other goroutine is in the process of deleting it,
  5958  			// so we retry and insert the node in the next loop iteration.
  5959  			continue
  5960  		}
  5961  
  5962  		// Add this node into the skip list.
  5963  		var (
  5964  			highestLocked        = -1 // the highest level being locked by this process
  5965  			valid                = true
  5966  			pred, succ, prevPred *int16NodeDesc
  5967  		)
  5968  		if level == 0 {
  5969  			level = s.randomlevel()
  5970  			if level > hl {
  5971  				// If the highest level was updated, it usually means that many goroutines
  5972  				// are inserting items. Hopefully we can find a better path in the next loop.
  5973  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  5974  				// but this strategy's performance is almost the same as the existing method.
  5975  				continue
  5976  			}
  5977  		}
  5978  		for layer := 0; valid && layer < level; layer++ {
  5979  			pred = preds[layer]   // target node's previous node
  5980  			succ = succs[layer]   // target node's next node
  5981  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5982  				pred.mu.Lock()
  5983  				highestLocked = layer
  5984  				prevPred = pred
  5985  			}
  5986  			// valid checks whether another node has been inserted into this layer of
  5987  			// the skip list while this goroutine was searching. The layer is valid if:
  5988  			// 1. Neither the previous node nor the next node is marked.
  5989  			// 2. The previous node's next node in this layer is still succ.
  5990  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5991  		}
  5992  		if !valid {
  5993  			unlockInt16Desc(preds, highestLocked)
  5994  			continue
  5995  		}
  5996  
  5997  		nn := newInt16NodeDesc(key, value, level)
  5998  		for layer := 0; layer < level; layer++ {
  5999  			nn.storeNext(layer, succs[layer])
  6000  			preds[layer].atomicStoreNext(layer, nn)
  6001  		}
  6002  		nn.flags.SetTrue(fullyLinked)
  6003  		unlockInt16Desc(preds, highestLocked)
  6004  		atomic.AddInt64(&s.length, 1)
  6005  		return value, false
  6006  	}
  6007  }
  6008  
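        // A usage note (illustrative, not generated): unlike LoadOrStoreLazy below,
        // the value argument of LoadOrStore is always evaluated by the caller, even
        // when the key already exists, so prefer the lazy variant when constructing
        // the value is expensive:
        //
        //	actual, loaded := m.LoadOrStore(9, buildValue()) // buildValue (hypothetical) runs either way
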
  6009  // LoadOrStoreLazy returns the existing value for the key if present.
  6010  // Otherwise, it stores and returns the value produced by f; f is invoked at most once.
  6011  // The loaded result is true if the value was loaded, false if stored.
  6012  // (Modified from LoadOrStore)
  6013  func (s *Int16MapDesc) LoadOrStoreLazy(key int16, f func() interface{}) (actual interface{}, loaded bool) {
  6014  	var (
  6015  		level        int
  6016  		preds, succs [maxLevel]*int16NodeDesc
  6017  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  6018  	)
  6019  	for {
  6020  		nodeFound := s.findNode(key, &preds, &succs)
  6021  		if nodeFound != nil { // indicating the key is already in the skip-list
  6022  			if !nodeFound.flags.Get(marked) {
  6023  				// We don't need to care about whether or not the node is fully linked,
  6024  				// just return the value.
  6025  				return nodeFound.loadVal(), true
  6026  			}
  6027  			// If the node is marked, some other goroutine is in the process of deleting it,
  6028  			// so we retry and insert the node in the next loop iteration.
  6029  			continue
  6030  		}
  6031  
  6032  		// Add this node into the skip list.
  6033  		var (
  6034  			highestLocked        = -1 // the highest level being locked by this process
  6035  			valid                = true
  6036  			pred, succ, prevPred *int16NodeDesc
  6037  		)
  6038  		if level == 0 {
  6039  			level = s.randomlevel()
  6040  			if level > hl {
  6041  				// If the highest level was updated, it usually means that many goroutines
  6042  				// are inserting items. Hopefully we can find a better path in the next loop.
  6043  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  6044  				// but this strategy's performance is almost the same as the existing method.
  6045  				continue
  6046  			}
  6047  		}
  6048  		for layer := 0; valid && layer < level; layer++ {
  6049  			pred = preds[layer]   // target node's previous node
  6050  			succ = succs[layer]   // target node's next node
  6051  			if pred != prevPred { // the node in this layer could be locked by previous loop
  6052  				pred.mu.Lock()
  6053  				highestLocked = layer
  6054  				prevPred = pred
  6055  			}
  6056  			// valid checks whether another node has been inserted into this layer of
  6057  			// the skip list while this goroutine was searching. The layer is valid if:
  6058  			// 1. Neither the previous node nor the next node is marked.
  6059  			// 2. The previous node's next node in this layer is still succ.
  6060  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  6061  		}
  6062  		if !valid {
  6063  			unlockInt16Desc(preds, highestLocked)
  6064  			continue
  6065  		}
  6066  		value := f()
  6067  		nn := newInt16NodeDesc(key, value, level)
  6068  		for layer := 0; layer < level; layer++ {
  6069  			nn.storeNext(layer, succs[layer])
  6070  			preds[layer].atomicStoreNext(layer, nn)
  6071  		}
  6072  		nn.flags.SetTrue(fullyLinked)
  6073  		unlockInt16Desc(preds, highestLocked)
  6074  		atomic.AddInt64(&s.length, 1)
  6075  		return value, false
  6076  	}
  6077  }
  6078  
  6079  // Delete deletes the value for a key.
  6080  func (s *Int16MapDesc) Delete(key int16) bool {
  6081  	var (
  6082  		nodeToDelete *int16NodeDesc
  6083  		isMarked     bool // reports whether this operation marked the node
  6084  		topLayer     = -1
  6085  		preds, succs [maxLevel]*int16NodeDesc
  6086  	)
  6087  	for {
  6088  		lFound := s.findNodeDelete(key, &preds, &succs)
  6089  		if isMarked || // this process marked the node, or the node was found in the skip list
  6090  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  6091  			if !isMarked { // the node has not been marked by this goroutine yet
  6092  				nodeToDelete = succs[lFound]
  6093  				topLayer = lFound
  6094  				nodeToDelete.mu.Lock()
  6095  				if nodeToDelete.flags.Get(marked) {
  6096  					// The node is already marked by another process;
  6097  					// that process will accomplish the physical deletion.
  6098  					nodeToDelete.mu.Unlock()
  6099  					return false
  6100  				}
  6101  				nodeToDelete.flags.SetTrue(marked)
  6102  				isMarked = true
  6103  			}
  6104  			// Accomplish the physical deletion.
  6105  			var (
  6106  				highestLocked        = -1 // the highest level being locked by this process
  6107  				valid                = true
  6108  				pred, succ, prevPred *int16NodeDesc
  6109  			)
  6110  			for layer := 0; valid && (layer <= topLayer); layer++ {
  6111  				pred, succ = preds[layer], succs[layer]
  6112  				if pred != prevPred { // the node in this layer could be locked by previous loop
  6113  					pred.mu.Lock()
  6114  					highestLocked = layer
  6115  					prevPred = pred
  6116  				}
  6117  				// valid checks whether another node has been inserted into this layer of
  6118  				// the skip list during this process, or whether the previous node has been
  6119  				// deleted by another process. The layer is valid if:
  6120  				// 1. The previous node is not marked (it still exists in the list).
  6121  				// 2. No other node has been inserted between pred and succ in this layer.
  6122  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  6123  			}
  6124  			if !valid {
  6125  				unlockInt16Desc(preds, highestLocked)
  6126  				continue
  6127  			}
  6128  			for i := topLayer; i >= 0; i-- {
  6129  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  6130  				// so the plain (non-atomic) `nodeToDelete.loadNext` is safe here.
  6131  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  6132  			}
  6133  			nodeToDelete.mu.Unlock()
  6134  			unlockInt16Desc(preds, highestLocked)
  6135  			atomic.AddInt64(&s.length, -1)
  6136  			return true
  6137  		}
  6138  		return false
  6139  	}
  6140  }
  6141  
  6142  // Range calls f sequentially for each key and value present in the skipmap.
  6143  // If f returns false, range stops the iteration.
  6144  //
  6145  // Range does not necessarily correspond to any consistent snapshot of the Map's
  6146  // contents: no key will be visited more than once, but if the value for any key
  6147  // is stored or deleted concurrently, Range may reflect any mapping for that key
  6148  // from any point during the Range call.
  6149  func (s *Int16MapDesc) Range(f func(key int16, value interface{}) bool) {
  6150  	x := s.header.atomicLoadNext(0)
  6151  	for x != nil {
  6152  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  6153  			x = x.atomicLoadNext(0)
  6154  			continue
  6155  		}
  6156  		if !f(x.key, x.loadVal()) {
  6157  			break
  6158  		}
  6159  		x = x.atomicLoadNext(0)
  6160  	}
  6161  }
  6162  
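        // An illustrative sketch (not produced by types_gen.go) contrasting
        // iteration order: Int16Map.Range visits keys in ascending order, while
        // Int16MapDesc.Range visits them in descending order:
        //
        //	d := skipmap.NewInt16Desc()
        //	for _, k := range []int16{1, 3, 2} {
        //		d.Store(k, struct{}{})
        //	}
        //	d.Range(func(key int16, _ interface{}) bool {
        //		fmt.Print(key, " ") // prints: 3 2 1
        //		return true
        //	})
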
  6163  // Len returns the length of this skipmap.
  6164  func (s *Int16MapDesc) Len() int {
  6165  	return int(atomic.LoadInt64(&s.length))
  6166  }
  6167  
  6168  // Int32Map represents a map based on a skip list in ascending order.
  6169  type Int32Map struct {
  6170  	header       *int32Node
  6171  	length       int64
  6172  	highestLevel int64 // highest level for now
  6173  }
  6174  
  6175  type int32Node struct {
  6176  	key   int32
  6177  	value unsafe.Pointer // *interface{}
  6178  	next  optionalArray  // [level]*int32Node
  6179  	mu    sync.Mutex
  6180  	flags bitflag
  6181  	level uint32
  6182  }
  6183  
  6184  func newInt32Node(key int32, value interface{}, level int) *int32Node {
  6185  	node := &int32Node{
  6186  		key:   key,
  6187  		level: uint32(level),
  6188  	}
  6189  	node.storeVal(value)
  6190  	if level > op1 {
  6191  		node.next.extra = new([op2]unsafe.Pointer)
  6192  	}
  6193  	return node
  6194  }
  6195  
  6196  func (n *int32Node) storeVal(value interface{}) {
  6197  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  6198  }
  6199  
  6200  func (n *int32Node) loadVal() interface{} {
  6201  	return *(*interface{})(atomic.LoadPointer(&n.value))
  6202  }
  6203  
  6204  func (n *int32Node) loadNext(i int) *int32Node {
  6205  	return (*int32Node)(n.next.load(i))
  6206  }
  6207  
  6208  func (n *int32Node) storeNext(i int, node *int32Node) {
  6209  	n.next.store(i, unsafe.Pointer(node))
  6210  }
  6211  
  6212  func (n *int32Node) atomicLoadNext(i int) *int32Node {
  6213  	return (*int32Node)(n.next.atomicLoad(i))
  6214  }
  6215  
  6216  func (n *int32Node) atomicStoreNext(i int, node *int32Node) {
  6217  	n.next.atomicStore(i, unsafe.Pointer(node))
  6218  }
  6219  
  6220  func (n *int32Node) lessthan(key int32) bool {
  6221  	return n.key < key
  6222  }
  6223  
  6224  func (n *int32Node) equal(key int32) bool {
  6225  	return n.key == key
  6226  }
  6227  
  6228  // NewInt32 returns an empty int32 skipmap.
  6229  func NewInt32() *Int32Map {
  6230  	h := newInt32Node(0, "", maxLevel)
  6231  	h.flags.SetTrue(fullyLinked)
  6232  	return &Int32Map{
  6233  		header:       h,
  6234  		highestLevel: defaultHighestLevel,
  6235  	}
  6236  }
  6237  
  6238  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  6239  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key (ascending order).
  6240  // (It does not record the full search path: it returns immediately once the node is found.)
  6241  func (s *Int32Map) findNode(key int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) *int32Node {
  6242  	x := s.header
  6243  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6244  		succ := x.atomicLoadNext(i)
  6245  		for succ != nil && succ.lessthan(key) {
  6246  			x = succ
  6247  			succ = x.atomicLoadNext(i)
  6248  		}
  6249  		preds[i] = x
  6250  		succs[i] = succ
  6251  
  6252  		// Check if the key is already in the skipmap.
  6253  		if succ != nil && succ.equal(key) {
  6254  			return succ
  6255  		}
  6256  	}
  6257  	return nil
  6258  }
  6259  
  6260  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  6261  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key (ascending order).
  6262  func (s *Int32Map) findNodeDelete(key int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) int {
  6263  	// lFound is the index of the first (highest) layer at which the node was found.
  6264  	lFound, x := -1, s.header
  6265  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6266  		succ := x.atomicLoadNext(i)
  6267  		for succ != nil && succ.lessthan(key) {
  6268  			x = succ
  6269  			succ = x.atomicLoadNext(i)
  6270  		}
  6271  		preds[i] = x
  6272  		succs[i] = succ
  6273  
  6274  		// Check if the key is already in the skip list.
  6275  		if lFound == -1 && succ != nil && succ.equal(key) {
  6276  			lFound = i
  6277  		}
  6278  	}
  6279  	return lFound
  6280  }
  6281  
  6282  func unlockInt32(preds [maxLevel]*int32Node, highestLevel int) {
  6283  	var prevPred *int32Node
  6284  	for i := highestLevel; i >= 0; i-- {
  6285  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  6286  			preds[i].mu.Unlock()
  6287  			prevPred = preds[i]
  6288  		}
  6289  	}
  6290  }
  6291  
  6292  // Store sets the value for a key.
  6293  func (s *Int32Map) Store(key int32, value interface{}) {
  6294  	level := s.randomlevel()
  6295  	var preds, succs [maxLevel]*int32Node
  6296  	for {
  6297  		nodeFound := s.findNode(key, &preds, &succs)
  6298  		if nodeFound != nil { // indicating the key is already in the skip-list
  6299  			if !nodeFound.flags.Get(marked) {
  6300  				// We don't need to care about whether or not the node is fully linked,
  6301  				// just replace the value.
  6302  				nodeFound.storeVal(value)
  6303  				return
  6304  			}
  6305  			// If the node is marked, some other goroutine is in the process of deleting it,
  6306  			// so we retry and insert the node in the next loop iteration.
  6307  			continue
  6308  		}
  6309  
  6310  		// Add this node into the skip list.
  6311  		var (
  6312  			highestLocked        = -1 // the highest level being locked by this process
  6313  			valid                = true
  6314  			pred, succ, prevPred *int32Node
  6315  		)
  6316  		for layer := 0; valid && layer < level; layer++ {
  6317  			pred = preds[layer]   // target node's previous node
  6318  			succ = succs[layer]   // target node's next node
  6319  			if pred != prevPred { // the node in this layer could be locked by previous loop
  6320  				pred.mu.Lock()
  6321  				highestLocked = layer
  6322  				prevPred = pred
  6323  			}
  6324  			// valid checks whether another node has been inserted into this layer of
  6325  			// the skip list while this goroutine was searching. The layer is valid if:
  6326  			// 1. Neither the previous node nor the next node is marked.
  6327  			// 2. The previous node's next node in this layer is still succ.
  6328  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  6329  		}
  6330  		if !valid {
  6331  			unlockInt32(preds, highestLocked)
  6332  			continue
  6333  		}
  6334  
  6335  		nn := newInt32Node(key, value, level)
  6336  		for layer := 0; layer < level; layer++ {
  6337  			nn.storeNext(layer, succs[layer])
  6338  			preds[layer].atomicStoreNext(layer, nn)
  6339  		}
  6340  		nn.flags.SetTrue(fullyLinked)
  6341  		unlockInt32(preds, highestLocked)
  6342  		atomic.AddInt64(&s.length, 1)
  6343  		return
  6344  	}
  6345  }
  6346  
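        // A concurrency sketch (illustrative only, not produced by types_gen.go).
        // Store, Load, and Delete are safe for concurrent use without external
        // locking:
        //
        //	m := skipmap.NewInt32()
        //	var wg sync.WaitGroup
        //	for i := int32(0); i < 8; i++ {
        //		wg.Add(1)
        //		go func(k int32) {
        //			defer wg.Done()
        //			m.Store(k, k*k)
        //		}(i)
        //	}
        //	wg.Wait()
        //	fmt.Println(m.Len()) // 8
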
  6347  func (s *Int32Map) randomlevel() int {
  6348  	// Generate random level.
  6349  	level := randomLevel()
  6350  	// Update highest level if possible.
  6351  	for {
  6352  		hl := atomic.LoadInt64(&s.highestLevel)
  6353  		if int64(level) <= hl {
  6354  			break
  6355  		}
  6356  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  6357  			break
  6358  		}
  6359  	}
  6360  	return level
  6361  }
  6362  
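        // The CAS loop above is a lock-free "monotonic max" update. Distilled into
        // a standalone sketch (raiseTo is hypothetical, not part of this package):
        //
        //	func raiseTo(addr *int64, v int64) {
        //		for {
        //			cur := atomic.LoadInt64(addr)
        //			if v <= cur {
        //				return // already high enough; nothing to do
        //			}
        //			if atomic.CompareAndSwapInt64(addr, cur, v) {
        //				return // we won the race to raise it
        //			}
        //			// CAS lost to a concurrent writer: reload and retry.
        //		}
        //	}
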
  6363  // Load returns the value stored in the map for a key, or nil if no
  6364  // value is present.
  6365  // The ok result indicates whether value was found in the map.
  6366  func (s *Int32Map) Load(key int32) (value interface{}, ok bool) {
  6367  	x := s.header
  6368  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6369  		nex := x.atomicLoadNext(i)
  6370  		for nex != nil && nex.lessthan(key) {
  6371  			x = nex
  6372  			nex = x.atomicLoadNext(i)
  6373  		}
  6374  
  6375  		// Check if the key is already in the skip list.
  6376  		if nex != nil && nex.equal(key) {
  6377  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  6378  				return nex.loadVal(), true
  6379  			}
  6380  			return nil, false
  6381  		}
  6382  	}
  6383  	return nil, false
  6384  }
  6385  
  6386  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  6387  // The loaded result reports whether the key was present.
  6388  // (Modified from Delete)
  6389  func (s *Int32Map) LoadAndDelete(key int32) (value interface{}, loaded bool) {
  6390  	var (
  6391  		nodeToDelete *int32Node
  6392  		isMarked     bool // reports whether this operation marked the node
  6393  		topLayer     = -1
  6394  		preds, succs [maxLevel]*int32Node
  6395  	)
  6396  	for {
  6397  		lFound := s.findNodeDelete(key, &preds, &succs)
  6398  		if isMarked || // this process marked the node, or the node was found in the skip list
  6399  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  6400  			if !isMarked { // the node has not been marked by this goroutine yet
  6401  				nodeToDelete = succs[lFound]
  6402  				topLayer = lFound
  6403  				nodeToDelete.mu.Lock()
  6404  				if nodeToDelete.flags.Get(marked) {
  6405  					// The node is already marked by another process;
  6406  					// that process will accomplish the physical deletion.
  6407  					nodeToDelete.mu.Unlock()
  6408  					return nil, false
  6409  				}
  6410  				nodeToDelete.flags.SetTrue(marked)
  6411  				isMarked = true
  6412  			}
  6413  			// Accomplish the physical deletion.
  6414  			var (
  6415  				highestLocked        = -1 // the highest level being locked by this process
  6416  				valid                = true
  6417  				pred, succ, prevPred *int32Node
  6418  			)
  6419  			for layer := 0; valid && (layer <= topLayer); layer++ {
  6420  				pred, succ = preds[layer], succs[layer]
  6421  				if pred != prevPred { // the node in this layer could be locked by previous loop
  6422  					pred.mu.Lock()
  6423  					highestLocked = layer
  6424  					prevPred = pred
  6425  				}
  6426  				// valid checks whether another node has been inserted into this layer of
  6427  				// the skip list during this process, or whether the previous node has been
  6428  				// deleted by another process. The layer is valid if:
  6429  				// 1. The previous node is not marked (it still exists in the list).
  6430  				// 2. No other node has been inserted between pred and succ in this layer.
  6431  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  6432  			}
  6433  			if !valid {
  6434  				unlockInt32(preds, highestLocked)
  6435  				continue
  6436  			}
  6437  			for i := topLayer; i >= 0; i-- {
  6438  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  6439  				// so the plain (non-atomic) `nodeToDelete.loadNext` is safe here.
  6440  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  6441  			}
  6442  			nodeToDelete.mu.Unlock()
  6443  			unlockInt32(preds, highestLocked)
  6444  			atomic.AddInt64(&s.length, -1)
  6445  			return nodeToDelete.loadVal(), true
  6446  		}
  6447  		return nil, false
  6448  	}
  6449  }
  6450  
  6451  // LoadOrStore returns the existing value for the key if present.
  6452  // Otherwise, it stores and returns the given value.
  6453  // The loaded result is true if the value was loaded, false if stored.
  6454  // (Modified from Store)
  6455  func (s *Int32Map) LoadOrStore(key int32, value interface{}) (actual interface{}, loaded bool) {
  6456  	var (
  6457  		level        int
  6458  		preds, succs [maxLevel]*int32Node
  6459  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  6460  	)
  6461  	for {
  6462  		nodeFound := s.findNode(key, &preds, &succs)
  6463  		if nodeFound != nil { // indicating the key is already in the skip-list
  6464  			if !nodeFound.flags.Get(marked) {
  6465  				// We don't need to care about whether or not the node is fully linked,
  6466  				// just return the value.
  6467  				return nodeFound.loadVal(), true
  6468  			}
  6469  			// If the node is marked, some other goroutine is in the process of deleting it,
  6470  			// so we retry and insert the node in the next loop iteration.
  6471  			continue
  6472  		}
  6473  
  6474  		// Add this node into the skip list.
  6475  		var (
  6476  			highestLocked        = -1 // the highest level being locked by this process
  6477  			valid                = true
  6478  			pred, succ, prevPred *int32Node
  6479  		)
  6480  		if level == 0 {
  6481  			level = s.randomlevel()
  6482  			if level > hl {
  6483  				// If the highest level was updated, it usually means that many goroutines
  6484  				// are inserting items. Hopefully we can find a better path in the next loop.
  6485  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  6486  				// but this strategy's performance is almost the same as the existing method.
  6487  				continue
  6488  			}
  6489  		}
  6490  		for layer := 0; valid && layer < level; layer++ {
  6491  			pred = preds[layer]   // target node's previous node
  6492  			succ = succs[layer]   // target node's next node
  6493  			if pred != prevPred { // the node in this layer could be locked by previous loop
  6494  				pred.mu.Lock()
  6495  				highestLocked = layer
  6496  				prevPred = pred
  6497  			}
  6498  			// valid checks whether another node has been inserted into this layer of
  6499  			// the skip list while this goroutine was searching. The layer is valid if:
  6500  			// 1. Neither the previous node nor the next node is marked.
  6501  			// 2. The previous node's next node in this layer is still succ.
  6502  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  6503  		}
  6504  		if !valid {
  6505  			unlockInt32(preds, highestLocked)
  6506  			continue
  6507  		}
  6508  
  6509  		nn := newInt32Node(key, value, level)
  6510  		for layer := 0; layer < level; layer++ {
  6511  			nn.storeNext(layer, succs[layer])
  6512  			preds[layer].atomicStoreNext(layer, nn)
  6513  		}
  6514  		nn.flags.SetTrue(fullyLinked)
  6515  		unlockInt32(preds, highestLocked)
  6516  		atomic.AddInt64(&s.length, 1)
  6517  		return value, false
  6518  	}
  6519  }
  6520  
  6521  // LoadOrStoreLazy returns the existing value for the key if present.
  6522  // Otherwise, it stores and returns the value produced by f; f is invoked at most once.
  6523  // The loaded result is true if the value was loaded, false if stored.
  6524  // (Modified from LoadOrStore)
  6525  func (s *Int32Map) LoadOrStoreLazy(key int32, f func() interface{}) (actual interface{}, loaded bool) {
  6526  	var (
  6527  		level        int
  6528  		preds, succs [maxLevel]*int32Node
  6529  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  6530  	)
  6531  	for {
  6532  		nodeFound := s.findNode(key, &preds, &succs)
  6533  		if nodeFound != nil { // indicating the key is already in the skip-list
  6534  			if !nodeFound.flags.Get(marked) {
  6535  				// We don't need to care about whether or not the node is fully linked,
  6536  				// just return the value.
  6537  				return nodeFound.loadVal(), true
  6538  			}
  6539  			// If the node is marked, some other goroutine is in the process of deleting it,
  6540  			// so we retry and insert the node in the next loop iteration.
  6541  			continue
  6542  		}
  6543  
  6544  		// Add this node into the skip list.
  6545  		var (
  6546  			highestLocked        = -1 // the highest level being locked by this process
  6547  			valid                = true
  6548  			pred, succ, prevPred *int32Node
  6549  		)
  6550  		if level == 0 {
  6551  			level = s.randomlevel()
  6552  			if level > hl {
  6553  				// If the highest level was updated, it usually means that many goroutines
  6554  				// are inserting items. Hopefully we can find a better path in the next loop.
  6555  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  6556  				// but this strategy's performance is almost the same as the existing method.
  6557  				continue
  6558  			}
  6559  		}
  6560  		for layer := 0; valid && layer < level; layer++ {
  6561  			pred = preds[layer]   // target node's previous node
  6562  			succ = succs[layer]   // target node's next node
  6563  			if pred != prevPred { // the node in this layer could be locked by previous loop
  6564  				pred.mu.Lock()
  6565  				highestLocked = layer
  6566  				prevPred = pred
  6567  			}
  6568  			// valid checks whether another node has been inserted into this layer of
  6569  			// the skip list while this goroutine was searching. The layer is valid if:
  6570  			// 1. Neither the previous node nor the next node is marked.
  6571  			// 2. The previous node's next node in this layer is still succ.
  6572  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  6573  		}
  6574  		if !valid {
  6575  			unlockInt32(preds, highestLocked)
  6576  			continue
  6577  		}
  6578  		value := f()
  6579  		nn := newInt32Node(key, value, level)
  6580  		for layer := 0; layer < level; layer++ {
  6581  			nn.storeNext(layer, succs[layer])
  6582  			preds[layer].atomicStoreNext(layer, nn)
  6583  		}
  6584  		nn.flags.SetTrue(fullyLinked)
  6585  		unlockInt32(preds, highestLocked)
  6586  		atomic.AddInt64(&s.length, 1)
  6587  		return value, false
  6588  	}
  6589  }
  6590  
  6591  // Delete deletes the value for a key.
  6592  func (s *Int32Map) Delete(key int32) bool {
  6593  	var (
  6594  		nodeToDelete *int32Node
  6595  		isMarked     bool // reports whether this operation marked the node
  6596  		topLayer     = -1
  6597  		preds, succs [maxLevel]*int32Node
  6598  	)
  6599  	for {
  6600  		lFound := s.findNodeDelete(key, &preds, &succs)
  6601  		if isMarked || // this process marked the node, or the node was found in the skip list
  6602  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  6603  			if !isMarked { // the node has not been marked by this goroutine yet
  6604  				nodeToDelete = succs[lFound]
  6605  				topLayer = lFound
  6606  				nodeToDelete.mu.Lock()
  6607  				if nodeToDelete.flags.Get(marked) {
  6608  					// The node is already marked by another process;
  6609  					// that process will accomplish the physical deletion.
  6610  					nodeToDelete.mu.Unlock()
  6611  					return false
  6612  				}
  6613  				nodeToDelete.flags.SetTrue(marked)
  6614  				isMarked = true
  6615  			}
  6616  			// Accomplish the physical deletion.
  6617  			var (
  6618  				highestLocked        = -1 // the highest level being locked by this process
  6619  				valid                = true
  6620  				pred, succ, prevPred *int32Node
  6621  			)
  6622  			for layer := 0; valid && (layer <= topLayer); layer++ {
  6623  				pred, succ = preds[layer], succs[layer]
  6624  				if pred != prevPred { // the node in this layer could be locked by previous loop
  6625  					pred.mu.Lock()
  6626  					highestLocked = layer
  6627  					prevPred = pred
  6628  				}
  6629  				// valid checks whether another node has been inserted into this layer of
  6630  				// the skip list during this process, or whether the previous node has been
  6631  				// deleted by another process. The layer is valid if:
  6632  				// 1. The previous node is not marked (it still exists in the list).
  6633  				// 2. No other node has been inserted between pred and succ in this layer.
  6634  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  6635  			}
  6636  			if !valid {
  6637  				unlockInt32(preds, highestLocked)
  6638  				continue
  6639  			}
  6640  			for i := topLayer; i >= 0; i-- {
  6641  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  6642  				// so the plain (non-atomic) `nodeToDelete.loadNext` is safe here.
  6643  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  6644  			}
  6645  			nodeToDelete.mu.Unlock()
  6646  			unlockInt32(preds, highestLocked)
  6647  			atomic.AddInt64(&s.length, -1)
  6648  			return true
  6649  		}
  6650  		return false
  6651  	}
  6652  }
  6653  
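        // A usage sketch (illustrative only, not produced by types_gen.go). Delete
        // reports whether this call removed the key; a racing deleter that finds
        // the node already marked returns false and leaves the unlink to the marker:
        //
        //	if m.Delete(42) {
        //		// this goroutine won the race and logically removed key 42
        //	}
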
  6654  // Range calls f sequentially for each key and value present in the skipmap.
  6655  // If f returns false, range stops the iteration.
  6656  //
  6657  // Range does not necessarily correspond to any consistent snapshot of the Map's
  6658  // contents: no key will be visited more than once, but if the value for any key
  6659  // is stored or deleted concurrently, Range may reflect any mapping for that key
  6660  // from any point during the Range call.
  6661  func (s *Int32Map) Range(f func(key int32, value interface{}) bool) {
  6662  	x := s.header.atomicLoadNext(0)
  6663  	for x != nil {
  6664  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  6665  			x = x.atomicLoadNext(0)
  6666  			continue
  6667  		}
  6668  		if !f(x.key, x.loadVal()) {
  6669  			break
  6670  		}
  6671  		x = x.atomicLoadNext(0)
  6672  	}
  6673  }
  6674  
  6675  // Len returns the length of this skipmap.
  6676  func (s *Int32Map) Len() int {
  6677  	return int(atomic.LoadInt64(&s.length))
  6678  }
  6679  
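        // A caveat worth noting (editorial): Len reads an atomically maintained
        // counter, so under concurrent Store/Delete traffic the result is only a
        // point-in-time snapshot and may be stale by the time it is used.
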
  6680  // Int32MapDesc represents a map based on a skip list in descending order.
  6681  type Int32MapDesc struct {
  6682  	header       *int32NodeDesc
  6683  	length       int64
  6684  	highestLevel int64 // highest level for now
  6685  }
  6686  
  6687  type int32NodeDesc struct {
  6688  	key   int32
  6689  	value unsafe.Pointer // *interface{}
  6690  	next  optionalArray  // [level]*int32NodeDesc
  6691  	mu    sync.Mutex
  6692  	flags bitflag
  6693  	level uint32
  6694  }
  6695  
  6696  func newInt32NodeDesc(key int32, value interface{}, level int) *int32NodeDesc {
  6697  	node := &int32NodeDesc{
  6698  		key:   key,
  6699  		level: uint32(level),
  6700  	}
  6701  	node.storeVal(value)
  6702  	if level > op1 {
  6703  		node.next.extra = new([op2]unsafe.Pointer)
  6704  	}
  6705  	return node
  6706  }
  6707  
  6708  func (n *int32NodeDesc) storeVal(value interface{}) {
  6709  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  6710  }
  6711  
  6712  func (n *int32NodeDesc) loadVal() interface{} {
  6713  	return *(*interface{})(atomic.LoadPointer(&n.value))
  6714  }
  6715  
  6716  func (n *int32NodeDesc) loadNext(i int) *int32NodeDesc {
  6717  	return (*int32NodeDesc)(n.next.load(i))
  6718  }
  6719  
  6720  func (n *int32NodeDesc) storeNext(i int, node *int32NodeDesc) {
  6721  	n.next.store(i, unsafe.Pointer(node))
  6722  }
  6723  
  6724  func (n *int32NodeDesc) atomicLoadNext(i int) *int32NodeDesc {
  6725  	return (*int32NodeDesc)(n.next.atomicLoad(i))
  6726  }
  6727  
  6728  func (n *int32NodeDesc) atomicStoreNext(i int, node *int32NodeDesc) {
  6729  	n.next.atomicStore(i, unsafe.Pointer(node))
  6730  }
  6731  
  6732  func (n *int32NodeDesc) lessthan(key int32) bool {
  6733  	return n.key > key
  6734  }
  6735  
  6736  func (n *int32NodeDesc) equal(key int32) bool {
  6737  	return n.key == key
  6738  }
  6739  
  6740  // NewInt32Desc returns an empty int32 skipmap in descending key order.
  6741  func NewInt32Desc() *Int32MapDesc {
  6742  	h := newInt32NodeDesc(0, "", maxLevel)
  6743  	h.flags.SetTrue(fullyLinked)
  6744  	return &Int32MapDesc{
  6745  		header:       h,
  6746  		highestLevel: defaultHighestLevel,
  6747  	}
  6748  }
  6749  
  6750  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  6751  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key (descending order).
  6752  // (It does not record the full search path: it returns immediately once the node is found.)
  6753  func (s *Int32MapDesc) findNode(key int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) *int32NodeDesc {
  6754  	x := s.header
  6755  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6756  		succ := x.atomicLoadNext(i)
  6757  		for succ != nil && succ.lessthan(key) {
  6758  			x = succ
  6759  			succ = x.atomicLoadNext(i)
  6760  		}
  6761  		preds[i] = x
  6762  		succs[i] = succ
  6763  
  6764  		// Check if the key is already in the skipmap.
  6765  		if succ != nil && succ.equal(key) {
  6766  			return succ
  6767  		}
  6768  	}
  6769  	return nil
  6770  }
  6771  
  6772  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  6773  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key (descending order).
  6774  func (s *Int32MapDesc) findNodeDelete(key int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) int {
  6775  	// lFound is the index of the first (highest) layer at which the node was found.
  6776  	lFound, x := -1, s.header
  6777  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6778  		succ := x.atomicLoadNext(i)
  6779  		for succ != nil && succ.lessthan(key) {
  6780  			x = succ
  6781  			succ = x.atomicLoadNext(i)
  6782  		}
  6783  		preds[i] = x
  6784  		succs[i] = succ
  6785  
  6786  		// Check if the key is already in the skip list.
  6787  		if lFound == -1 && succ != nil && succ.equal(key) {
  6788  			lFound = i
  6789  		}
  6790  	}
  6791  	return lFound
  6792  }
  6793  
  6794  func unlockInt32Desc(preds [maxLevel]*int32NodeDesc, highestLevel int) {
  6795  	var prevPred *int32NodeDesc
  6796  	for i := highestLevel; i >= 0; i-- {
  6797  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  6798  			preds[i].mu.Unlock()
  6799  			prevPred = preds[i]
  6800  		}
  6801  	}
  6802  }
  6803  
  6804  // Store sets the value for a key.
  6805  func (s *Int32MapDesc) Store(key int32, value interface{}) {
  6806  	level := s.randomlevel()
  6807  	var preds, succs [maxLevel]*int32NodeDesc
  6808  	for {
  6809  		nodeFound := s.findNode(key, &preds, &succs)
  6810  		if nodeFound != nil { // indicating the key is already in the skip-list
  6811  			if !nodeFound.flags.Get(marked) {
  6812  				// We don't need to care about whether or not the node is fully linked,
  6813  				// just replace the value.
  6814  				nodeFound.storeVal(value)
  6815  				return
  6816  			}
  6817  			// If the node is marked, some other goroutine is in the process of deleting it,
  6818  			// so we retry and insert the node in the next loop iteration.
  6819  			continue
  6820  		}
  6821  
  6822  		// Add this node into the skip list.
  6823  		var (
  6824  			highestLocked        = -1 // the highest level being locked by this process
  6825  			valid                = true
  6826  			pred, succ, prevPred *int32NodeDesc
  6827  		)
  6828  		for layer := 0; valid && layer < level; layer++ {
  6829  			pred = preds[layer]   // target node's previous node
  6830  			succ = succs[layer]   // target node's next node
  6831  			if pred != prevPred { // the node in this layer could be locked by previous loop
  6832  				pred.mu.Lock()
  6833  				highestLocked = layer
  6834  				prevPred = pred
  6835  			}
  6836  			// valid checks whether another node has been inserted into this layer of
  6837  			// the skip list while this goroutine was searching. The layer is valid if:
  6838  			// 1. Neither the previous node nor the next node is marked.
  6839  			// 2. The previous node's next node in this layer is still succ.
  6840  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  6841  		}
  6842  		if !valid {
  6843  			unlockInt32Desc(preds, highestLocked)
  6844  			continue
  6845  		}
  6846  
  6847  		nn := newInt32NodeDesc(key, value, level)
  6848  		for layer := 0; layer < level; layer++ {
  6849  			nn.storeNext(layer, succs[layer])
  6850  			preds[layer].atomicStoreNext(layer, nn)
  6851  		}
  6852  		nn.flags.SetTrue(fullyLinked)
  6853  		unlockInt32Desc(preds, highestLocked)
  6854  		atomic.AddInt64(&s.length, 1)
  6855  		return
  6856  	}
  6857  }
  6858  
  6859  func (s *Int32MapDesc) randomlevel() int {
  6860  	// Generate random level.
  6861  	level := randomLevel()
  6862  	// Update highest level if possible.
  6863  	for {
  6864  		hl := atomic.LoadInt64(&s.highestLevel)
  6865  		if int64(level) <= hl {
  6866  			break
  6867  		}
  6868  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  6869  			break
  6870  		}
  6871  	}
  6872  	return level
  6873  }
  6874  
  6875  // Load returns the value stored in the map for a key, or nil if no
  6876  // value is present.
  6877  // The ok result indicates whether value was found in the map.
  6878  func (s *Int32MapDesc) Load(key int32) (value interface{}, ok bool) {
  6879  	x := s.header
  6880  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6881  		nex := x.atomicLoadNext(i)
  6882  		for nex != nil && nex.lessthan(key) {
  6883  			x = nex
  6884  			nex = x.atomicLoadNext(i)
  6885  		}
  6886  
  6887  		// Check if the key is already in the skip list.
  6888  		if nex != nil && nex.equal(key) {
  6889  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  6890  				return nex.loadVal(), true
  6891  			}
  6892  			return nil, false
  6893  		}
  6894  	}
  6895  	return nil, false
  6896  }
  6897  
  6898  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  6899  // The loaded result reports whether the key was present.
  6900  // (Modified from Delete)
  6901  func (s *Int32MapDesc) LoadAndDelete(key int32) (value interface{}, loaded bool) {
  6902  	var (
  6903  		nodeToDelete *int32NodeDesc
  6904  		isMarked     bool // reports whether this operation marked the node
  6905  		topLayer     = -1
  6906  		preds, succs [maxLevel]*int32NodeDesc
  6907  	)
  6908  	for {
  6909  		lFound := s.findNodeDelete(key, &preds, &succs)
  6910  		if isMarked || // this process marked the node, or the node was found in the skip list
  6911  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  6912  			if !isMarked { // the node has not been marked by this goroutine yet
  6913  				nodeToDelete = succs[lFound]
  6914  				topLayer = lFound
  6915  				nodeToDelete.mu.Lock()
  6916  				if nodeToDelete.flags.Get(marked) {
  6917  					// The node is already marked by another process;
  6918  					// that process will accomplish the physical deletion.
  6919  					nodeToDelete.mu.Unlock()
  6920  					return nil, false
  6921  				}
  6922  				nodeToDelete.flags.SetTrue(marked)
  6923  				isMarked = true
  6924  			}
  6925  			// Accomplish the physical deletion.
  6926  			var (
  6927  				highestLocked        = -1 // the highest level being locked by this process
  6928  				valid                = true
  6929  				pred, succ, prevPred *int32NodeDesc
  6930  			)
  6931  			for layer := 0; valid && (layer <= topLayer); layer++ {
  6932  				pred, succ = preds[layer], succs[layer]
  6933  				if pred != prevPred { // the node in this layer could be locked by previous loop
  6934  					pred.mu.Lock()
  6935  					highestLocked = layer
  6936  					prevPred = pred
  6937  				}
  6938  				// valid checks whether another node has been inserted into this layer of
  6939  				// the skip list during this process, or whether the previous node has been
  6940  				// deleted by another process. The layer is valid if:
  6941  				// 1. The previous node is not marked (it still exists in the list).
  6942  				// 2. No other node has been inserted between pred and succ in this layer.
  6943  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  6944  			}
  6945  			if !valid {
  6946  				unlockInt32Desc(preds, highestLocked)
  6947  				continue
  6948  			}
  6949  			for i := topLayer; i >= 0; i-- {
  6950  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  6951  				// so the plain (non-atomic) `nodeToDelete.loadNext` is safe here.
  6952  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  6953  			}
  6954  			nodeToDelete.mu.Unlock()
  6955  			unlockInt32Desc(preds, highestLocked)
  6956  			atomic.AddInt64(&s.length, -1)
  6957  			return nodeToDelete.loadVal(), true
  6958  		}
  6959  		return nil, false
  6960  	}
  6961  }
  6962  
  6963  // LoadOrStore returns the existing value for the key if present.
  6964  // Otherwise, it stores and returns the given value.
  6965  // The loaded result is true if the value was loaded, false if stored.
  6966  // (Modified from Store)
  6967  func (s *Int32MapDesc) LoadOrStore(key int32, value interface{}) (actual interface{}, loaded bool) {
  6968  	var (
  6969  		level        int
  6970  		preds, succs [maxLevel]*int32NodeDesc
  6971  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  6972  	)
  6973  	for {
  6974  		nodeFound := s.findNode(key, &preds, &succs)
  6975  		if nodeFound != nil { // indicating the key is already in the skip-list
  6976  			if !nodeFound.flags.Get(marked) {
  6977  				// We don't need to care about whether or not the node is fully linked,
  6978  				// just return the value.
  6979  				return nodeFound.loadVal(), true
  6980  			}
  6981  			// If the node is marked, some other goroutine is in the process of deleting it,
  6982  			// so we retry and insert the node in the next loop iteration.
  6983  			continue
  6984  		}
  6985  
  6986  		// Add this node into the skip list.
  6987  		var (
  6988  			highestLocked        = -1 // the highest level being locked by this process
  6989  			valid                = true
  6990  			pred, succ, prevPred *int32NodeDesc
  6991  		)
  6992  		if level == 0 {
  6993  			level = s.randomlevel()
  6994  			if level > hl {
  6995  				// If the highest level has been updated, it usually means that many goroutines
  6996  				// are inserting items. Hopefully we can find a better path in the next loop.
  6997  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  6998  				// but this strategy's performance is almost the same as the existing method.
  6999  				continue
  7000  			}
  7001  		}
  7002  		for layer := 0; valid && layer < level; layer++ {
  7003  			pred = preds[layer]   // target node's previous node
  7004  			succ = succs[layer]   // target node's next node
  7005  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7006  				pred.mu.Lock()
  7007  				highestLocked = layer
  7008  				prevPred = pred
  7009  			}
  7010  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  7011  			// It is valid if:
  7012  			// 1. Neither the previous node nor the next node is marked.
  7013  			// 2. The previous node's next node is succ in this layer.
  7014  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  7015  		}
  7016  		if !valid {
  7017  			unlockInt32Desc(preds, highestLocked)
  7018  			continue
  7019  		}
  7020  
  7021  		nn := newInt32NodeDesc(key, value, level)
  7022  		for layer := 0; layer < level; layer++ {
  7023  			nn.storeNext(layer, succs[layer])
  7024  			preds[layer].atomicStoreNext(layer, nn)
  7025  		}
  7026  		nn.flags.SetTrue(fullyLinked)
  7027  		unlockInt32Desc(preds, highestLocked)
  7028  		atomic.AddInt64(&s.length, 1)
  7029  		return value, false
  7030  	}
  7031  }
  7032  
  7033  // LoadOrStoreLazy returns the existing value for the key if present.
  7034  // Otherwise, it stores and returns the value obtained from f; f is called only once.
  7035  // The loaded result is true if the value was loaded, false if stored.
  7036  // (Modified from LoadOrStore)
  7037  func (s *Int32MapDesc) LoadOrStoreLazy(key int32, f func() interface{}) (actual interface{}, loaded bool) {
  7038  	var (
  7039  		level        int
  7040  		preds, succs [maxLevel]*int32NodeDesc
  7041  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  7042  	)
  7043  	for {
  7044  		nodeFound := s.findNode(key, &preds, &succs)
  7045  		if nodeFound != nil { // indicating the key is already in the skip-list
  7046  			if !nodeFound.flags.Get(marked) {
  7047  				// We don't need to care about whether or not the node is fully linked,
  7048  				// just return the value.
  7049  				return nodeFound.loadVal(), true
  7050  			}
  7051  			// If the node is marked, it means some other goroutine is in the process of deleting it,
  7052  			// so we try to add the node again in the next loop.
  7053  			continue
  7054  		}
  7055  
  7056  		// Add this node into the skip list.
  7057  		var (
  7058  			highestLocked        = -1 // the highest level being locked by this process
  7059  			valid                = true
  7060  			pred, succ, prevPred *int32NodeDesc
  7061  		)
  7062  		if level == 0 {
  7063  			level = s.randomlevel()
  7064  			if level > hl {
  7065  				// If the highest level has been updated, it usually means that many goroutines
  7066  				// are inserting items. Hopefully we can find a better path in the next loop.
  7067  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  7068  				// but this strategy's performance is almost the same as the existing method.
  7069  				continue
  7070  			}
  7071  		}
  7072  		for layer := 0; valid && layer < level; layer++ {
  7073  			pred = preds[layer]   // target node's previous node
  7074  			succ = succs[layer]   // target node's next node
  7075  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7076  				pred.mu.Lock()
  7077  				highestLocked = layer
  7078  				prevPred = pred
  7079  			}
  7080  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  7081  			// It is valid if:
  7082  			// 1. Neither the previous node nor the next node is marked.
  7083  			// 2. The previous node's next node is succ in this layer.
  7084  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  7085  		}
  7086  		if !valid {
  7087  			unlockInt32Desc(preds, highestLocked)
  7088  			continue
  7089  		}
  7090  		value := f()
  7091  		nn := newInt32NodeDesc(key, value, level)
  7092  		for layer := 0; layer < level; layer++ {
  7093  			nn.storeNext(layer, succs[layer])
  7094  			preds[layer].atomicStoreNext(layer, nn)
  7095  		}
  7096  		nn.flags.SetTrue(fullyLinked)
  7097  		unlockInt32Desc(preds, highestLocked)
  7098  		atomic.AddInt64(&s.length, 1)
  7099  		return value, false
  7100  	}
  7101  }
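
// A hedged usage sketch for LoadOrStoreLazy (not part of the generated code;
// NewInt32Desc is the constructor implied by this file's naming convention,
// and buildValue is a hypothetical helper). f runs only when the key is
// absent, so expensive values are not constructed on the hot path:
//
//	m := NewInt32Desc()
//	actual, loaded := m.LoadOrStoreLazy(42, func() interface{} {
//		return buildValue(42) // hypothetical; called at most once per successful store
//	})
//	_, _ = actual, loaded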
  7102  
  7103  // Delete deletes the value for a key.
  7104  func (s *Int32MapDesc) Delete(key int32) bool {
  7105  	var (
  7106  		nodeToDelete *int32NodeDesc
  7107  		isMarked     bool // indicates whether this operation has marked the node
  7108  		topLayer     = -1
  7109  		preds, succs [maxLevel]*int32NodeDesc
  7110  	)
  7111  	for {
  7112  		lFound := s.findNodeDelete(key, &preds, &succs)
  7113  		if isMarked || // this process has marked the node, or the node can be found in the skip list
  7114  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  7115  			if !isMarked { // the node has not been marked by this process yet
  7116  				nodeToDelete = succs[lFound]
  7117  				topLayer = lFound
  7118  				nodeToDelete.mu.Lock()
  7119  				if nodeToDelete.flags.Get(marked) {
  7120  					// The node is marked by another process,
  7121  					// the physical deletion will be accomplished by another process.
  7122  					nodeToDelete.mu.Unlock()
  7123  					return false
  7124  				}
  7125  				nodeToDelete.flags.SetTrue(marked)
  7126  				isMarked = true
  7127  			}
  7128  			// Accomplish the physical deletion.
  7129  			var (
  7130  				highestLocked        = -1 // the highest level being locked by this process
  7131  				valid                = true
  7132  				pred, succ, prevPred *int32NodeDesc
  7133  			)
  7134  			for layer := 0; valid && (layer <= topLayer); layer++ {
  7135  				pred, succ = preds[layer], succs[layer]
  7136  				if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7137  					pred.mu.Lock()
  7138  					highestLocked = layer
  7139  					prevPred = pred
  7140  				}
  7141  				// valid checks whether another node has been inserted into the skip list in this layer
  7142  				// during this process, or whether the previous node has been deleted by another process.
  7143  				// It is valid if:
  7144  				// 1. the previous node still exists (is not marked).
  7145  				// 2. no other node has been inserted into the skip list in this layer.
  7146  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  7147  			}
  7148  			if !valid {
  7149  				unlockInt32Desc(preds, highestLocked)
  7150  				continue
  7151  			}
  7152  			for i := topLayer; i >= 0; i-- {
  7153  				// Now we own the `nodeToDelete`, no other goroutine will modify it.
  7154  				// So we don't need `nodeToDelete.atomicLoadNext`.
  7155  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  7156  			}
  7157  			nodeToDelete.mu.Unlock()
  7158  			unlockInt32Desc(preds, highestLocked)
  7159  			atomic.AddInt64(&s.length, -1)
  7160  			return true
  7161  		}
  7162  		return false
  7163  	}
  7164  }
  7165  
  7166  // Range calls f sequentially for each key and value present in the skipmap.
  7167  // If f returns false, range stops the iteration.
  7168  //
  7169  // Range does not necessarily correspond to any consistent snapshot of the Map's
  7170  // contents: no key will be visited more than once, but if the value for any key
  7171  // is stored or deleted concurrently, Range may reflect any mapping for that key
  7172  // from any point during the Range call.
  7173  func (s *Int32MapDesc) Range(f func(key int32, value interface{}) bool) {
  7174  	x := s.header.atomicLoadNext(0)
  7175  	for x != nil {
  7176  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  7177  			x = x.atomicLoadNext(0)
  7178  			continue
  7179  		}
  7180  		if !f(x.key, x.loadVal()) {
  7181  			break
  7182  		}
  7183  		x = x.atomicLoadNext(0)
  7184  	}
  7185  }
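
// A hedged usage sketch for Range on the descending map above: iteration
// walks the bottom layer while writers may be active, so the callback can
// observe a mix of old and new mappings, as documented. Returning false
// stops early (process is a hypothetical consumer):
//
//	m.Range(func(key int32, value interface{}) bool {
//		process(key, value)
//		return key > -100 // keys arrive in descending order; stop below -100
//	})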
  7186  
  7187  // Len returns the length of this skipmap.
  7188  func (s *Int32MapDesc) Len() int {
  7189  	return int(atomic.LoadInt64(&s.length))
  7190  }
  7191  
  7192  // RuneMap represents a map based on a skip list, in ascending order.
  7193  type RuneMap struct {
  7194  	header       *runeNode
  7195  	length       int64
  7196  	highestLevel int64 // highest level for now
  7197  }
  7198  
  7199  type runeNode struct {
  7200  	key   rune
  7201  	value unsafe.Pointer // *interface{}
  7202  	next  optionalArray  // [level]*runeNode
  7203  	mu    sync.Mutex
  7204  	flags bitflag
  7205  	level uint32
  7206  }
  7207  
  7208  func newRuneNode(key rune, value interface{}, level int) *runeNode {
  7209  	node := &runeNode{
  7210  		key:   key,
  7211  		level: uint32(level),
  7212  	}
  7213  	node.storeVal(value)
  7214  	if level > op1 {
  7215  		node.next.extra = new([op2]unsafe.Pointer)
  7216  	}
  7217  	return node
  7218  }
  7219  
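
// Editorial note: optionalArray keeps the first op1 next pointers inline in
// the node and, as above, allocates the extra [op2]unsafe.Pointer block only
// for nodes whose level exceeds op1, so the common low-level nodes stay
// compact.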
  7220  func (n *runeNode) storeVal(value interface{}) {
  7221  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  7222  }
  7223  
  7224  func (n *runeNode) loadVal() interface{} {
  7225  	return *(*interface{})(atomic.LoadPointer(&n.value))
  7226  }
  7227  
  7228  func (n *runeNode) loadNext(i int) *runeNode {
  7229  	return (*runeNode)(n.next.load(i))
  7230  }
  7231  
  7232  func (n *runeNode) storeNext(i int, node *runeNode) {
  7233  	n.next.store(i, unsafe.Pointer(node))
  7234  }
  7235  
  7236  func (n *runeNode) atomicLoadNext(i int) *runeNode {
  7237  	return (*runeNode)(n.next.atomicLoad(i))
  7238  }
  7239  
  7240  func (n *runeNode) atomicStoreNext(i int, node *runeNode) {
  7241  	n.next.atomicStore(i, unsafe.Pointer(node))
  7242  }
  7243  
  7244  func (n *runeNode) lessthan(key rune) bool {
  7245  	return n.key < key
  7246  }
  7247  
  7248  func (n *runeNode) equal(key rune) bool {
  7249  	return n.key == key
  7250  }
  7251  
  7252  // NewRune returns an empty rune skipmap.
  7253  func NewRune() *RuneMap {
  7254  	h := newRuneNode(0, "", maxLevel)
  7255  	h.flags.SetTrue(fullyLinked)
  7256  	return &RuneMap{
  7257  		header:       h,
  7258  		highestLevel: defaultHighestLevel,
  7259  	}
  7260  }
  7261  
  7262  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  7263  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  7264  // (It does not compute the full path; it returns as soon as the node is found.)
  7265  func (s *RuneMap) findNode(key rune, preds *[maxLevel]*runeNode, succs *[maxLevel]*runeNode) *runeNode {
  7266  	x := s.header
  7267  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7268  		succ := x.atomicLoadNext(i)
  7269  		for succ != nil && succ.lessthan(key) {
  7270  			x = succ
  7271  			succ = x.atomicLoadNext(i)
  7272  		}
  7273  		preds[i] = x
  7274  		succs[i] = succ
  7275  
  7276  		// Check if the key is already in the skipmap.
  7277  		if succ != nil && succ.equal(key) {
  7278  			return succ
  7279  		}
  7280  	}
  7281  	return nil
  7282  }
  7283  
  7284  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  7285  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  7286  func (s *RuneMap) findNodeDelete(key rune, preds *[maxLevel]*runeNode, succs *[maxLevel]*runeNode) int {
  7287  	// lFound represents the index of the first layer at which it found a node.
  7288  	lFound, x := -1, s.header
  7289  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7290  		succ := x.atomicLoadNext(i)
  7291  		for succ != nil && succ.lessthan(key) {
  7292  			x = succ
  7293  			succ = x.atomicLoadNext(i)
  7294  		}
  7295  		preds[i] = x
  7296  		succs[i] = succ
  7297  
  7298  		// Check if the key is already in the skip list.
  7299  		if lFound == -1 && succ != nil && succ.equal(key) {
  7300  			lFound = i
  7301  		}
  7302  	}
  7303  	return lFound
  7304  }
  7305  
  7306  func unlockRune(preds [maxLevel]*runeNode, highestLevel int) {
  7307  	var prevPred *runeNode
  7308  	for i := highestLevel; i >= 0; i-- {
  7309  		if preds[i] != prevPred { // the node could have been unlocked in a previous iteration
  7310  			preds[i].mu.Unlock()
  7311  			prevPred = preds[i]
  7312  		}
  7313  	}
  7314  }
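
// Editorial note: unlockRune mirrors the locking loops in Store and Delete.
// Adjacent layers often share the same predecessor node, so each distinct
// node is unlocked exactly once; highestLevel here is the highestLocked
// index recorded by the caller.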
  7315  
  7316  // Store sets the value for a key.
  7317  func (s *RuneMap) Store(key rune, value interface{}) {
  7318  	level := s.randomlevel()
  7319  	var preds, succs [maxLevel]*runeNode
  7320  	for {
  7321  		nodeFound := s.findNode(key, &preds, &succs)
  7322  		if nodeFound != nil { // indicating the key is already in the skip-list
  7323  			if !nodeFound.flags.Get(marked) {
  7324  				// We don't need to care about whether or not the node is fully linked,
  7325  				// just replace the value.
  7326  				nodeFound.storeVal(value)
  7327  				return
  7328  			}
  7329  			// If the node is marked, it means some other goroutine is in the process of deleting it,
  7330  			// so we try to add the node again in the next loop.
  7331  			continue
  7332  		}
  7333  
  7334  		// Add this node into the skip list.
  7335  		var (
  7336  			highestLocked        = -1 // the highest level being locked by this process
  7337  			valid                = true
  7338  			pred, succ, prevPred *runeNode
  7339  		)
  7340  		for layer := 0; valid && layer < level; layer++ {
  7341  			pred = preds[layer]   // target node's previous node
  7342  			succ = succs[layer]   // target node's next node
  7343  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7344  				pred.mu.Lock()
  7345  				highestLocked = layer
  7346  				prevPred = pred
  7347  			}
  7348  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  7349  			// It is valid if:
  7350  			// 1. Neither the previous node nor the next node is marked.
  7351  			// 2. The previous node's next node is succ in this layer.
  7352  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  7353  		}
  7354  		if !valid {
  7355  			unlockRune(preds, highestLocked)
  7356  			continue
  7357  		}
  7358  
  7359  		nn := newRuneNode(key, value, level)
  7360  		for layer := 0; layer < level; layer++ {
  7361  			nn.storeNext(layer, succs[layer])
  7362  			preds[layer].atomicStoreNext(layer, nn)
  7363  		}
  7364  		nn.flags.SetTrue(fullyLinked)
  7365  		unlockRune(preds, highestLocked)
  7366  		atomic.AddInt64(&s.length, 1)
  7367  		return
  7368  	}
  7369  }
  7370  
  7371  func (s *RuneMap) randomlevel() int {
  7372  	// Generate random level.
  7373  	level := randomLevel()
  7374  	// Update highest level if possible.
  7375  	for {
  7376  		hl := atomic.LoadInt64(&s.highestLevel)
  7377  		if int64(level) <= hl {
  7378  			break
  7379  		}
  7380  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  7381  			break
  7382  		}
  7383  	}
  7384  	return level
  7385  }
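
// Editorial note: the loop above is a lock-free compare-and-swap "max"
// update. A reader may briefly observe a stale highestLevel, which is
// harmless: searches simply start one layer lower, and the level never
// shrinks (it is bounded by maxLevel).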
  7386  
  7387  // Load returns the value stored in the map for a key, or nil if no
  7388  // value is present.
  7389  // The ok result indicates whether value was found in the map.
  7390  func (s *RuneMap) Load(key rune) (value interface{}, ok bool) {
  7391  	x := s.header
  7392  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7393  		nex := x.atomicLoadNext(i)
  7394  		for nex != nil && nex.lessthan(key) {
  7395  			x = nex
  7396  			nex = x.atomicLoadNext(i)
  7397  		}
  7398  
  7399  		// Check if the key is already in the skip list.
  7400  		if nex != nil && nex.equal(key) {
  7401  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  7402  				return nex.loadVal(), true
  7403  			}
  7404  			return nil, false
  7405  		}
  7406  	}
  7407  	return nil, false
  7408  }
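
// A hedged usage sketch for the basic Store/Load pair (not part of the
// generated code):
//
//	m := NewRune()
//	m.Store('x', "ex")
//	if v, ok := m.Load('x'); ok {
//		_ = v // "ex"
//	}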
  7409  
  7410  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  7411  // The loaded result reports whether the key was present.
  7412  // (Modified from Delete)
  7413  func (s *RuneMap) LoadAndDelete(key rune) (value interface{}, loaded bool) {
  7414  	var (
  7415  		nodeToDelete *runeNode
  7416  		isMarked     bool // indicates whether this operation has marked the node
  7417  		topLayer     = -1
  7418  		preds, succs [maxLevel]*runeNode
  7419  	)
  7420  	for {
  7421  		lFound := s.findNodeDelete(key, &preds, &succs)
  7422  		if isMarked || // this process has marked the node, or the node can be found in the skip list
  7423  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  7424  			if !isMarked { // the node has not been marked by this process yet
  7425  				nodeToDelete = succs[lFound]
  7426  				topLayer = lFound
  7427  				nodeToDelete.mu.Lock()
  7428  				if nodeToDelete.flags.Get(marked) {
  7429  					// The node is marked by another process,
  7430  					// the physical deletion will be accomplished by another process.
  7431  					nodeToDelete.mu.Unlock()
  7432  					return nil, false
  7433  				}
  7434  				nodeToDelete.flags.SetTrue(marked)
  7435  				isMarked = true
  7436  			}
  7437  			// Accomplish the physical deletion.
  7438  			var (
  7439  				highestLocked        = -1 // the highest level being locked by this process
  7440  				valid                = true
  7441  				pred, succ, prevPred *runeNode
  7442  			)
  7443  			for layer := 0; valid && (layer <= topLayer); layer++ {
  7444  				pred, succ = preds[layer], succs[layer]
  7445  				if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7446  					pred.mu.Lock()
  7447  					highestLocked = layer
  7448  					prevPred = pred
  7449  				}
  7450  				// valid checks whether another node has been inserted into the skip list in this layer
  7451  				// during this process, or whether the previous node has been deleted by another process.
  7452  				// It is valid if:
  7453  				// 1. the previous node still exists (is not marked).
  7454  				// 2. no other node has been inserted into the skip list in this layer.
  7455  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  7456  			}
  7457  			if !valid {
  7458  				unlockRune(preds, highestLocked)
  7459  				continue
  7460  			}
  7461  			for i := topLayer; i >= 0; i-- {
  7462  				// Now we own the `nodeToDelete`, no other goroutine will modify it.
  7463  				// So we don't need `nodeToDelete.atomicLoadNext`.
  7464  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  7465  			}
  7466  			nodeToDelete.mu.Unlock()
  7467  			unlockRune(preds, highestLocked)
  7468  			atomic.AddInt64(&s.length, -1)
  7469  			return nodeToDelete.loadVal(), true
  7470  		}
  7471  		return nil, false
  7472  	}
  7473  }
  7474  
  7475  // LoadOrStore returns the existing value for the key if present.
  7476  // Otherwise, it stores and returns the given value.
  7477  // The loaded result is true if the value was loaded, false if stored.
  7478  // (Modified from Store)
  7479  func (s *RuneMap) LoadOrStore(key rune, value interface{}) (actual interface{}, loaded bool) {
  7480  	var (
  7481  		level        int
  7482  		preds, succs [maxLevel]*runeNode
  7483  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  7484  	)
  7485  	for {
  7486  		nodeFound := s.findNode(key, &preds, &succs)
  7487  		if nodeFound != nil { // indicating the key is already in the skip-list
  7488  			if !nodeFound.flags.Get(marked) {
  7489  				// We don't need to care about whether or not the node is fully linked,
  7490  				// just return the value.
  7491  				return nodeFound.loadVal(), true
  7492  			}
  7493  			// If the node is marked, it means some other goroutine is in the process of deleting it,
  7494  			// so we try to add the node again in the next loop.
  7495  			continue
  7496  		}
  7497  
  7498  		// Add this node into the skip list.
  7499  		var (
  7500  			highestLocked        = -1 // the highest level being locked by this process
  7501  			valid                = true
  7502  			pred, succ, prevPred *runeNode
  7503  		)
  7504  		if level == 0 {
  7505  			level = s.randomlevel()
  7506  			if level > hl {
  7507  				// If the highest level has been updated, it usually means that many goroutines
  7508  				// are inserting items. Hopefully we can find a better path in the next loop.
  7509  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  7510  				// but this strategy's performance is almost the same as the existing method.
  7511  				continue
  7512  			}
  7513  		}
  7514  		for layer := 0; valid && layer < level; layer++ {
  7515  			pred = preds[layer]   // target node's previous node
  7516  			succ = succs[layer]   // target node's next node
  7517  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7518  				pred.mu.Lock()
  7519  				highestLocked = layer
  7520  				prevPred = pred
  7521  			}
  7522  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  7523  			// It is valid if:
  7524  			// 1. Neither the previous node nor the next node is marked.
  7525  			// 2. The previous node's next node is succ in this layer.
  7526  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  7527  		}
  7528  		if !valid {
  7529  			unlockRune(preds, highestLocked)
  7530  			continue
  7531  		}
  7532  
  7533  		nn := newRuneNode(key, value, level)
  7534  		for layer := 0; layer < level; layer++ {
  7535  			nn.storeNext(layer, succs[layer])
  7536  			preds[layer].atomicStoreNext(layer, nn)
  7537  		}
  7538  		nn.flags.SetTrue(fullyLinked)
  7539  		unlockRune(preds, highestLocked)
  7540  		atomic.AddInt64(&s.length, 1)
  7541  		return value, false
  7542  	}
  7543  }
  7544  
  7545  // LoadOrStoreLazy returns the existing value for the key if present.
  7546  // Otherwise, it stores and returns the value obtained from f; f is called only once.
  7547  // The loaded result is true if the value was loaded, false if stored.
  7548  // (Modified from LoadOrStore)
  7549  func (s *RuneMap) LoadOrStoreLazy(key rune, f func() interface{}) (actual interface{}, loaded bool) {
  7550  	var (
  7551  		level        int
  7552  		preds, succs [maxLevel]*runeNode
  7553  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  7554  	)
  7555  	for {
  7556  		nodeFound := s.findNode(key, &preds, &succs)
  7557  		if nodeFound != nil { // indicating the key is already in the skip-list
  7558  			if !nodeFound.flags.Get(marked) {
  7559  				// We don't need to care about whether or not the node is fully linked,
  7560  				// just return the value.
  7561  				return nodeFound.loadVal(), true
  7562  			}
  7563  			// If the node is marked, it means some other goroutine is in the process of deleting it,
  7564  			// so we try to add the node again in the next loop.
  7565  			continue
  7566  		}
  7567  
  7568  		// Add this node into the skip list.
  7569  		var (
  7570  			highestLocked        = -1 // the highest level being locked by this process
  7571  			valid                = true
  7572  			pred, succ, prevPred *runeNode
  7573  		)
  7574  		if level == 0 {
  7575  			level = s.randomlevel()
  7576  			if level > hl {
  7577  				// If the highest level has been updated, it usually means that many goroutines
  7578  				// are inserting items. Hopefully we can find a better path in the next loop.
  7579  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  7580  				// but this strategy's performance is almost the same as the existing method.
  7581  				continue
  7582  			}
  7583  		}
  7584  		for layer := 0; valid && layer < level; layer++ {
  7585  			pred = preds[layer]   // target node's previous node
  7586  			succ = succs[layer]   // target node's next node
  7587  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7588  				pred.mu.Lock()
  7589  				highestLocked = layer
  7590  				prevPred = pred
  7591  			}
  7592  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  7593  			// It is valid if:
  7594  			// 1. Neither the previous node nor the next node is marked.
  7595  			// 2. The previous node's next node is succ in this layer.
  7596  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  7597  		}
  7598  		if !valid {
  7599  			unlockRune(preds, highestLocked)
  7600  			continue
  7601  		}
  7602  		value := f()
  7603  		nn := newRuneNode(key, value, level)
  7604  		for layer := 0; layer < level; layer++ {
  7605  			nn.storeNext(layer, succs[layer])
  7606  			preds[layer].atomicStoreNext(layer, nn)
  7607  		}
  7608  		nn.flags.SetTrue(fullyLinked)
  7609  		unlockRune(preds, highestLocked)
  7610  		atomic.AddInt64(&s.length, 1)
  7611  		return value, false
  7612  	}
  7613  }
  7614  
  7615  // Delete deletes the value for a key.
  7616  func (s *RuneMap) Delete(key rune) bool {
  7617  	var (
  7618  		nodeToDelete *runeNode
  7619  		isMarked     bool // indicates whether this operation has marked the node
  7620  		topLayer     = -1
  7621  		preds, succs [maxLevel]*runeNode
  7622  	)
  7623  	for {
  7624  		lFound := s.findNodeDelete(key, &preds, &succs)
  7625  		if isMarked || // this process has marked the node, or the node can be found in the skip list
  7626  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  7627  			if !isMarked { // the node has not been marked by this process yet
  7628  				nodeToDelete = succs[lFound]
  7629  				topLayer = lFound
  7630  				nodeToDelete.mu.Lock()
  7631  				if nodeToDelete.flags.Get(marked) {
  7632  					// The node is marked by another process,
  7633  					// the physical deletion will be accomplished by another process.
  7634  					nodeToDelete.mu.Unlock()
  7635  					return false
  7636  				}
  7637  				nodeToDelete.flags.SetTrue(marked)
  7638  				isMarked = true
  7639  			}
  7640  			// Accomplish the physical deletion.
  7641  			var (
  7642  				highestLocked        = -1 // the highest level being locked by this process
  7643  				valid                = true
  7644  				pred, succ, prevPred *runeNode
  7645  			)
  7646  			for layer := 0; valid && (layer <= topLayer); layer++ {
  7647  				pred, succ = preds[layer], succs[layer]
  7648  				if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7649  					pred.mu.Lock()
  7650  					highestLocked = layer
  7651  					prevPred = pred
  7652  				}
  7653  				// valid checks whether another node has been inserted into the skip list in this layer
  7654  				// during this process, or whether the previous node has been deleted by another process.
  7655  				// It is valid if:
  7656  				// 1. the previous node still exists (is not marked).
  7657  				// 2. no other node has been inserted into the skip list in this layer.
  7658  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  7659  			}
  7660  			if !valid {
  7661  				unlockRune(preds, highestLocked)
  7662  				continue
  7663  			}
  7664  			for i := topLayer; i >= 0; i-- {
  7665  				// Now we own the `nodeToDelete`, no other goroutine will modify it.
  7666  				// So we don't need `nodeToDelete.atomicLoadNext`.
  7667  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  7668  			}
  7669  			nodeToDelete.mu.Unlock()
  7670  			unlockRune(preds, highestLocked)
  7671  			atomic.AddInt64(&s.length, -1)
  7672  			return true
  7673  		}
  7674  		return false
  7675  	}
  7676  }
  7677  
  7678  // Range calls f sequentially for each key and value present in the skipmap.
  7679  // If f returns false, range stops the iteration.
  7680  //
  7681  // Range does not necessarily correspond to any consistent snapshot of the Map's
  7682  // contents: no key will be visited more than once, but if the value for any key
  7683  // is stored or deleted concurrently, Range may reflect any mapping for that key
  7684  // from any point during the Range call.
  7685  func (s *RuneMap) Range(f func(key rune, value interface{}) bool) {
  7686  	x := s.header.atomicLoadNext(0)
  7687  	for x != nil {
  7688  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  7689  			x = x.atomicLoadNext(0)
  7690  			continue
  7691  		}
  7692  		if !f(x.key, x.loadVal()) {
  7693  			break
  7694  		}
  7695  		x = x.atomicLoadNext(0)
  7696  	}
  7697  }
  7698  
  7699  // Len returns the length of this skipmap.
  7700  func (s *RuneMap) Len() int {
  7701  	return int(atomic.LoadInt64(&s.length))
  7702  }
  7703  
  7704  // RuneMapDesc represents a map based on a skip list, in descending order.
  7705  type RuneMapDesc struct {
  7706  	header       *runeNodeDesc
  7707  	length       int64
  7708  	highestLevel int64 // highest level for now
  7709  }
  7710  
  7711  type runeNodeDesc struct {
  7712  	key   rune
  7713  	value unsafe.Pointer // *interface{}
  7714  	next  optionalArray  // [level]*runeNodeDesc
  7715  	mu    sync.Mutex
  7716  	flags bitflag
  7717  	level uint32
  7718  }
  7719  
  7720  func newRuneNodeDesc(key rune, value interface{}, level int) *runeNodeDesc {
  7721  	node := &runeNodeDesc{
  7722  		key:   key,
  7723  		level: uint32(level),
  7724  	}
  7725  	node.storeVal(value)
  7726  	if level > op1 {
  7727  		node.next.extra = new([op2]unsafe.Pointer)
  7728  	}
  7729  	return node
  7730  }
  7731  
  7732  func (n *runeNodeDesc) storeVal(value interface{}) {
  7733  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  7734  }
  7735  
  7736  func (n *runeNodeDesc) loadVal() interface{} {
  7737  	return *(*interface{})(atomic.LoadPointer(&n.value))
  7738  }
  7739  
  7740  func (n *runeNodeDesc) loadNext(i int) *runeNodeDesc {
  7741  	return (*runeNodeDesc)(n.next.load(i))
  7742  }
  7743  
  7744  func (n *runeNodeDesc) storeNext(i int, node *runeNodeDesc) {
  7745  	n.next.store(i, unsafe.Pointer(node))
  7746  }
  7747  
  7748  func (n *runeNodeDesc) atomicLoadNext(i int) *runeNodeDesc {
  7749  	return (*runeNodeDesc)(n.next.atomicLoad(i))
  7750  }
  7751  
  7752  func (n *runeNodeDesc) atomicStoreNext(i int, node *runeNodeDesc) {
  7753  	n.next.atomicStore(i, unsafe.Pointer(node))
  7754  }
  7755  
  7756  func (n *runeNodeDesc) lessthan(key rune) bool {
  7757  	return n.key > key
  7758  }
  7759  
  7760  func (n *runeNodeDesc) equal(key rune) bool {
  7761  	return n.key == key
  7762  }
  7763  
  7764  // NewRuneDesc returns an empty rune skipmap in descending order.
  7765  func NewRuneDesc() *RuneMapDesc {
  7766  	h := newRuneNodeDesc(0, "", maxLevel)
  7767  	h.flags.SetTrue(fullyLinked)
  7768  	return &RuneMapDesc{
  7769  		header:       h,
  7770  		highestLevel: defaultHighestLevel,
  7771  	}
  7772  }
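
// Editorial note: the Desc variant differs from RuneMap mainly in lessthan
// above, which inverts the comparison (n.key > key), so the shared search
// code walks keys in descending order. A hedged sketch:
//
//	m := NewRuneDesc()
//	m.Store('a', 1)
//	m.Store('c', 3)
//	m.Store('b', 2)
//	m.Range(func(key rune, value interface{}) bool {
//		// visits 'c', 'b', 'a' in that order
//		return true
//	})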
  7773  
  7774  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  7775  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key (descending order).
  7776  // (It does not compute the full path; it returns as soon as the node is found.)
  7777  func (s *RuneMapDesc) findNode(key rune, preds *[maxLevel]*runeNodeDesc, succs *[maxLevel]*runeNodeDesc) *runeNodeDesc {
  7778  	x := s.header
  7779  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7780  		succ := x.atomicLoadNext(i)
  7781  		for succ != nil && succ.lessthan(key) {
  7782  			x = succ
  7783  			succ = x.atomicLoadNext(i)
  7784  		}
  7785  		preds[i] = x
  7786  		succs[i] = succ
  7787  
  7788  		// Check if the key is already in the skipmap.
  7789  		if succ != nil && succ.equal(key) {
  7790  			return succ
  7791  		}
  7792  	}
  7793  	return nil
  7794  }
  7795  
  7796  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  7797  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key (descending order).
  7798  func (s *RuneMapDesc) findNodeDelete(key rune, preds *[maxLevel]*runeNodeDesc, succs *[maxLevel]*runeNodeDesc) int {
  7799  	// lFound represents the index of the first layer at which it found a node.
  7800  	lFound, x := -1, s.header
  7801  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7802  		succ := x.atomicLoadNext(i)
  7803  		for succ != nil && succ.lessthan(key) {
  7804  			x = succ
  7805  			succ = x.atomicLoadNext(i)
  7806  		}
  7807  		preds[i] = x
  7808  		succs[i] = succ
  7809  
  7810  		// Check if the key is already in the skip list.
  7811  		if lFound == -1 && succ != nil && succ.equal(key) {
  7812  			lFound = i
  7813  		}
  7814  	}
  7815  	return lFound
  7816  }
  7817  
  7818  func unlockRuneDesc(preds [maxLevel]*runeNodeDesc, highestLevel int) {
  7819  	var prevPred *runeNodeDesc
  7820  	for i := highestLevel; i >= 0; i-- {
  7821  		if preds[i] != prevPred { // the node could have been unlocked in a previous iteration
  7822  			preds[i].mu.Unlock()
  7823  			prevPred = preds[i]
  7824  		}
  7825  	}
  7826  }
  7827  
  7828  // Store sets the value for a key.
  7829  func (s *RuneMapDesc) Store(key rune, value interface{}) {
  7830  	level := s.randomlevel()
  7831  	var preds, succs [maxLevel]*runeNodeDesc
  7832  	for {
  7833  		nodeFound := s.findNode(key, &preds, &succs)
  7834  		if nodeFound != nil { // indicating the key is already in the skip-list
  7835  			if !nodeFound.flags.Get(marked) {
  7836  				// We don't need to care about whether or not the node is fully linked,
  7837  				// just replace the value.
  7838  				nodeFound.storeVal(value)
  7839  				return
  7840  			}
  7841  			// If the node is marked, it means some other goroutine is in the process of deleting it,
  7842  			// so we try to add the node again in the next loop.
  7843  			continue
  7844  		}
  7845  
  7846  		// Add this node into the skip list.
  7847  		var (
  7848  			highestLocked        = -1 // the highest level being locked by this process
  7849  			valid                = true
  7850  			pred, succ, prevPred *runeNodeDesc
  7851  		)
  7852  		for layer := 0; valid && layer < level; layer++ {
  7853  			pred = preds[layer]   // target node's previous node
  7854  			succ = succs[layer]   // target node's next node
  7855  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7856  				pred.mu.Lock()
  7857  				highestLocked = layer
  7858  				prevPred = pred
  7859  			}
  7860  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  7861  			// It is valid if:
  7862  			// 1. Neither the previous node nor the next node is marked.
  7863  			// 2. The previous node's next node is succ in this layer.
  7864  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  7865  		}
  7866  		if !valid {
  7867  			unlockRuneDesc(preds, highestLocked)
  7868  			continue
  7869  		}
  7870  
  7871  		nn := newRuneNodeDesc(key, value, level)
  7872  		for layer := 0; layer < level; layer++ {
  7873  			nn.storeNext(layer, succs[layer])
  7874  			preds[layer].atomicStoreNext(layer, nn)
  7875  		}
  7876  		nn.flags.SetTrue(fullyLinked)
  7877  		unlockRuneDesc(preds, highestLocked)
  7878  		atomic.AddInt64(&s.length, 1)
  7879  		return
  7880  	}
  7881  }
  7882  
  7883  func (s *RuneMapDesc) randomlevel() int {
  7884  	// Generate random level.
  7885  	level := randomLevel()
  7886  	// Update highest level if possible.
  7887  	for {
  7888  		hl := atomic.LoadInt64(&s.highestLevel)
  7889  		if int64(level) <= hl {
  7890  			break
  7891  		}
  7892  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  7893  			break
  7894  		}
  7895  	}
  7896  	return level
  7897  }
  7898  
  7899  // Load returns the value stored in the map for a key, or nil if no
  7900  // value is present.
  7901  // The ok result indicates whether value was found in the map.
  7902  func (s *RuneMapDesc) Load(key rune) (value interface{}, ok bool) {
  7903  	x := s.header
  7904  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7905  		nex := x.atomicLoadNext(i)
  7906  		for nex != nil && nex.lessthan(key) {
  7907  			x = nex
  7908  			nex = x.atomicLoadNext(i)
  7909  		}
  7910  
  7911  		// Check if the key is already in the skip list.
  7912  		if nex != nil && nex.equal(key) {
  7913  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  7914  				return nex.loadVal(), true
  7915  			}
  7916  			return nil, false
  7917  		}
  7918  	}
  7919  	return nil, false
  7920  }
  7921  
  7922  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  7923  // The loaded result reports whether the key was present.
  7924  // (Modified from Delete)
  7925  func (s *RuneMapDesc) LoadAndDelete(key rune) (value interface{}, loaded bool) {
  7926  	var (
  7927  		nodeToDelete *runeNodeDesc
  7928  		isMarked     bool // indicates whether this operation has marked the node
  7929  		topLayer     = -1
  7930  		preds, succs [maxLevel]*runeNodeDesc
  7931  	)
  7932  	for {
  7933  		lFound := s.findNodeDelete(key, &preds, &succs)
  7934  		if isMarked || // this process has marked the node, or the node can be found in the skip list
  7935  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  7936  			if !isMarked { // the node has not been marked by this process yet
  7937  				nodeToDelete = succs[lFound]
  7938  				topLayer = lFound
  7939  				nodeToDelete.mu.Lock()
  7940  				if nodeToDelete.flags.Get(marked) {
  7941  					// The node is marked by another process,
  7942  					// the physical deletion will be accomplished by another process.
  7943  					nodeToDelete.mu.Unlock()
  7944  					return nil, false
  7945  				}
  7946  				nodeToDelete.flags.SetTrue(marked)
  7947  				isMarked = true
  7948  			}
  7949  			// Accomplish the physical deletion.
  7950  			var (
  7951  				highestLocked        = -1 // the highest level being locked by this process
  7952  				valid                = true
  7953  				pred, succ, prevPred *runeNodeDesc
  7954  			)
  7955  			for layer := 0; valid && (layer <= topLayer); layer++ {
  7956  				pred, succ = preds[layer], succs[layer]
  7957  				if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  7958  					pred.mu.Lock()
  7959  					highestLocked = layer
  7960  					prevPred = pred
  7961  				}
  7962  				// valid checks whether another node has been inserted into the skip list in this layer
  7963  				// during this process, or whether the previous node has been deleted by another process.
  7964  				// It is valid if:
  7965  				// 1. the previous node still exists (is not marked).
  7966  				// 2. no other node has been inserted into the skip list in this layer.
  7967  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  7968  			}
  7969  			if !valid {
  7970  				unlockRuneDesc(preds, highestLocked)
  7971  				continue
  7972  			}
  7973  			for i := topLayer; i >= 0; i-- {
  7974  				// Now we own the `nodeToDelete`, no other goroutine will modify it.
  7975  				// So we don't need `nodeToDelete.atomicLoadNext`.
  7976  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  7977  			}
  7978  			nodeToDelete.mu.Unlock()
  7979  			unlockRuneDesc(preds, highestLocked)
  7980  			atomic.AddInt64(&s.length, -1)
  7981  			return nodeToDelete.loadVal(), true
  7982  		}
  7983  		return nil, false
  7984  	}
  7985  }
  7986  
  7987  // LoadOrStore returns the existing value for the key if present.
  7988  // Otherwise, it stores and returns the given value.
  7989  // The loaded result is true if the value was loaded, false if stored.
  7990  // (Modified from Store)
  7991  func (s *RuneMapDesc) LoadOrStore(key rune, value interface{}) (actual interface{}, loaded bool) {
  7992  	var (
  7993  		level        int
  7994  		preds, succs [maxLevel]*runeNodeDesc
  7995  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  7996  	)
  7997  	for {
  7998  		nodeFound := s.findNode(key, &preds, &succs)
  7999  		if nodeFound != nil { // indicating the key is already in the skip-list
  8000  			if !nodeFound.flags.Get(marked) {
  8001  				// We don't need to care about whether or not the node is fully linked,
  8002  				// just return the value.
  8003  				return nodeFound.loadVal(), true
  8004  			}
  8005  			// If the node is marked, it means some other goroutine is in the process of deleting it,
  8006  			// so we try to add the node again in the next loop.
  8007  			continue
  8008  		}
  8009  
  8010  		// Add this node into the skip list.
  8011  		var (
  8012  			highestLocked        = -1 // the highest level being locked by this process
  8013  			valid                = true
  8014  			pred, succ, prevPred *runeNodeDesc
  8015  		)
  8016  		if level == 0 {
  8017  			level = s.randomlevel()
  8018  			if level > hl {
  8019  				// If the highest level has been updated, it usually means that many goroutines
  8020  				// are inserting items. Hopefully we can find a better path in the next loop.
  8021  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  8022  				// but this strategy's performance is almost the same as the existing method.
  8023  				continue
  8024  			}
  8025  		}
  8026  		for layer := 0; valid && layer < level; layer++ {
  8027  			pred = preds[layer]   // target node's previous node
  8028  			succ = succs[layer]   // target node's next node
  8029  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  8030  				pred.mu.Lock()
  8031  				highestLocked = layer
  8032  				prevPred = pred
  8033  			}
  8034  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  8035  			// It is valid if:
  8036  			// 1. Neither the previous node nor the next node is marked.
  8037  			// 2. The previous node's next node is succ in this layer.
  8038  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  8039  		}
  8040  		if !valid {
  8041  			unlockRuneDesc(preds, highestLocked)
  8042  			continue
  8043  		}
  8044  
  8045  		nn := newRuneNodeDesc(key, value, level)
  8046  		for layer := 0; layer < level; layer++ {
  8047  			nn.storeNext(layer, succs[layer])
  8048  			preds[layer].atomicStoreNext(layer, nn)
  8049  		}
  8050  		nn.flags.SetTrue(fullyLinked)
  8051  		unlockRuneDesc(preds, highestLocked)
  8052  		atomic.AddInt64(&s.length, 1)
  8053  		return value, false
  8054  	}
  8055  }
  8056  
  8057  // LoadOrStoreLazy returns the existing value for the key if present.
  8058  // Otherwise, it stores and returns the value obtained from f; f is called only once.
  8059  // The loaded result is true if the value was loaded, false if stored.
  8060  // (Modified from LoadOrStore)
  8061  func (s *RuneMapDesc) LoadOrStoreLazy(key rune, f func() interface{}) (actual interface{}, loaded bool) {
  8062  	var (
  8063  		level        int
  8064  		preds, succs [maxLevel]*runeNodeDesc
  8065  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  8066  	)
  8067  	for {
  8068  		nodeFound := s.findNode(key, &preds, &succs)
  8069  		if nodeFound != nil { // indicating the key is already in the skip-list
  8070  			if !nodeFound.flags.Get(marked) {
  8071  				// We don't need to care about whether or not the node is fully linked,
  8072  				// just return the value.
  8073  				return nodeFound.loadVal(), true
  8074  			}
  8075  			// If the node is marked, it means some other goroutine is in the process of deleting it,
  8076  			// so we try to add the node again in the next loop.
  8077  			continue
  8078  		}
  8079  
  8080  		// Add this node into the skip list.
  8081  		var (
  8082  			highestLocked        = -1 // the highest level being locked by this process
  8083  			valid                = true
  8084  			pred, succ, prevPred *runeNodeDesc
  8085  		)
  8086  		if level == 0 {
  8087  			level = s.randomlevel()
  8088  			if level > hl {
  8089  				// If the highest level has been updated, it usually means that many goroutines
  8090  				// are inserting items. Hopefully we can find a better path in the next loop.
  8091  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  8092  				// but this strategy's performance is almost the same as the existing method.
  8093  				continue
  8094  			}
  8095  		}
  8096  		for layer := 0; valid && layer < level; layer++ {
  8097  			pred = preds[layer]   // target node's previous node
  8098  			succ = succs[layer]   // target node's next node
  8099  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  8100  				pred.mu.Lock()
  8101  				highestLocked = layer
  8102  				prevPred = pred
  8103  			}
  8104  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  8105  			// It is valid if:
  8106  			// 1. Neither the previous node nor the next node is marked.
  8107  			// 2. The previous node's next node is succ in this layer.
  8108  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  8109  		}
  8110  		if !valid {
  8111  			unlockRuneDesc(preds, highestLocked)
  8112  			continue
  8113  		}
  8114  		value := f()
  8115  		nn := newRuneNodeDesc(key, value, level)
  8116  		for layer := 0; layer < level; layer++ {
  8117  			nn.storeNext(layer, succs[layer])
  8118  			preds[layer].atomicStoreNext(layer, nn)
  8119  		}
  8120  		nn.flags.SetTrue(fullyLinked)
  8121  		unlockRuneDesc(preds, highestLocked)
  8122  		atomic.AddInt64(&s.length, 1)
  8123  		return value, false
  8124  	}
  8125  }
  8126  
  8127  // Delete deletes the value for a key.
  8128  func (s *RuneMapDesc) Delete(key rune) bool {
  8129  	var (
  8130  		nodeToDelete *runeNodeDesc
  8131  		isMarked     bool // indicates whether this operation has marked the node
  8132  		topLayer     = -1
  8133  		preds, succs [maxLevel]*runeNodeDesc
  8134  	)
  8135  	for {
  8136  		lFound := s.findNodeDelete(key, &preds, &succs)
  8137  		if isMarked || // this process has marked the node, or the node can be found in the skip list
  8138  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  8139  			if !isMarked { // the node has not been marked by this process yet
  8140  				nodeToDelete = succs[lFound]
  8141  				topLayer = lFound
  8142  				nodeToDelete.mu.Lock()
  8143  				if nodeToDelete.flags.Get(marked) {
  8144  					// The node is marked by another process,
  8145  					// the physical deletion will be accomplished by another process.
  8146  					nodeToDelete.mu.Unlock()
  8147  					return false
  8148  				}
  8149  				nodeToDelete.flags.SetTrue(marked)
  8150  				isMarked = true
  8151  			}
  8152  			// Accomplish the physical deletion.
  8153  			var (
  8154  				highestLocked        = -1 // the highest level being locked by this process
  8155  				valid                = true
  8156  				pred, succ, prevPred *runeNodeDesc
  8157  			)
  8158  			for layer := 0; valid && (layer <= topLayer); layer++ {
  8159  				pred, succ = preds[layer], succs[layer]
  8160  				if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  8161  					pred.mu.Lock()
  8162  					highestLocked = layer
  8163  					prevPred = pred
  8164  				}
  8165  				// valid checks whether another node has been inserted into the skip list in this layer
  8166  				// during this process, or whether the previous node has been deleted by another process.
  8167  				// It is valid if:
  8168  				// 1. the previous node still exists (is not marked).
  8169  				// 2. no other node has been inserted into the skip list in this layer.
  8170  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  8171  			}
  8172  			if !valid {
  8173  				unlockRuneDesc(preds, highestLocked)
  8174  				continue
  8175  			}
  8176  			for i := topLayer; i >= 0; i-- {
  8177  				// Now we own the `nodeToDelete`, no other goroutine will modify it.
  8178  				// So we don't need `nodeToDelete.atomicLoadNext`.
  8179  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  8180  			}
  8181  			nodeToDelete.mu.Unlock()
  8182  			unlockRuneDesc(preds, highestLocked)
  8183  			atomic.AddInt64(&s.length, -1)
  8184  			return true
  8185  		}
  8186  		return false
  8187  	}
  8188  }
  8189  
  8190  // Range calls f sequentially for each key and value present in the skipmap.
  8191  // If f returns false, range stops the iteration.
  8192  //
  8193  // Range does not necessarily correspond to any consistent snapshot of the Map's
  8194  // contents: no key will be visited more than once, but if the value for any key
  8195  // is stored or deleted concurrently, Range may reflect any mapping for that key
  8196  // from any point during the Range call.
  8197  func (s *RuneMapDesc) Range(f func(key rune, value interface{}) bool) {
  8198  	x := s.header.atomicLoadNext(0)
  8199  	for x != nil {
  8200  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  8201  			x = x.atomicLoadNext(0)
  8202  			continue
  8203  		}
  8204  		if !f(x.key, x.loadVal()) {
  8205  			break
  8206  		}
  8207  		x = x.atomicLoadNext(0)
  8208  	}
  8209  }
  8210  
  8211  // Len returns the length of this skipmap.
  8212  func (s *RuneMapDesc) Len() int {
  8213  	return int(atomic.LoadInt64(&s.length))
  8214  }
  8215  
  8216  // UintMap represents a map based on a skip list, in ascending order.
  8217  type UintMap struct {
  8218  	header       *uintNode
  8219  	length       int64
  8220  	highestLevel int64 // highest level for now
  8221  }
  8222  
  8223  type uintNode struct {
  8224  	key   uint
  8225  	value unsafe.Pointer // *interface{}
  8226  	next  optionalArray  // [level]*uintNode
  8227  	mu    sync.Mutex
  8228  	flags bitflag
  8229  	level uint32
  8230  }
  8231  
  8232  func newUintNode(key uint, value interface{}, level int) *uintNode {
  8233  	node := &uintNode{
  8234  		key:   key,
  8235  		level: uint32(level),
  8236  	}
  8237  	node.storeVal(value)
  8238  	if level > op1 {
  8239  		node.next.extra = new([op2]unsafe.Pointer)
  8240  	}
  8241  	return node
  8242  }
  8243  
  8244  func (n *uintNode) storeVal(value interface{}) {
  8245  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  8246  }
  8247  
  8248  func (n *uintNode) loadVal() interface{} {
  8249  	return *(*interface{})(atomic.LoadPointer(&n.value))
  8250  }
  8251  
  8252  func (n *uintNode) loadNext(i int) *uintNode {
  8253  	return (*uintNode)(n.next.load(i))
  8254  }
  8255  
  8256  func (n *uintNode) storeNext(i int, node *uintNode) {
  8257  	n.next.store(i, unsafe.Pointer(node))
  8258  }
  8259  
  8260  func (n *uintNode) atomicLoadNext(i int) *uintNode {
  8261  	return (*uintNode)(n.next.atomicLoad(i))
  8262  }
  8263  
  8264  func (n *uintNode) atomicStoreNext(i int, node *uintNode) {
  8265  	n.next.atomicStore(i, unsafe.Pointer(node))
  8266  }
  8267  
  8268  func (n *uintNode) lessthan(key uint) bool {
  8269  	return n.key < key
  8270  }
  8271  
  8272  func (n *uintNode) equal(key uint) bool {
  8273  	return n.key == key
  8274  }
  8275  
  8276  // NewUint returns an empty uint skipmap.
  8277  func NewUint() *UintMap {
  8278  	h := newUintNode(0, "", maxLevel)
  8279  	h.flags.SetTrue(fullyLinked)
  8280  	return &UintMap{
  8281  		header:       h,
  8282  		highestLevel: defaultHighestLevel,
  8283  	}
  8284  }
  8285  
  8286  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  8287  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  8288  // (It does not compute the full path; it returns as soon as the node is found.)
  8289  func (s *UintMap) findNode(key uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) *uintNode {
  8290  	x := s.header
  8291  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8292  		succ := x.atomicLoadNext(i)
  8293  		for succ != nil && succ.lessthan(key) {
  8294  			x = succ
  8295  			succ = x.atomicLoadNext(i)
  8296  		}
  8297  		preds[i] = x
  8298  		succs[i] = succ
  8299  
  8300  		// Check if the key is already in the skipmap.
  8301  		if succ != nil && succ.equal(key) {
  8302  			return succ
  8303  		}
  8304  	}
  8305  	return nil
  8306  }
  8307  
  8308  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  8309  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
  8310  func (s *UintMap) findNodeDelete(key uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) int {
  8311  	// lFound represents the index of the first layer at which it found a node.
  8312  	lFound, x := -1, s.header
  8313  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8314  		succ := x.atomicLoadNext(i)
  8315  		for succ != nil && succ.lessthan(key) {
  8316  			x = succ
  8317  			succ = x.atomicLoadNext(i)
  8318  		}
  8319  		preds[i] = x
  8320  		succs[i] = succ
  8321  
  8322  		// Check if the key is already in the skip list.
  8323  		if lFound == -1 && succ != nil && succ.equal(key) {
  8324  			lFound = i
  8325  		}
  8326  	}
  8327  	return lFound
  8328  }
  8329  
  8330  func unlockUint(preds [maxLevel]*uintNode, highestLevel int) {
  8331  	var prevPred *uintNode
  8332  	for i := highestLevel; i >= 0; i-- {
  8333  		if preds[i] != prevPred { // the node could have been unlocked in a previous iteration
  8334  			preds[i].mu.Unlock()
  8335  			prevPred = preds[i]
  8336  		}
  8337  	}
  8338  }
  8339  
  8340  // Store sets the value for a key.
  8341  func (s *UintMap) Store(key uint, value interface{}) {
  8342  	level := s.randomlevel()
  8343  	var preds, succs [maxLevel]*uintNode
  8344  	for {
  8345  		nodeFound := s.findNode(key, &preds, &succs)
  8346  		if nodeFound != nil { // indicating the key is already in the skip-list
  8347  			if !nodeFound.flags.Get(marked) {
  8348  				// We don't need to care about whether or not the node is fully linked,
  8349  				// just replace the value.
  8350  				nodeFound.storeVal(value)
  8351  				return
  8352  			}
  8353  			// If the node is marked, it means some other goroutine is in the process of deleting it,
  8354  			// so we try to add the node again in the next loop.
  8355  			continue
  8356  		}
  8357  
  8358  		// Add this node into the skip list.
  8359  		var (
  8360  			highestLocked        = -1 // the highest level being locked by this process
  8361  			valid                = true
  8362  			pred, succ, prevPred *uintNode
  8363  		)
  8364  		for layer := 0; valid && layer < level; layer++ {
  8365  			pred = preds[layer]   // target node's previous node
  8366  			succ = succs[layer]   // target node's next node
  8367  			if pred != prevPred { // the node in this layer could have been locked in the previous iteration
  8368  				pred.mu.Lock()
  8369  				highestLocked = layer
  8370  				prevPred = pred
  8371  			}
  8372  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  8373  			// It is valid if:
  8374  			// 1. Neither the previous node nor the next node is marked.
  8375  			// 2. The previous node's next node is succ in this layer.
  8376  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  8377  		}
  8378  		if !valid {
  8379  			unlockUint(preds, highestLocked)
  8380  			continue
  8381  		}
  8382  
  8383  		nn := newUintNode(key, value, level)
  8384  		for layer := 0; layer < level; layer++ {
  8385  			nn.storeNext(layer, succs[layer])
  8386  			preds[layer].atomicStoreNext(layer, nn)
  8387  		}
  8388  		nn.flags.SetTrue(fullyLinked)
  8389  		unlockUint(preds, highestLocked)
  8390  		atomic.AddInt64(&s.length, 1)
  8391  		return
  8392  	}
  8393  }
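        // A minimal usage sketch of Store/Load (assuming the ascending
        // constructor NewUint is defined earlier in this file, by analogy with
        // NewUintDesc below, and the package is imported as "skipmap"):
        //
        //	m := skipmap.NewUint()
        //	m.Store(1, "one")
        //	m.Store(2, "two")
        //	if v, ok := m.Load(1); ok {
        //		fmt.Println(v) // one
        //	}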
  8394  
  8395  func (s *UintMap) randomlevel() int {
  8396  	// Generate random level.
  8397  	level := randomLevel()
  8398  	// Update highest level if possible.
  8399  	for {
  8400  		hl := atomic.LoadInt64(&s.highestLevel)
  8401  		if int64(level) <= hl {
  8402  			break
  8403  		}
  8404  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  8405  			break
  8406  		}
  8407  	}
  8408  	return level
  8409  }
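        // randomLevel itself is defined elsewhere in this package. A typical
        // skip-list level generator draws from a geometric distribution; a sketch
        // under that assumption (not this package's actual implementation):
        //
        //	func sketchRandomLevel() int {
        //		level := 1
        //		// Each additional level is kept with probability 1/4.
        //		for level < maxLevel && rand.Intn(4) == 0 {
        //			level++
        //		}
        //		return level
        //	}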
  8410  
  8411  // Load returns the value stored in the map for a key, or nil if no
  8412  // value is present.
  8413  // The ok result indicates whether value was found in the map.
  8414  func (s *UintMap) Load(key uint) (value interface{}, ok bool) {
  8415  	x := s.header
  8416  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8417  		nex := x.atomicLoadNext(i)
  8418  		for nex != nil && nex.lessthan(key) {
  8419  			x = nex
  8420  			nex = x.atomicLoadNext(i)
  8421  		}
  8422  
  8423  		// Check if the key is already in the skip list.
  8424  		if nex != nil && nex.equal(key) {
  8425  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  8426  				return nex.loadVal(), true
  8427  			}
  8428  			return nil, false
  8429  		}
  8430  	}
  8431  	return nil, false
  8432  }
  8433  
  8434  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  8435  // The loaded result reports whether the key was present.
  8436  // (Modified from Delete)
  8437  func (s *UintMap) LoadAndDelete(key uint) (value interface{}, loaded bool) {
  8438  	var (
  8439  		nodeToDelete *uintNode
  8440  		isMarked     bool // reports whether this operation has marked the node
  8441  		topLayer     = -1
  8442  		preds, succs [maxLevel]*uintNode
  8443  	)
  8444  	for {
  8445  		lFound := s.findNodeDelete(key, &preds, &succs)
  8446  		if isMarked || // this process marked the node, or the node was found in the skip list
  8447  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  8448  			if !isMarked { // the node has not been marked yet
  8449  				nodeToDelete = succs[lFound]
  8450  				topLayer = lFound
  8451  				nodeToDelete.mu.Lock()
  8452  				if nodeToDelete.flags.Get(marked) {
  8453  					// The node is already marked by another process;
  8454  					// the physical deletion will be accomplished by that process.
  8455  					nodeToDelete.mu.Unlock()
  8456  					return nil, false
  8457  				}
  8458  				nodeToDelete.flags.SetTrue(marked)
  8459  				isMarked = true
  8460  			}
  8461  			// Accomplish the physical deletion.
  8462  			var (
  8463  				highestLocked        = -1 // the highest level being locked by this process
  8464  				valid                = true
  8465  				pred, succ, prevPred *uintNode
  8466  			)
  8467  			for layer := 0; valid && (layer <= topLayer); layer++ {
  8468  				pred, succ = preds[layer], succs[layer]
  8469  				if pred != prevPred { // pred may already be locked from a previous iteration
  8470  					pred.mu.Lock()
  8471  					highestLocked = layer
  8472  					prevPred = pred
  8473  				}
  8474  				// valid checks whether another node has been inserted into this layer
  8475  				// during the search, or the previous node has been deleted by another
  8476  				// process. The deletion is valid if:
  8477  				// 1. The previous node is not marked.
  8478  				// 2. No other node has been inserted into this layer (pred's next is still succ).
  8479  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  8480  			}
  8481  			if !valid {
  8482  				unlockUint(preds, highestLocked)
  8483  				continue
  8484  			}
  8485  			for i := topLayer; i >= 0; i-- {
  8486  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  8487  				// so we don't need `nodeToDelete.atomicLoadNext` here.
  8488  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  8489  			}
  8490  			nodeToDelete.mu.Unlock()
  8491  			unlockUint(preds, highestLocked)
  8492  			atomic.AddInt64(&s.length, -1)
  8493  			return nodeToDelete.loadVal(), true
  8494  		}
  8495  		return nil, false
  8496  	}
  8497  }
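        // A minimal sketch of LoadAndDelete: it removes the key and returns the
        // removed value in a single operation, avoiding the race inherent in a
        // separate Load followed by Delete (m as in the sketch above):
        //
        //	if v, loaded := m.LoadAndDelete(2); loaded {
        //		fmt.Println("removed:", v) // removed: two
        //	}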
  8498  
  8499  // LoadOrStore returns the existing value for the key if present.
  8500  // Otherwise, it stores and returns the given value.
  8501  // The loaded result is true if the value was loaded, false if stored.
  8502  // (Modified from Store)
  8503  func (s *UintMap) LoadOrStore(key uint, value interface{}) (actual interface{}, loaded bool) {
  8504  	var (
  8505  		level        int
  8506  		preds, succs [maxLevel]*uintNode
  8507  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  8508  	)
  8509  	for {
  8510  		nodeFound := s.findNode(key, &preds, &succs)
  8511  		if nodeFound != nil { // indicating the key is already in the skip-list
  8512  			if !nodeFound.flags.Get(marked) {
  8513  				// We don't need to care whether the node is fully linked;
  8514  				// just return the value.
  8515  				return nodeFound.loadVal(), true
  8516  			}
  8517  			// If the node is marked, some other goroutine is in the process of
  8518  			// deleting this node, so retry the insertion in the next loop.
  8519  			continue
  8520  		}
  8521  
  8522  		// Add this node into the skip list.
  8523  		var (
  8524  			highestLocked        = -1 // the highest level being locked by this process
  8525  			valid                = true
  8526  			pred, succ, prevPred *uintNode
  8527  		)
  8528  		if level == 0 {
  8529  			level = s.randomlevel()
  8530  			if level > hl {
  8531  				// If the highest level was updated, it usually means that many goroutines
  8532  				// are inserting items. Hopefully we can find a better path in the next loop.
  8533  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  8534  				// but this strategy's performance is almost the same as the existing method.
  8535  				continue
  8536  			}
  8537  		}
  8538  		for layer := 0; valid && layer < level; layer++ {
  8539  			pred = preds[layer]   // target node's previous node
  8540  			succ = succs[layer]   // target node's next node
  8541  			if pred != prevPred { // pred may already be locked from a previous iteration
  8542  				pred.mu.Lock()
  8543  				highestLocked = layer
  8544  				prevPred = pred
  8545  			}
  8546  			// valid checks whether another node has been inserted into this layer
  8547  			// during the search. The insertion is valid if:
  8548  			// 1. Neither the previous node nor the next node is marked.
  8549  			// 2. The previous node's next node in this layer is still succ.
  8550  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  8551  		}
  8552  		if !valid {
  8553  			unlockUint(preds, highestLocked)
  8554  			continue
  8555  		}
  8556  
  8557  		nn := newUintNode(key, value, level)
  8558  		for layer := 0; layer < level; layer++ {
  8559  			nn.storeNext(layer, succs[layer])
  8560  			preds[layer].atomicStoreNext(layer, nn)
  8561  		}
  8562  		nn.flags.SetTrue(fullyLinked)
  8563  		unlockUint(preds, highestLocked)
  8564  		atomic.AddInt64(&s.length, 1)
  8565  		return value, false
  8566  	}
  8567  }
  8568  
  8569  // LoadOrStoreLazy returns the existing value for the key if present.
  8570  // Otherwise, it stores and returns the value obtained from f; f is invoked at most once, and only when the value is stored.
  8571  // The loaded result is true if the value was loaded, false if stored.
  8572  // (Modified from LoadOrStore)
  8573  func (s *UintMap) LoadOrStoreLazy(key uint, f func() interface{}) (actual interface{}, loaded bool) {
  8574  	var (
  8575  		level        int
  8576  		preds, succs [maxLevel]*uintNode
  8577  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  8578  	)
  8579  	for {
  8580  		nodeFound := s.findNode(key, &preds, &succs)
  8581  		if nodeFound != nil { // indicating the key is already in the skip-list
  8582  			if !nodeFound.flags.Get(marked) {
  8583  				// We don't need to care whether the node is fully linked;
  8584  				// just return the value.
  8585  				return nodeFound.loadVal(), true
  8586  			}
  8587  			// If the node is marked, some other goroutine is in the process of
  8588  			// deleting this node, so retry the insertion in the next loop.
  8589  			continue
  8590  		}
  8591  
  8592  		// Add this node into the skip list.
  8593  		var (
  8594  			highestLocked        = -1 // the highest level being locked by this process
  8595  			valid                = true
  8596  			pred, succ, prevPred *uintNode
  8597  		)
  8598  		if level == 0 {
  8599  			level = s.randomlevel()
  8600  			if level > hl {
  8601  				// If the highest level was updated, it usually means that many goroutines
  8602  				// are inserting items. Hopefully we can find a better path in the next loop.
  8603  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  8604  				// but this strategy's performance is almost the same as the existing method.
  8605  				continue
  8606  			}
  8607  		}
  8608  		for layer := 0; valid && layer < level; layer++ {
  8609  			pred = preds[layer]   // target node's previous node
  8610  			succ = succs[layer]   // target node's next node
  8611  			if pred != prevPred { // pred may already be locked from a previous iteration
  8612  				pred.mu.Lock()
  8613  				highestLocked = layer
  8614  				prevPred = pred
  8615  			}
  8616  			// valid checks whether another node has been inserted into this layer
  8617  			// during the search. The insertion is valid if:
  8618  			// 1. Neither the previous node nor the next node is marked.
  8619  			// 2. The previous node's next node in this layer is still succ.
  8620  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  8621  		}
  8622  		if !valid {
  8623  			unlockUint(preds, highestLocked)
  8624  			continue
  8625  		}
  8626  		value := f()
  8627  		nn := newUintNode(key, value, level)
  8628  		for layer := 0; layer < level; layer++ {
  8629  			nn.storeNext(layer, succs[layer])
  8630  			preds[layer].atomicStoreNext(layer, nn)
  8631  		}
  8632  		nn.flags.SetTrue(fullyLinked)
  8633  		unlockUint(preds, highestLocked)
  8634  		atomic.AddInt64(&s.length, 1)
  8635  		return value, false
  8636  	}
  8637  }
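        // A minimal sketch contrasting the two variants: LoadOrStore always
        // takes an already-constructed value, while LoadOrStoreLazy defers the
        // construction to f, which runs only when the key is absent
        // (newExpensiveValue is a hypothetical helper):
        //
        //	actual, loaded := m.LoadOrStoreLazy(3, func() interface{} {
        //		return newExpensiveValue() // not called if key 3 is present
        //	})
        //	_, _ = actual, loaded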
  8638  
  8639  // Delete deletes the value for a key, reporting whether the key was deleted.
  8640  func (s *UintMap) Delete(key uint) bool {
  8641  	var (
  8642  		nodeToDelete *uintNode
  8643  		isMarked     bool // reports whether this operation has marked the node
  8644  		topLayer     = -1
  8645  		preds, succs [maxLevel]*uintNode
  8646  	)
  8647  	for {
  8648  		lFound := s.findNodeDelete(key, &preds, &succs)
  8649  		if isMarked || // this process marked the node, or the node was found in the skip list
  8650  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  8651  			if !isMarked { // the node has not been marked yet
  8652  				nodeToDelete = succs[lFound]
  8653  				topLayer = lFound
  8654  				nodeToDelete.mu.Lock()
  8655  				if nodeToDelete.flags.Get(marked) {
  8656  					// The node is already marked by another process;
  8657  					// the physical deletion will be accomplished by that process.
  8658  					nodeToDelete.mu.Unlock()
  8659  					return false
  8660  				}
  8661  				nodeToDelete.flags.SetTrue(marked)
  8662  				isMarked = true
  8663  			}
  8664  			// Accomplish the physical deletion.
  8665  			var (
  8666  				highestLocked        = -1 // the highest level being locked by this process
  8667  				valid                = true
  8668  				pred, succ, prevPred *uintNode
  8669  			)
  8670  			for layer := 0; valid && (layer <= topLayer); layer++ {
  8671  				pred, succ = preds[layer], succs[layer]
  8672  				if pred != prevPred { // pred may already be locked from a previous iteration
  8673  					pred.mu.Lock()
  8674  					highestLocked = layer
  8675  					prevPred = pred
  8676  				}
  8677  				// valid checks whether another node has been inserted into this layer
  8678  				// during the search, or the previous node has been deleted by another
  8679  				// process. The deletion is valid if:
  8680  				// 1. The previous node is not marked.
  8681  				// 2. No other node has been inserted into this layer (pred's next is still succ).
  8682  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  8683  			}
  8684  			if !valid {
  8685  				unlockUint(preds, highestLocked)
  8686  				continue
  8687  			}
  8688  			for i := topLayer; i >= 0; i-- {
  8689  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  8690  				// so we don't need `nodeToDelete.atomicLoadNext` here.
  8691  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  8692  			}
  8693  			nodeToDelete.mu.Unlock()
  8694  			unlockUint(preds, highestLocked)
  8695  			atomic.AddInt64(&s.length, -1)
  8696  			return true
  8697  		}
  8698  		return false
  8699  	}
  8700  }
  8701  
  8702  // Range calls f sequentially for each key and value present in the skipmap.
  8703  // If f returns false, range stops the iteration.
  8704  //
  8705  // Range does not necessarily correspond to any consistent snapshot of the Map's
  8706  // contents: no key will be visited more than once, but if the value for any key
  8707  // is stored or deleted concurrently, Range may reflect any mapping for that key
  8708  // from any point during the Range call.
  8709  func (s *UintMap) Range(f func(key uint, value interface{}) bool) {
  8710  	x := s.header.atomicLoadNext(0)
  8711  	for x != nil {
  8712  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  8713  			x = x.atomicLoadNext(0)
  8714  			continue
  8715  		}
  8716  		if !f(x.key, x.loadVal()) {
  8717  			break
  8718  		}
  8719  		x = x.atomicLoadNext(0)
  8720  	}
  8721  }
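        // A minimal sketch of Range: level-0 links are kept sorted, so keys are
        // visited in ascending order; returning false stops the iteration:
        //
        //	m.Range(func(key uint, value interface{}) bool {
        //		fmt.Println(key, value)
        //		return key < 100 // stop once a key reaches 100
        //	})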
  8722  
  8723  // Len returns the length of this skipmap.
  8724  func (s *UintMap) Len() int {
  8725  	return int(atomic.LoadInt64(&s.length))
  8726  }
  8727  
  8728  // UintMapDesc represents a map based on a skip list, in descending order.
  8729  type UintMapDesc struct {
  8730  	header       *uintNodeDesc
  8731  	length       int64
  8732  	highestLevel int64 // highest level for now
  8733  }
  8734  
  8735  type uintNodeDesc struct {
  8736  	key   uint
  8737  	value unsafe.Pointer // *interface{}
  8738  	next  optionalArray  // [level]*uintNodeDesc
  8739  	mu    sync.Mutex
  8740  	flags bitflag
  8741  	level uint32
  8742  }
  8743  
  8744  func newUintNodeDesc(key uint, value interface{}, level int) *uintNodeDesc {
  8745  	node := &uintNodeDesc{
  8746  		key:   key,
  8747  		level: uint32(level),
  8748  	}
  8749  	node.storeVal(value)
  8750  	if level > op1 {
  8751  		node.next.extra = new([op2]unsafe.Pointer)
  8752  	}
  8753  	return node
  8754  }
  8755  
  8756  func (n *uintNodeDesc) storeVal(value interface{}) {
  8757  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  8758  }
  8759  
  8760  func (n *uintNodeDesc) loadVal() interface{} {
  8761  	return *(*interface{})(atomic.LoadPointer(&n.value))
  8762  }
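        // Note that values live behind an atomic *interface{} (storeVal/loadVal
        // above), so a value can be replaced or read without taking the node's
        // mutex; the mutex only serializes splicing of the next pointers.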
  8763  
  8764  func (n *uintNodeDesc) loadNext(i int) *uintNodeDesc {
  8765  	return (*uintNodeDesc)(n.next.load(i))
  8766  }
  8767  
  8768  func (n *uintNodeDesc) storeNext(i int, node *uintNodeDesc) {
  8769  	n.next.store(i, unsafe.Pointer(node))
  8770  }
  8771  
  8772  func (n *uintNodeDesc) atomicLoadNext(i int) *uintNodeDesc {
  8773  	return (*uintNodeDesc)(n.next.atomicLoad(i))
  8774  }
  8775  
  8776  func (n *uintNodeDesc) atomicStoreNext(i int, node *uintNodeDesc) {
  8777  	n.next.atomicStore(i, unsafe.Pointer(node))
  8778  }
  8779  
  8780  func (n *uintNodeDesc) lessthan(key uint) bool {
  8781  	return n.key > key
  8782  }
  8783  
  8784  func (n *uintNodeDesc) equal(key uint) bool {
  8785  	return n.key == key
  8786  }
  8787  
  8788  // NewUintDesc returns an empty uint skipmap sorted in descending order.
  8789  func NewUintDesc() *UintMapDesc {
  8790  	h := newUintNodeDesc(0, "", maxLevel)
  8791  	h.flags.SetTrue(fullyLinked)
  8792  	return &UintMapDesc{
  8793  		header:       h,
  8794  		highestLevel: defaultHighestLevel,
  8795  	}
  8796  }
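        // A minimal sketch of the descending variant: lessthan is inverted
        // (n.key > key), so Range visits keys from largest to smallest:
        //
        //	d := skipmap.NewUintDesc()
        //	d.Store(1, "a")
        //	d.Store(9, "b")
        //	d.Range(func(key uint, value interface{}) bool {
        //		fmt.Println(key) // 9, then 1
        //		return true
        //	})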
  8797  
  8798  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  8799  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
  8800  // (The full path is not needed: if the node is found, findNode returns immediately.)
  8801  func (s *UintMapDesc) findNode(key uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) *uintNodeDesc {
  8802  	x := s.header
  8803  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8804  		succ := x.atomicLoadNext(i)
  8805  		for succ != nil && succ.lessthan(key) {
  8806  			x = succ
  8807  			succ = x.atomicLoadNext(i)
  8808  		}
  8809  		preds[i] = x
  8810  		succs[i] = succ
  8811  
  8812  		// Check if the key is already in the skipmap.
  8813  		if succ != nil && succ.equal(key) {
  8814  			return succ
  8815  		}
  8816  	}
  8817  	return nil
  8818  }
  8819  
  8820  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip list.
  8821  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
  8822  func (s *UintMapDesc) findNodeDelete(key uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) int {
  8823  	// lFound is the index of the topmost layer at which the node was found, or -1.
  8824  	lFound, x := -1, s.header
  8825  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8826  		succ := x.atomicLoadNext(i)
  8827  		for succ != nil && succ.lessthan(key) {
  8828  			x = succ
  8829  			succ = x.atomicLoadNext(i)
  8830  		}
  8831  		preds[i] = x
  8832  		succs[i] = succ
  8833  
  8834  		// Check if the key is already in the skip list.
  8835  		if lFound == -1 && succ != nil && succ.equal(key) {
  8836  			lFound = i
  8837  		}
  8838  	}
  8839  	return lFound
  8840  }
  8841  
  8842  func unlockUintDesc(preds [maxLevel]*uintNodeDesc, highestLevel int) {
  8843  	var prevPred *uintNodeDesc
  8844  	for i := highestLevel; i >= 0; i-- {
  8845  		if preds[i] != prevPred { // the node may already have been unlocked in a previous iteration
  8846  			preds[i].mu.Unlock()
  8847  			prevPred = preds[i]
  8848  		}
  8849  	}
  8850  }
  8851  
  8852  // Store sets the value for a key.
  8853  func (s *UintMapDesc) Store(key uint, value interface{}) {
  8854  	level := s.randomlevel()
  8855  	var preds, succs [maxLevel]*uintNodeDesc
  8856  	for {
  8857  		nodeFound := s.findNode(key, &preds, &succs)
  8858  		if nodeFound != nil { // indicating the key is already in the skip-list
  8859  			if !nodeFound.flags.Get(marked) {
  8860  				// We don't need to care whether the node is fully linked;
  8861  				// just replace the value.
  8862  				nodeFound.storeVal(value)
  8863  				return
  8864  			}
  8865  			// If the node is marked, some other goroutine is in the process of
  8866  			// deleting this node, so retry the insertion in the next loop.
  8867  			continue
  8868  		}
  8869  
  8870  		// Add this node into the skip list.
  8871  		var (
  8872  			highestLocked        = -1 // the highest level being locked by this process
  8873  			valid                = true
  8874  			pred, succ, prevPred *uintNodeDesc
  8875  		)
  8876  		for layer := 0; valid && layer < level; layer++ {
  8877  			pred = preds[layer]   // target node's previous node
  8878  			succ = succs[layer]   // target node's next node
  8879  			if pred != prevPred { // pred may already be locked from a previous iteration
  8880  				pred.mu.Lock()
  8881  				highestLocked = layer
  8882  				prevPred = pred
  8883  			}
  8884  			// valid checks whether another node has been inserted into this layer
  8885  			// during the search. The insertion is valid if:
  8886  			// 1. Neither the previous node nor the next node is marked.
  8887  			// 2. The previous node's next node in this layer is still succ.
  8888  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  8889  		}
  8890  		if !valid {
  8891  			unlockUintDesc(preds, highestLocked)
  8892  			continue
  8893  		}
  8894  
  8895  		nn := newUintNodeDesc(key, value, level)
  8896  		for layer := 0; layer < level; layer++ {
  8897  			nn.storeNext(layer, succs[layer])
  8898  			preds[layer].atomicStoreNext(layer, nn)
  8899  		}
  8900  		nn.flags.SetTrue(fullyLinked)
  8901  		unlockUintDesc(preds, highestLocked)
  8902  		atomic.AddInt64(&s.length, 1)
  8903  		return
  8904  	}
  8905  }
  8906  
  8907  func (s *UintMapDesc) randomlevel() int {
  8908  	// Generate random level.
  8909  	level := randomLevel()
  8910  	// Update highest level if possible.
  8911  	for {
  8912  		hl := atomic.LoadInt64(&s.highestLevel)
  8913  		if int64(level) <= hl {
  8914  			break
  8915  		}
  8916  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  8917  			break
  8918  		}
  8919  	}
  8920  	return level
  8921  }
  8922  
  8923  // Load returns the value stored in the map for a key, or nil if no
  8924  // value is present.
  8925  // The ok result indicates whether value was found in the map.
  8926  func (s *UintMapDesc) Load(key uint) (value interface{}, ok bool) {
  8927  	x := s.header
  8928  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8929  		nex := x.atomicLoadNext(i)
  8930  		for nex != nil && nex.lessthan(key) {
  8931  			x = nex
  8932  			nex = x.atomicLoadNext(i)
  8933  		}
  8934  
  8935  		// Check if the key is already in the skip list.
  8936  		if nex != nil && nex.equal(key) {
  8937  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  8938  				return nex.loadVal(), true
  8939  			}
  8940  			return nil, false
  8941  		}
  8942  	}
  8943  	return nil, false
  8944  }
  8945  
  8946  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  8947  // The loaded result reports whether the key was present.
  8948  // (Modified from Delete)
  8949  func (s *UintMapDesc) LoadAndDelete(key uint) (value interface{}, loaded bool) {
  8950  	var (
  8951  		nodeToDelete *uintNodeDesc
  8952  		isMarked     bool // reports whether this operation has marked the node
  8953  		topLayer     = -1
  8954  		preds, succs [maxLevel]*uintNodeDesc
  8955  	)
  8956  	for {
  8957  		lFound := s.findNodeDelete(key, &preds, &succs)
  8958  		if isMarked || // this process marked the node, or the node was found in the skip list
  8959  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  8960  			if !isMarked { // the node has not been marked yet
  8961  				nodeToDelete = succs[lFound]
  8962  				topLayer = lFound
  8963  				nodeToDelete.mu.Lock()
  8964  				if nodeToDelete.flags.Get(marked) {
  8965  					// The node is already marked by another process;
  8966  					// the physical deletion will be accomplished by that process.
  8967  					nodeToDelete.mu.Unlock()
  8968  					return nil, false
  8969  				}
  8970  				nodeToDelete.flags.SetTrue(marked)
  8971  				isMarked = true
  8972  			}
  8973  			// Accomplish the physical deletion.
  8974  			var (
  8975  				highestLocked        = -1 // the highest level being locked by this process
  8976  				valid                = true
  8977  				pred, succ, prevPred *uintNodeDesc
  8978  			)
  8979  			for layer := 0; valid && (layer <= topLayer); layer++ {
  8980  				pred, succ = preds[layer], succs[layer]
  8981  				if pred != prevPred { // pred may already be locked from a previous iteration
  8982  					pred.mu.Lock()
  8983  					highestLocked = layer
  8984  					prevPred = pred
  8985  				}
  8986  				// valid checks whether another node has been inserted into this layer
  8987  				// during the search, or the previous node has been deleted by another
  8988  				// process. The deletion is valid if:
  8989  				// 1. The previous node is not marked.
  8990  				// 2. No other node has been inserted into this layer (pred's next is still succ).
  8991  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  8992  			}
  8993  			if !valid {
  8994  				unlockUintDesc(preds, highestLocked)
  8995  				continue
  8996  			}
  8997  			for i := topLayer; i >= 0; i-- {
  8998  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  8999  				// so we don't need `nodeToDelete.atomicLoadNext` here.
  9000  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  9001  			}
  9002  			nodeToDelete.mu.Unlock()
  9003  			unlockUintDesc(preds, highestLocked)
  9004  			atomic.AddInt64(&s.length, -1)
  9005  			return nodeToDelete.loadVal(), true
  9006  		}
  9007  		return nil, false
  9008  	}
  9009  }
  9010  
  9011  // LoadOrStore returns the existing value for the key if present.
  9012  // Otherwise, it stores and returns the given value.
  9013  // The loaded result is true if the value was loaded, false if stored.
  9014  // (Modified from Store)
  9015  func (s *UintMapDesc) LoadOrStore(key uint, value interface{}) (actual interface{}, loaded bool) {
  9016  	var (
  9017  		level        int
  9018  		preds, succs [maxLevel]*uintNodeDesc
  9019  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  9020  	)
  9021  	for {
  9022  		nodeFound := s.findNode(key, &preds, &succs)
  9023  		if nodeFound != nil { // indicating the key is already in the skip-list
  9024  			if !nodeFound.flags.Get(marked) {
  9025  				// We don't need to care whether the node is fully linked;
  9026  				// just return the value.
  9027  				return nodeFound.loadVal(), true
  9028  			}
  9029  			// If the node is marked, some other goroutine is in the process of
  9030  			// deleting this node, so retry the insertion in the next loop.
  9031  			continue
  9032  		}
  9033  
  9034  		// Add this node into the skip list.
  9035  		var (
  9036  			highestLocked        = -1 // the highest level being locked by this process
  9037  			valid                = true
  9038  			pred, succ, prevPred *uintNodeDesc
  9039  		)
  9040  		if level == 0 {
  9041  			level = s.randomlevel()
  9042  			if level > hl {
  9043  				// If the highest level was updated, it usually means that many goroutines
  9044  				// are inserting items. Hopefully we can find a better path in the next loop.
  9045  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  9046  				// but this strategy's performance is almost the same as the existing method.
  9047  				continue
  9048  			}
  9049  		}
  9050  		for layer := 0; valid && layer < level; layer++ {
  9051  			pred = preds[layer]   // target node's previous node
  9052  			succ = succs[layer]   // target node's next node
  9053  			if pred != prevPred { // pred may already be locked from a previous iteration
  9054  				pred.mu.Lock()
  9055  				highestLocked = layer
  9056  				prevPred = pred
  9057  			}
  9058  			// valid checks whether another node has been inserted into this layer
  9059  			// during the search. The insertion is valid if:
  9060  			// 1. Neither the previous node nor the next node is marked.
  9061  			// 2. The previous node's next node in this layer is still succ.
  9062  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  9063  		}
  9064  		if !valid {
  9065  			unlockUintDesc(preds, highestLocked)
  9066  			continue
  9067  		}
  9068  
  9069  		nn := newUintNodeDesc(key, value, level)
  9070  		for layer := 0; layer < level; layer++ {
  9071  			nn.storeNext(layer, succs[layer])
  9072  			preds[layer].atomicStoreNext(layer, nn)
  9073  		}
  9074  		nn.flags.SetTrue(fullyLinked)
  9075  		unlockUintDesc(preds, highestLocked)
  9076  		atomic.AddInt64(&s.length, 1)
  9077  		return value, false
  9078  	}
  9079  }
  9080  
  9081  // LoadOrStoreLazy returns the existing value for the key if present.
  9082  // Otherwise, it stores and returns the value obtained from f; f is invoked at most once, and only when the value is stored.
  9083  // The loaded result is true if the value was loaded, false if stored.
  9084  // (Modified from LoadOrStore)
  9085  func (s *UintMapDesc) LoadOrStoreLazy(key uint, f func() interface{}) (actual interface{}, loaded bool) {
  9086  	var (
  9087  		level        int
  9088  		preds, succs [maxLevel]*uintNodeDesc
  9089  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  9090  	)
  9091  	for {
  9092  		nodeFound := s.findNode(key, &preds, &succs)
  9093  		if nodeFound != nil { // indicating the key is already in the skip-list
  9094  			if !nodeFound.flags.Get(marked) {
  9095  				// We don't need to care whether the node is fully linked;
  9096  				// just return the value.
  9097  				return nodeFound.loadVal(), true
  9098  			}
  9099  			// If the node is marked, some other goroutine is in the process of
  9100  			// deleting this node, so retry the insertion in the next loop.
  9101  			continue
  9102  		}
  9103  
  9104  		// Add this node into the skip list.
  9105  		var (
  9106  			highestLocked        = -1 // the highest level being locked by this process
  9107  			valid                = true
  9108  			pred, succ, prevPred *uintNodeDesc
  9109  		)
  9110  		if level == 0 {
  9111  			level = s.randomlevel()
  9112  			if level > hl {
  9113  				// If the highest level was updated, it usually means that many goroutines
  9114  				// are inserting items. Hopefully we can find a better path in the next loop.
  9115  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  9116  				// but this strategy's performance is almost the same as the existing method.
  9117  				continue
  9118  			}
  9119  		}
  9120  		for layer := 0; valid && layer < level; layer++ {
  9121  			pred = preds[layer]   // target node's previous node
  9122  			succ = succs[layer]   // target node's next node
  9123  			if pred != prevPred { // pred may already be locked from a previous iteration
  9124  				pred.mu.Lock()
  9125  				highestLocked = layer
  9126  				prevPred = pred
  9127  			}
  9128  			// valid checks whether another node has been inserted into this layer
  9129  			// during the search. The insertion is valid if:
  9130  			// 1. Neither the previous node nor the next node is marked.
  9131  			// 2. The previous node's next node in this layer is still succ.
  9132  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  9133  		}
  9134  		if !valid {
  9135  			unlockUintDesc(preds, highestLocked)
  9136  			continue
  9137  		}
  9138  		value := f()
  9139  		nn := newUintNodeDesc(key, value, level)
  9140  		for layer := 0; layer < level; layer++ {
  9141  			nn.storeNext(layer, succs[layer])
  9142  			preds[layer].atomicStoreNext(layer, nn)
  9143  		}
  9144  		nn.flags.SetTrue(fullyLinked)
  9145  		unlockUintDesc(preds, highestLocked)
  9146  		atomic.AddInt64(&s.length, 1)
  9147  		return value, false
  9148  	}
  9149  }
  9150  
  9151  // Delete deletes the value for a key, reporting whether the key was deleted.
  9152  func (s *UintMapDesc) Delete(key uint) bool {
  9153  	var (
  9154  		nodeToDelete *uintNodeDesc
  9155  		isMarked     bool // reports whether this operation has marked the node
  9156  		topLayer     = -1
  9157  		preds, succs [maxLevel]*uintNodeDesc
  9158  	)
  9159  	for {
  9160  		lFound := s.findNodeDelete(key, &preds, &succs)
  9161  		if isMarked || // this process marked the node, or the node was found in the skip list
  9162  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  9163  			if !isMarked { // the node has not been marked yet
  9164  				nodeToDelete = succs[lFound]
  9165  				topLayer = lFound
  9166  				nodeToDelete.mu.Lock()
  9167  				if nodeToDelete.flags.Get(marked) {
  9168  					// The node is already marked by another process;
  9169  					// the physical deletion will be accomplished by that process.
  9170  					nodeToDelete.mu.Unlock()
  9171  					return false
  9172  				}
  9173  				nodeToDelete.flags.SetTrue(marked)
  9174  				isMarked = true
  9175  			}
  9176  			// Accomplish the physical deletion.
  9177  			var (
  9178  				highestLocked        = -1 // the highest level being locked by this process
  9179  				valid                = true
  9180  				pred, succ, prevPred *uintNodeDesc
  9181  			)
  9182  			for layer := 0; valid && (layer <= topLayer); layer++ {
  9183  				pred, succ = preds[layer], succs[layer]
  9184  				if pred != prevPred { // pred may already be locked from a previous iteration
  9185  					pred.mu.Lock()
  9186  					highestLocked = layer
  9187  					prevPred = pred
  9188  				}
  9189  				// valid checks whether another node has been inserted into this layer
  9190  				// during the search, or the previous node has been deleted by another
  9191  				// process. The deletion is valid if:
  9192  				// 1. The previous node is not marked.
  9193  				// 2. No other node has been inserted into this layer (pred's next is still succ).
  9194  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  9195  			}
  9196  			if !valid {
  9197  				unlockUintDesc(preds, highestLocked)
  9198  				continue
  9199  			}
  9200  			for i := topLayer; i >= 0; i-- {
  9201  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  9202  				// so we don't need `nodeToDelete.atomicLoadNext` here.
  9203  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  9204  			}
  9205  			nodeToDelete.mu.Unlock()
  9206  			unlockUintDesc(preds, highestLocked)
  9207  			atomic.AddInt64(&s.length, -1)
  9208  			return true
  9209  		}
  9210  		return false
  9211  	}
  9212  }
  9213  
  9214  // Range calls f sequentially for each key and value present in the skipmap.
  9215  // If f returns false, range stops the iteration.
  9216  //
  9217  // Range does not necessarily correspond to any consistent snapshot of the Map's
  9218  // contents: no key will be visited more than once, but if the value for any key
  9219  // is stored or deleted concurrently, Range may reflect any mapping for that key
  9220  // from any point during the Range call.
  9221  func (s *UintMapDesc) Range(f func(key uint, value interface{}) bool) {
  9222  	x := s.header.atomicLoadNext(0)
  9223  	for x != nil {
  9224  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  9225  			x = x.atomicLoadNext(0)
  9226  			continue
  9227  		}
  9228  		if !f(x.key, x.loadVal()) {
  9229  			break
  9230  		}
  9231  		x = x.atomicLoadNext(0)
  9232  	}
  9233  }
  9234  
  9235  // Len returns the length of this skipmap.
  9236  func (s *UintMapDesc) Len() int {
  9237  	return int(atomic.LoadInt64(&s.length))
  9238  }
  9239  
  9240  // Uint8Map represents a map based on a skip list, in ascending order.
  9241  type Uint8Map struct {
  9242  	header       *uint8Node
  9243  	length       int64
  9244  	highestLevel int64 // highest level for now
  9245  }
  9246  
  9247  type uint8Node struct {
  9248  	key   uint8
  9249  	value unsafe.Pointer // *interface{}
  9250  	next  optionalArray  // [level]*uint8Node
  9251  	mu    sync.Mutex
  9252  	flags bitflag
  9253  	level uint32
  9254  }
  9255  
  9256  func newUint8Node(key uint8, value interface{}, level int) *uint8Node {
  9257  	node := &uint8Node{
  9258  		key:   key,
  9259  		level: uint32(level),
  9260  	}
  9261  	node.storeVal(value)
  9262  	if level > op1 {
  9263  		node.next.extra = new([op2]unsafe.Pointer)
  9264  	}
  9265  	return node
  9266  }
  9267  
  9268  func (n *uint8Node) storeVal(value interface{}) {
  9269  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  9270  }
  9271  
  9272  func (n *uint8Node) loadVal() interface{} {
  9273  	return *(*interface{})(atomic.LoadPointer(&n.value))
  9274  }
  9275  
  9276  func (n *uint8Node) loadNext(i int) *uint8Node {
  9277  	return (*uint8Node)(n.next.load(i))
  9278  }
  9279  
  9280  func (n *uint8Node) storeNext(i int, node *uint8Node) {
  9281  	n.next.store(i, unsafe.Pointer(node))
  9282  }
  9283  
  9284  func (n *uint8Node) atomicLoadNext(i int) *uint8Node {
  9285  	return (*uint8Node)(n.next.atomicLoad(i))
  9286  }
  9287  
  9288  func (n *uint8Node) atomicStoreNext(i int, node *uint8Node) {
  9289  	n.next.atomicStore(i, unsafe.Pointer(node))
  9290  }
  9291  
  9292  func (n *uint8Node) lessthan(key uint8) bool {
  9293  	return n.key < key
  9294  }
  9295  
  9296  func (n *uint8Node) equal(key uint8) bool {
  9297  	return n.key == key
  9298  }
  9299  
  9300  // NewUint8 returns an empty uint8 skipmap.
  9301  func NewUint8() *Uint8Map {
  9302  	h := newUint8Node(0, "", maxLevel)
  9303  	h.flags.SetTrue(fullyLinked)
  9304  	return &Uint8Map{
  9305  		header:       h,
  9306  		highestLevel: defaultHighestLevel,
  9307  	}
  9308  }
  9309  
  9310  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  9311  // The returned preds and succs always satisfy preds[i] < key <= succs[i].
  9312  // (The full path is not needed: if the node is found, findNode returns immediately.)
  9313  func (s *Uint8Map) findNode(key uint8, preds *[maxLevel]*uint8Node, succs *[maxLevel]*uint8Node) *uint8Node {
  9314  	x := s.header
  9315  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  9316  		succ := x.atomicLoadNext(i)
  9317  		for succ != nil && succ.lessthan(key) {
  9318  			x = succ
  9319  			succ = x.atomicLoadNext(i)
  9320  		}
  9321  		preds[i] = x
  9322  		succs[i] = succ
  9323  
  9324  		// Check if the key is already in the skipmap.
  9325  		if succ != nil && succ.equal(key) {
  9326  			return succ
  9327  		}
  9328  	}
  9329  	return nil
  9330  }
  9331  
  9332  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip list.
  9333  // The returned preds and succs always satisfy preds[i] < key <= succs[i].
  9334  func (s *Uint8Map) findNodeDelete(key uint8, preds *[maxLevel]*uint8Node, succs *[maxLevel]*uint8Node) int {
  9335  	// lFound is the index of the topmost layer at which the node was found, or -1.
  9336  	lFound, x := -1, s.header
  9337  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  9338  		succ := x.atomicLoadNext(i)
  9339  		for succ != nil && succ.lessthan(key) {
  9340  			x = succ
  9341  			succ = x.atomicLoadNext(i)
  9342  		}
  9343  		preds[i] = x
  9344  		succs[i] = succ
  9345  
  9346  		// Check if the key is already in the skip list.
  9347  		if lFound == -1 && succ != nil && succ.equal(key) {
  9348  			lFound = i
  9349  		}
  9350  	}
  9351  	return lFound
  9352  }
  9353  
  9354  func unlockUint8(preds [maxLevel]*uint8Node, highestLevel int) {
  9355  	var prevPred *uint8Node
  9356  	for i := highestLevel; i >= 0; i-- {
  9357  		if preds[i] != prevPred { // the node may already have been unlocked in a previous iteration
  9358  			preds[i].mu.Unlock()
  9359  			prevPred = preds[i]
  9360  		}
  9361  	}
  9362  }
  9363  
  9364  // Store sets the value for a key.
  9365  func (s *Uint8Map) Store(key uint8, value interface{}) {
  9366  	level := s.randomlevel()
  9367  	var preds, succs [maxLevel]*uint8Node
  9368  	for {
  9369  		nodeFound := s.findNode(key, &preds, &succs)
  9370  		if nodeFound != nil { // indicating the key is already in the skip-list
  9371  			if !nodeFound.flags.Get(marked) {
  9372  				// We don't need to care whether the node is fully linked;
  9373  				// just replace the value.
  9374  				nodeFound.storeVal(value)
  9375  				return
  9376  			}
  9377  			// If the node is marked, some other goroutine is in the process of
  9378  			// deleting this node, so retry the insertion in the next loop.
  9379  			continue
  9380  		}
  9381  
  9382  		// Add this node into the skip list.
  9383  		var (
  9384  			highestLocked        = -1 // the highest level being locked by this process
  9385  			valid                = true
  9386  			pred, succ, prevPred *uint8Node
  9387  		)
  9388  		for layer := 0; valid && layer < level; layer++ {
  9389  			pred = preds[layer]   // target node's previous node
  9390  			succ = succs[layer]   // target node's next node
  9391  			if pred != prevPred { // pred may already be locked from a previous iteration
  9392  				pred.mu.Lock()
  9393  				highestLocked = layer
  9394  				prevPred = pred
  9395  			}
  9396  			// valid checks whether another node has been inserted into this layer
  9397  			// during the search. The insertion is valid if:
  9398  			// 1. Neither the previous node nor the next node is marked.
  9399  			// 2. The previous node's next node in this layer is still succ.
  9400  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  9401  		}
  9402  		if !valid {
  9403  			unlockUint8(preds, highestLocked)
  9404  			continue
  9405  		}
  9406  
  9407  		nn := newUint8Node(key, value, level)
  9408  		for layer := 0; layer < level; layer++ {
  9409  			nn.storeNext(layer, succs[layer])
  9410  			preds[layer].atomicStoreNext(layer, nn)
  9411  		}
  9412  		nn.flags.SetTrue(fullyLinked)
  9413  		unlockUint8(preds, highestLocked)
  9414  		atomic.AddInt64(&s.length, 1)
  9415  		return
  9416  	}
  9417  }
  9418  
  9419  func (s *Uint8Map) randomlevel() int {
  9420  	// Generate random level.
  9421  	level := randomLevel()
  9422  	// Update highest level if possible.
  9423  	for {
  9424  		hl := atomic.LoadInt64(&s.highestLevel)
  9425  		if int64(level) <= hl {
  9426  			break
  9427  		}
  9428  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  9429  			break
  9430  		}
  9431  	}
  9432  	return level
  9433  }
  9434  
  9435  // Load returns the value stored in the map for a key, or nil if no
  9436  // value is present.
  9437  // The ok result indicates whether value was found in the map.
  9438  func (s *Uint8Map) Load(key uint8) (value interface{}, ok bool) {
  9439  	x := s.header
  9440  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  9441  		nex := x.atomicLoadNext(i)
  9442  		for nex != nil && nex.lessthan(key) {
  9443  			x = nex
  9444  			nex = x.atomicLoadNext(i)
  9445  		}
  9446  
  9447  		// Check if the key is already in the skip list.
  9448  		if nex != nil && nex.equal(key) {
  9449  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  9450  				return nex.loadVal(), true
  9451  			}
  9452  			return nil, false
  9453  		}
  9454  	}
  9455  	return nil, false
  9456  }
  9457  
  9458  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  9459  // The loaded result reports whether the key was present.
  9460  // (Modified from Delete)
  9461  func (s *Uint8Map) LoadAndDelete(key uint8) (value interface{}, loaded bool) {
  9462  	var (
  9463  		nodeToDelete *uint8Node
  9464  		isMarked     bool // reports whether this operation has marked the node
  9465  		topLayer     = -1
  9466  		preds, succs [maxLevel]*uint8Node
  9467  	)
  9468  	for {
  9469  		lFound := s.findNodeDelete(key, &preds, &succs)
  9470  		if isMarked || // this process marked the node, or the node was found in the skip list
  9471  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  9472  			if !isMarked { // the node has not been marked yet
  9473  				nodeToDelete = succs[lFound]
  9474  				topLayer = lFound
  9475  				nodeToDelete.mu.Lock()
  9476  				if nodeToDelete.flags.Get(marked) {
  9477  					// The node is already marked by another process;
  9478  					// the physical deletion will be accomplished by that process.
  9479  					nodeToDelete.mu.Unlock()
  9480  					return nil, false
  9481  				}
  9482  				nodeToDelete.flags.SetTrue(marked)
  9483  				isMarked = true
  9484  			}
  9485  			// Accomplish the physical deletion.
  9486  			var (
  9487  				highestLocked        = -1 // the highest level being locked by this process
  9488  				valid                = true
  9489  				pred, succ, prevPred *uint8Node
  9490  			)
  9491  			for layer := 0; valid && (layer <= topLayer); layer++ {
  9492  				pred, succ = preds[layer], succs[layer]
  9493  				if pred != prevPred { // pred may already be locked from a previous iteration
  9494  					pred.mu.Lock()
  9495  					highestLocked = layer
  9496  					prevPred = pred
  9497  				}
  9498  				// valid checks whether another node has been inserted into this layer
  9499  				// during the search, or the previous node has been deleted by another
  9500  				// process. The deletion is valid if:
  9501  				// 1. The previous node is not marked.
  9502  				// 2. No other node has been inserted into this layer (pred's next is still succ).
  9503  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  9504  			}
  9505  			if !valid {
  9506  				unlockUint8(preds, highestLocked)
  9507  				continue
  9508  			}
  9509  			for i := topLayer; i >= 0; i-- {
  9510  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  9511  				// so we don't need `nodeToDelete.atomicLoadNext` here.
  9512  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  9513  			}
  9514  			nodeToDelete.mu.Unlock()
  9515  			unlockUint8(preds, highestLocked)
  9516  			atomic.AddInt64(&s.length, -1)
  9517  			return nodeToDelete.loadVal(), true
  9518  		}
  9519  		return nil, false
  9520  	}
  9521  }
  9522  
  9523  // LoadOrStore returns the existing value for the key if present.
  9524  // Otherwise, it stores and returns the given value.
  9525  // The loaded result is true if the value was loaded, false if stored.
  9526  // (Modified from Store)
  9527  func (s *Uint8Map) LoadOrStore(key uint8, value interface{}) (actual interface{}, loaded bool) {
  9528  	var (
  9529  		level        int
  9530  		preds, succs [maxLevel]*uint8Node
  9531  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  9532  	)
  9533  	for {
  9534  		nodeFound := s.findNode(key, &preds, &succs)
  9535  		if nodeFound != nil { // indicating the key is already in the skip-list
  9536  			if !nodeFound.flags.Get(marked) {
  9537  				// We don't need to care whether the node is fully linked;
  9538  				// just return the value.
  9539  				return nodeFound.loadVal(), true
  9540  			}
  9541  			// If the node is marked, some other goroutine is in the process of
  9542  			// deleting this node, so retry the insertion in the next loop.
  9543  			continue
  9544  		}
  9545  
  9546  		// Add this node into the skip list.
  9547  		var (
  9548  			highestLocked        = -1 // the highest level being locked by this process
  9549  			valid                = true
  9550  			pred, succ, prevPred *uint8Node
  9551  		)
  9552  		if level == 0 {
  9553  			level = s.randomlevel()
  9554  			if level > hl {
  9555  				// If the highest level was updated, it usually means that many goroutines
  9556  				// are inserting items. Hopefully we can find a better path in the next loop.
  9557  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  9558  				// but this strategy's performance is almost the same as the existing method.
  9559  				continue
  9560  			}
  9561  		}
  9562  		for layer := 0; valid && layer < level; layer++ {
  9563  			pred = preds[layer]   // target node's previous node
  9564  			succ = succs[layer]   // target node's next node
  9565  			if pred != prevPred { // pred may already be locked from a previous iteration
  9566  				pred.mu.Lock()
  9567  				highestLocked = layer
  9568  				prevPred = pred
  9569  			}
  9570  			// valid checks whether another node has been inserted into this layer
  9571  			// during the search. The insertion is valid if:
  9572  			// 1. Neither the previous node nor the next node is marked.
  9573  			// 2. The previous node's next node in this layer is still succ.
  9574  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  9575  		}
  9576  		if !valid {
  9577  			unlockUint8(preds, highestLocked)
  9578  			continue
  9579  		}
  9580  
  9581  		nn := newUint8Node(key, value, level)
  9582  		for layer := 0; layer < level; layer++ {
  9583  			nn.storeNext(layer, succs[layer])
  9584  			preds[layer].atomicStoreNext(layer, nn)
  9585  		}
  9586  		nn.flags.SetTrue(fullyLinked)
  9587  		unlockUint8(preds, highestLocked)
  9588  		atomic.AddInt64(&s.length, 1)
  9589  		return value, false
  9590  	}
  9591  }
  9592  
  9593  // LoadOrStoreLazy returns the existing value for the key if present.
  9594  // Otherwise, it stores and returns the value obtained from f; f is invoked at most once, and only when the value is stored.
  9595  // The loaded result is true if the value was loaded, false if stored.
  9596  // (Modified from LoadOrStore)
  9597  func (s *Uint8Map) LoadOrStoreLazy(key uint8, f func() interface{}) (actual interface{}, loaded bool) {
  9598  	var (
  9599  		level        int
  9600  		preds, succs [maxLevel]*uint8Node
  9601  		hl           = int(atomic.LoadInt64(&s.highestLevel))
  9602  	)
  9603  	for {
  9604  		nodeFound := s.findNode(key, &preds, &succs)
  9605  		if nodeFound != nil { // indicating the key is already in the skip-list
  9606  			if !nodeFound.flags.Get(marked) {
  9607  				// We don't need to care whether the node is fully linked;
  9608  				// just return the value.
  9609  				return nodeFound.loadVal(), true
  9610  			}
  9611  			// If the node is marked, some other goroutine is in the process of
  9612  			// deleting this node, so retry the insertion in the next loop.
  9613  			continue
  9614  		}
  9615  
  9616  		// Add this node into the skip list.
  9617  		var (
  9618  			highestLocked        = -1 // the highest level being locked by this process
  9619  			valid                = true
  9620  			pred, succ, prevPred *uint8Node
  9621  		)
  9622  		if level == 0 {
  9623  			level = s.randomlevel()
  9624  			if level > hl {
  9625  				// If the highest level was updated, it usually means that many goroutines
  9626  				// are inserting items. Hopefully we can find a better path in the next loop.
  9627  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
  9628  				// but this strategy's performance is almost the same as the existing method.
  9629  				continue
  9630  			}
  9631  		}
  9632  		for layer := 0; valid && layer < level; layer++ {
  9633  			pred = preds[layer]   // target node's previous node
  9634  			succ = succs[layer]   // target node's next node
  9635  			if pred != prevPred { // pred may already be locked from a previous iteration
  9636  				pred.mu.Lock()
  9637  				highestLocked = layer
  9638  				prevPred = pred
  9639  			}
  9640  			// valid checks whether another node has been inserted into this layer
  9641  			// during the search. The insertion is valid if:
  9642  			// 1. Neither the previous node nor the next node is marked.
  9643  			// 2. The previous node's next node in this layer is still succ.
  9644  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
  9645  		}
  9646  		if !valid {
  9647  			unlockUint8(preds, highestLocked)
  9648  			continue
  9649  		}
  9650  		value := f()
  9651  		nn := newUint8Node(key, value, level)
  9652  		for layer := 0; layer < level; layer++ {
  9653  			nn.storeNext(layer, succs[layer])
  9654  			preds[layer].atomicStoreNext(layer, nn)
  9655  		}
  9656  		nn.flags.SetTrue(fullyLinked)
  9657  		unlockUint8(preds, highestLocked)
  9658  		atomic.AddInt64(&s.length, 1)
  9659  		return value, false
  9660  	}
  9661  }
  9662  
  9663  // Delete deletes the value for a key, reporting whether the key was deleted.
  9664  func (s *Uint8Map) Delete(key uint8) bool {
  9665  	var (
  9666  		nodeToDelete *uint8Node
  9667  		isMarked     bool // reports whether this operation has marked the node
  9668  		topLayer     = -1
  9669  		preds, succs [maxLevel]*uint8Node
  9670  	)
  9671  	for {
  9672  		lFound := s.findNodeDelete(key, &preds, &succs)
  9673  		if isMarked || // this process marked the node, or the node was found in the skip list
  9674  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  9675  			if !isMarked { // the node has not been marked yet
  9676  				nodeToDelete = succs[lFound]
  9677  				topLayer = lFound
  9678  				nodeToDelete.mu.Lock()
  9679  				if nodeToDelete.flags.Get(marked) {
  9680  					// The node is already marked by another process;
  9681  					// the physical deletion will be accomplished by that process.
  9682  					nodeToDelete.mu.Unlock()
  9683  					return false
  9684  				}
  9685  				nodeToDelete.flags.SetTrue(marked)
  9686  				isMarked = true
  9687  			}
  9688  			// Accomplish the physical deletion.
  9689  			var (
  9690  				highestLocked        = -1 // the highest level being locked by this process
  9691  				valid                = true
  9692  				pred, succ, prevPred *uint8Node
  9693  			)
  9694  			for layer := 0; valid && (layer <= topLayer); layer++ {
  9695  				pred, succ = preds[layer], succs[layer]
  9696  				if pred != prevPred { // pred may already be locked from a previous iteration
  9697  					pred.mu.Lock()
  9698  					highestLocked = layer
  9699  					prevPred = pred
  9700  				}
  9701  				// valid checks whether another node has been inserted into this layer
  9702  				// during the search, or the previous node has been deleted by another
  9703  				// process. The deletion is valid if:
  9704  				// 1. The previous node is not marked.
  9705  				// 2. No other node has been inserted into this layer (pred's next is still succ).
  9706  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
  9707  			}
  9708  			if !valid {
  9709  				unlockUint8(preds, highestLocked)
  9710  				continue
  9711  			}
  9712  			for i := topLayer; i >= 0; i-- {
  9713  				// Now we own `nodeToDelete`; no other goroutine will modify it,
  9714  				// so we don't need `nodeToDelete.atomicLoadNext` here.
  9715  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
  9716  			}
  9717  			nodeToDelete.mu.Unlock()
  9718  			unlockUint8(preds, highestLocked)
  9719  			atomic.AddInt64(&s.length, -1)
  9720  			return true
  9721  		}
  9722  		return false
  9723  	}
  9724  }
  9725  
  9726  // Range calls f sequentially for each key and value present in the skipmap.
  9727  // If f returns false, range stops the iteration.
  9728  //
  9729  // Range does not necessarily correspond to any consistent snapshot of the Map's
  9730  // contents: no key will be visited more than once, but if the value for any key
  9731  // is stored or deleted concurrently, Range may reflect any mapping for that key
  9732  // from any point during the Range call.
  9733  func (s *Uint8Map) Range(f func(key uint8, value interface{}) bool) {
  9734  	x := s.header.atomicLoadNext(0)
  9735  	for x != nil {
  9736  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  9737  			x = x.atomicLoadNext(0)
  9738  			continue
  9739  		}
  9740  		if !f(x.key, x.loadVal()) {
  9741  			break
  9742  		}
  9743  		x = x.atomicLoadNext(0)
  9744  	}
  9745  }
  9746  
  9747  // Len returns the length of this skipmap.
  9748  func (s *Uint8Map) Len() int {
  9749  	return int(atomic.LoadInt64(&s.length))
  9750  }
  9751  
  9752  // Uint8MapDesc represents a map based on a skip list, in descending order.
  9753  type Uint8MapDesc struct {
  9754  	header       *uint8NodeDesc
  9755  	length       int64
  9756  	highestLevel int64 // highest level for now
  9757  }
  9758  
  9759  type uint8NodeDesc struct {
  9760  	key   uint8
  9761  	value unsafe.Pointer // *interface{}
  9762  	next  optionalArray  // [level]*uint8NodeDesc
  9763  	mu    sync.Mutex
  9764  	flags bitflag
  9765  	level uint32
  9766  }
  9767  
  9768  func newUint8NodeDesc(key uint8, value interface{}, level int) *uint8NodeDesc {
  9769  	node := &uint8NodeDesc{
  9770  		key:   key,
  9771  		level: uint32(level),
  9772  	}
  9773  	node.storeVal(value)
  9774  	if level > op1 {
  9775  		node.next.extra = new([op2]unsafe.Pointer)
  9776  	}
  9777  	return node
  9778  }
  9779  
  9780  func (n *uint8NodeDesc) storeVal(value interface{}) {
  9781  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
  9782  }
  9783  
  9784  func (n *uint8NodeDesc) loadVal() interface{} {
  9785  	return *(*interface{})(atomic.LoadPointer(&n.value))
  9786  }
  9787  
  9788  func (n *uint8NodeDesc) loadNext(i int) *uint8NodeDesc {
  9789  	return (*uint8NodeDesc)(n.next.load(i))
  9790  }
  9791  
  9792  func (n *uint8NodeDesc) storeNext(i int, node *uint8NodeDesc) {
  9793  	n.next.store(i, unsafe.Pointer(node))
  9794  }
  9795  
  9796  func (n *uint8NodeDesc) atomicLoadNext(i int) *uint8NodeDesc {
  9797  	return (*uint8NodeDesc)(n.next.atomicLoad(i))
  9798  }
  9799  
  9800  func (n *uint8NodeDesc) atomicStoreNext(i int, node *uint8NodeDesc) {
  9801  	n.next.atomicStore(i, unsafe.Pointer(node))
  9802  }
  9803  
  9804  func (n *uint8NodeDesc) lessthan(key uint8) bool {
  9805  	return n.key > key
  9806  }
  9807  
  9808  func (n *uint8NodeDesc) equal(key uint8) bool {
  9809  	return n.key == key
  9810  }
  9811  
  9812  // NewUint8Desc returns an empty uint8 skipmap in descending order.
  9813  func NewUint8Desc() *Uint8MapDesc {
  9814  	h := newUint8NodeDesc(0, "", maxLevel)
  9815  	h.flags.SetTrue(fullyLinked)
  9816  	return &Uint8MapDesc{
  9817  		header:       h,
  9818  		highestLevel: defaultHighestLevel,
  9819  	}
  9820  }
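
        // sketchDescOrder is an illustrative, non-generated example: the Desc
        // variants keep keys in descending order, so a Range over this map visits
        // larger keys first (assuming no concurrent writers during the call).
        func sketchDescOrder() []uint8 {
        	m := NewUint8Desc()
        	m.Store(1, "a")
        	m.Store(3, "c")
        	m.Store(2, "b")
        	var order []uint8
        	m.Range(func(k uint8, v interface{}) bool {
        		order = append(order, k) // visits 3, 2, 1 in that order
        		return true
        	})
        	return order
        }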
  9821  
  9822  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
  9823  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
  9824  // (Unlike a full-path search, it returns immediately once the node is found.)
  9825  func (s *Uint8MapDesc) findNode(key uint8, preds *[maxLevel]*uint8NodeDesc, succs *[maxLevel]*uint8NodeDesc) *uint8NodeDesc {
  9826  	x := s.header
  9827  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  9828  		succ := x.atomicLoadNext(i)
  9829  		for succ != nil && succ.lessthan(key) {
  9830  			x = succ
  9831  			succ = x.atomicLoadNext(i)
  9832  		}
  9833  		preds[i] = x
  9834  		succs[i] = succ
  9835  
  9836  		// Check if the key is already in the skipmap.
  9837  		if succ != nil && succ.equal(key) {
  9838  			return succ
  9839  		}
  9840  	}
  9841  	return nil
  9842  }
  9843  
  9844  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
  9845  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
  9846  func (s *Uint8MapDesc) findNodeDelete(key uint8, preds *[maxLevel]*uint8NodeDesc, succs *[maxLevel]*uint8NodeDesc) int {
  9847  	// lFound represents the index of the first layer at which it found a node.
  9848  	lFound, x := -1, s.header
  9849  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  9850  		succ := x.atomicLoadNext(i)
  9851  		for succ != nil && succ.lessthan(key) {
  9852  			x = succ
  9853  			succ = x.atomicLoadNext(i)
  9854  		}
  9855  		preds[i] = x
  9856  		succs[i] = succ
  9857  
  9858  		// Check if the key is already in the skip list.
  9859  		if lFound == -1 && succ != nil && succ.equal(key) {
  9860  			lFound = i
  9861  		}
  9862  	}
  9863  	return lFound
  9864  }
  9865  
  9866  func unlockUint8Desc(preds [maxLevel]*uint8NodeDesc, highestLevel int) {
  9867  	var prevPred *uint8NodeDesc
  9868  	for i := highestLevel; i >= 0; i-- {
  9869  		if preds[i] != prevPred { // the node may already have been unlocked in a previous iteration
  9870  			preds[i].mu.Unlock()
  9871  			prevPred = preds[i]
  9872  		}
  9873  	}
  9874  }
  9875  
  9876  // Store sets the value for a key.
  9877  func (s *Uint8MapDesc) Store(key uint8, value interface{}) {
  9878  	level := s.randomlevel()
  9879  	var preds, succs [maxLevel]*uint8NodeDesc
  9880  	for {
  9881  		nodeFound := s.findNode(key, &preds, &succs)
  9882  		if nodeFound != nil { // indicating the key is already in the skip-list
  9883  			if !nodeFound.flags.Get(marked) {
  9884  				// We don't need to care about whether or not the node is fully linked,
  9885  				// just replace the value.
  9886  				nodeFound.storeVal(value)
  9887  				return
  9888  			}
  9889  			// If the node is marked, some other goroutine is in the process of deleting it,
  9890  			// so we retry the insertion in the next loop.
  9891  			continue
  9892  		}
  9893  
  9894  		// Add this node into skip list.
  9895  		var (
  9896  			highestLocked        = -1 // the highest level being locked by this process
  9897  			valid                = true
  9898  			pred, succ, prevPred *uint8NodeDesc
  9899  		)
  9900  		for layer := 0; valid && layer < level; layer++ {
  9901  			pred = preds[layer]   // target node's previous node
  9902  			succ = succs[layer]   // target node's next node
  9903  			if pred != prevPred { // the node in this layer may already be locked from a previous iteration
  9904  				pred.mu.Lock()
  9905  				highestLocked = layer
  9906  				prevPred = pred
  9907  			}
  9908  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
  9909  			// It is valid if:
  9910  			// 1. Neither the previous node nor the next node is marked.
  9911  			// 2. The previous node's next node is still succ in this layer.
  9912  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  9913  		}
  9914  		if !valid {
  9915  			unlockUint8Desc(preds, highestLocked)
  9916  			continue
  9917  		}
  9918  
  9919  		nn := newUint8NodeDesc(key, value, level)
  9920  		for layer := 0; layer < level; layer++ {
  9921  			nn.storeNext(layer, succs[layer])
  9922  			preds[layer].atomicStoreNext(layer, nn)
  9923  		}
  9924  		nn.flags.SetTrue(fullyLinked)
  9925  		unlockUint8Desc(preds, highestLocked)
  9926  		atomic.AddInt64(&s.length, 1)
  9927  		return
  9928  	}
  9929  }
  9930  
  9931  func (s *Uint8MapDesc) randomlevel() int {
  9932  	// Generate random level.
  9933  	level := randomLevel()
  9934  	// Update highest level if possible.
  9935  	for {
  9936  		hl := atomic.LoadInt64(&s.highestLevel)
  9937  		if int64(level) <= hl {
  9938  			break
  9939  		}
  9940  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  9941  			break
  9942  		}
  9943  	}
  9944  	return level
  9945  }
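
        // sketchAtomicMax is an illustrative, non-generated distillation of the
        // CAS loop above: raise a shared watermark without locking, retrying only
        // when another goroutine moved it concurrently.
        func sketchAtomicMax(addr *int64, v int64) {
        	for {
        		cur := atomic.LoadInt64(addr)
        		if v <= cur {
        			return // already high enough; nothing to do
        		}
        		if atomic.CompareAndSwapInt64(addr, cur, v) {
        			return // we won the race and published the new maximum
        		}
        		// lost the race: reload and re-check against the newer value
        	}
        }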
  9946  
  9947  // Load returns the value stored in the map for a key, or nil if no
  9948  // value is present.
  9949  // The ok result indicates whether value was found in the map.
  9950  func (s *Uint8MapDesc) Load(key uint8) (value interface{}, ok bool) {
  9951  	x := s.header
  9952  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  9953  		nex := x.atomicLoadNext(i)
  9954  		for nex != nil && nex.lessthan(key) {
  9955  			x = nex
  9956  			nex = x.atomicLoadNext(i)
  9957  		}
  9958  
  9959  		// Check if the key is already in the skip list.
  9960  		if nex != nil && nex.equal(key) {
  9961  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
  9962  				return nex.loadVal(), true
  9963  			}
  9964  			return nil, false
  9965  		}
  9966  	}
  9967  	return nil, false
  9968  }
  9969  
  9970  // LoadAndDelete deletes the value for a key, returning the previous value if any.
  9971  // The loaded result reports whether the key was present.
  9972  // (Modified from Delete)
  9973  func (s *Uint8MapDesc) LoadAndDelete(key uint8) (value interface{}, loaded bool) {
  9974  	var (
  9975  		nodeToDelete *uint8NodeDesc
  9976  		isMarked     bool // reports whether this operation marked the node
  9977  		topLayer     = -1
  9978  		preds, succs [maxLevel]*uint8NodeDesc
  9979  	)
  9980  	for {
  9981  		lFound := s.findNodeDelete(key, &preds, &succs)
  9982  		if isMarked || // this process marked the node, or the node was found in the skip list
  9983  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  9984  			if !isMarked { // the node has not been marked by this process yet
  9985  				nodeToDelete = succs[lFound]
  9986  				topLayer = lFound
  9987  				nodeToDelete.mu.Lock()
  9988  				if nodeToDelete.flags.Get(marked) {
  9989  					// The node is marked by another process,
  9990  					// the physical deletion will be accomplished by another process.
  9991  					nodeToDelete.mu.Unlock()
  9992  					return nil, false
  9993  				}
  9994  				nodeToDelete.flags.SetTrue(marked)
  9995  				isMarked = true
  9996  			}
  9997  			// Accomplish the physical deletion.
  9998  			var (
  9999  				highestLocked        = -1 // the highest level being locked by this process
 10000  				valid                = true
 10001  				pred, succ, prevPred *uint8NodeDesc
 10002  			)
 10003  			for layer := 0; valid && (layer <= topLayer); layer++ {
 10004  				pred, succ = preds[layer], succs[layer]
 10005  				if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10006  					pred.mu.Lock()
 10007  					highestLocked = layer
 10008  					prevPred = pred
 10009  				}
 10010  				// valid checks whether another node has been inserted into the skip list in this layer
 10011  				// during this process, or whether the previous node has been deleted by another process.
 10012  				// It is valid if:
 10013  				// 1. the previous node still exists (is not marked).
 10014  				// 2. no other node has been inserted into the skip list in this layer.
 10015  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 10016  			}
 10017  			if !valid {
 10018  				unlockUint8Desc(preds, highestLocked)
 10019  				continue
 10020  			}
 10021  			for i := topLayer; i >= 0; i-- {
 10022  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 10023  				// so we don't need `nodeToDelete.atomicLoadNext`.
 10024  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 10025  			}
 10026  			nodeToDelete.mu.Unlock()
 10027  			unlockUint8Desc(preds, highestLocked)
 10028  			atomic.AddInt64(&s.length, -1)
 10029  			return nodeToDelete.loadVal(), true
 10030  		}
 10031  		return nil, false
 10032  	}
 10033  }
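
        // sketchTakeUint8Desc is an illustrative, non-generated example of why
        // LoadAndDelete is preferable to a separate Load followed by Delete under
        // concurrency: the combined call observes and removes the entry in one
        // step, so no other goroutine can consume the value in between.
        func sketchTakeUint8Desc(m *Uint8MapDesc, k uint8) (interface{}, bool) {
        	return m.LoadAndDelete(k) // atomic "take"; Load+Delete would race
        }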
 10034  
 10035  // LoadOrStore returns the existing value for the key if present.
 10036  // Otherwise, it stores and returns the given value.
 10037  // The loaded result is true if the value was loaded, false if stored.
 10038  // (Modified from Store)
 10039  func (s *Uint8MapDesc) LoadOrStore(key uint8, value interface{}) (actual interface{}, loaded bool) {
 10040  	var (
 10041  		level        int
 10042  		preds, succs [maxLevel]*uint8NodeDesc
 10043  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 10044  	)
 10045  	for {
 10046  		nodeFound := s.findNode(key, &preds, &succs)
 10047  		if nodeFound != nil { // indicating the key is already in the skip-list
 10048  			if !nodeFound.flags.Get(marked) {
 10049  				// We don't need to care about whether or not the node is fully linked,
 10050  				// just return the value.
 10051  				return nodeFound.loadVal(), true
 10052  			}
 10053  			// If the node is marked, some other goroutine is in the process of deleting it,
 10054  			// so we retry in the next loop.
 10055  			continue
 10056  		}
 10057  
 10058  		// Add this node into skip list.
 10059  		var (
 10060  			highestLocked        = -1 // the highest level being locked by this process
 10061  			valid                = true
 10062  			pred, succ, prevPred *uint8NodeDesc
 10063  		)
 10064  		if level == 0 {
 10065  			level = s.randomlevel()
 10066  			if level > hl {
 10067  				// If the highest level is updated, usually means that many goroutines
 10068  				// are inserting items. Hopefully we can find a better path in next loop.
 10069  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 10070  				// but this strategy's performance is almost the same as the existing method.
 10071  				continue
 10072  			}
 10073  		}
 10074  		for layer := 0; valid && layer < level; layer++ {
 10075  			pred = preds[layer]   // target node's previous node
 10076  			succ = succs[layer]   // target node's next node
 10077  			if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10078  				pred.mu.Lock()
 10079  				highestLocked = layer
 10080  				prevPred = pred
 10081  			}
 10082  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
 10083  			// It is valid if:
 10084  			// 1. Neither the previous node nor the next node is marked.
 10085  			// 2. The previous node's next node is still succ in this layer.
 10086  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 10087  		}
 10088  		if !valid {
 10089  			unlockUint8Desc(preds, highestLocked)
 10090  			continue
 10091  		}
 10092  
 10093  		nn := newUint8NodeDesc(key, value, level)
 10094  		for layer := 0; layer < level; layer++ {
 10095  			nn.storeNext(layer, succs[layer])
 10096  			preds[layer].atomicStoreNext(layer, nn)
 10097  		}
 10098  		nn.flags.SetTrue(fullyLinked)
 10099  		unlockUint8Desc(preds, highestLocked)
 10100  		atomic.AddInt64(&s.length, 1)
 10101  		return value, false
 10102  	}
 10103  }
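
        // sketchStoreOnceUint8Desc is an illustrative, non-generated use of
        // LoadOrStore: publish a value at most once per key; later callers get
        // the value stored by the winner rather than overwriting it.
        func sketchStoreOnceUint8Desc(m *Uint8MapDesc, k uint8) interface{} {
        	actual, loaded := m.LoadOrStore(k, "initial")
        	_ = loaded // true if some earlier call already stored a value for k
        	return actual
        }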
 10104  
 10105  // LoadOrStoreLazy returns the existing value for the key if present.
 10106  // Otherwise, it computes the value with f, stores it, and returns it; f is invoked at most once.
 10107  // The loaded result is true if the value was loaded, false if stored.
 10108  // (Modified from LoadOrStore)
 10109  func (s *Uint8MapDesc) LoadOrStoreLazy(key uint8, f func() interface{}) (actual interface{}, loaded bool) {
 10110  	var (
 10111  		level        int
 10112  		preds, succs [maxLevel]*uint8NodeDesc
 10113  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 10114  	)
 10115  	for {
 10116  		nodeFound := s.findNode(key, &preds, &succs)
 10117  		if nodeFound != nil { // indicating the key is already in the skip-list
 10118  			if !nodeFound.flags.Get(marked) {
 10119  				// We don't need to care about whether or not the node is fully linked,
 10120  				// just return the value.
 10121  				return nodeFound.loadVal(), true
 10122  			}
 10123  			// If the node is marked, some other goroutine is in the process of deleting it,
 10124  			// so we retry in the next loop.
 10125  			continue
 10126  		}
 10127  
 10128  		// Add this node into skip list.
 10129  		var (
 10130  			highestLocked        = -1 // the highest level being locked by this process
 10131  			valid                = true
 10132  			pred, succ, prevPred *uint8NodeDesc
 10133  		)
 10134  		if level == 0 {
 10135  			level = s.randomlevel()
 10136  			if level > hl {
 10137  				// If the highest level is updated, usually means that many goroutines
 10138  				// are inserting items. Hopefully we can find a better path in next loop.
 10139  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 10140  				// but this strategy's performance is almost the same as the existing method.
 10141  				continue
 10142  			}
 10143  		}
 10144  		for layer := 0; valid && layer < level; layer++ {
 10145  			pred = preds[layer]   // target node's previous node
 10146  			succ = succs[layer]   // target node's next node
 10147  			if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10148  				pred.mu.Lock()
 10149  				highestLocked = layer
 10150  				prevPred = pred
 10151  			}
 10152  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
 10153  			// It is valid if:
 10154  			// 1. Neither the previous node nor the next node is marked.
 10155  			// 2. The previous node's next node is still succ in this layer.
 10156  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 10157  		}
 10158  		if !valid {
 10159  			unlockUint8Desc(preds, highestLocked)
 10160  			continue
 10161  		}
 10162  		value := f()
 10163  		nn := newUint8NodeDesc(key, value, level)
 10164  		for layer := 0; layer < level; layer++ {
 10165  			nn.storeNext(layer, succs[layer])
 10166  			preds[layer].atomicStoreNext(layer, nn)
 10167  		}
 10168  		nn.flags.SetTrue(fullyLinked)
 10169  		unlockUint8Desc(preds, highestLocked)
 10170  		atomic.AddInt64(&s.length, 1)
 10171  		return value, false
 10172  	}
 10173  }
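
        // sketchLazyInitUint8Desc is an illustrative, non-generated example: when
        // constructing the value is expensive, LoadOrStoreLazy defers that work
        // until the key is known to be absent, and f runs at most once per call.
        func sketchLazyInitUint8Desc(m *Uint8MapDesc, k uint8) interface{} {
        	actual, _ := m.LoadOrStoreLazy(k, func() interface{} {
        		return make([]byte, 1024) // hypothetical expensive construction
        	})
        	return actual
        }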
 10174  
 10175  // Delete deletes the value for a key.
 10176  func (s *Uint8MapDesc) Delete(key uint8) bool {
 10177  	var (
 10178  		nodeToDelete *uint8NodeDesc
 10179  		isMarked     bool // reports whether this operation marked the node
 10180  		topLayer     = -1
 10181  		preds, succs [maxLevel]*uint8NodeDesc
 10182  	)
 10183  	for {
 10184  		lFound := s.findNodeDelete(key, &preds, &succs)
 10185  		if isMarked || // this process marked the node, or the node was found in the skip list
 10186  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 10187  			if !isMarked { // the node has not been marked by this process yet
 10188  				nodeToDelete = succs[lFound]
 10189  				topLayer = lFound
 10190  				nodeToDelete.mu.Lock()
 10191  				if nodeToDelete.flags.Get(marked) {
 10192  					// The node is marked by another process,
 10193  					// the physical deletion will be accomplished by another process.
 10194  					nodeToDelete.mu.Unlock()
 10195  					return false
 10196  				}
 10197  				nodeToDelete.flags.SetTrue(marked)
 10198  				isMarked = true
 10199  			}
 10200  			// Accomplish the physical deletion.
 10201  			var (
 10202  				highestLocked        = -1 // the highest level being locked by this process
 10203  				valid                = true
 10204  				pred, succ, prevPred *uint8NodeDesc
 10205  			)
 10206  			for layer := 0; valid && (layer <= topLayer); layer++ {
 10207  				pred, succ = preds[layer], succs[layer]
 10208  				if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10209  					pred.mu.Lock()
 10210  					highestLocked = layer
 10211  					prevPred = pred
 10212  				}
 10213  				// valid checks whether another node has been inserted into the skip list in this layer
 10214  				// during this process, or whether the previous node has been deleted by another process.
 10215  				// It is valid if:
 10216  				// 1. the previous node still exists (is not marked).
 10217  				// 2. no other node has been inserted into the skip list in this layer.
 10218  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 10219  			}
 10220  			if !valid {
 10221  				unlockUint8Desc(preds, highestLocked)
 10222  				continue
 10223  			}
 10224  			for i := topLayer; i >= 0; i-- {
 10225  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 10226  				// so we don't need `nodeToDelete.atomicLoadNext`.
 10227  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 10228  			}
 10229  			nodeToDelete.mu.Unlock()
 10230  			unlockUint8Desc(preds, highestLocked)
 10231  			atomic.AddInt64(&s.length, -1)
 10232  			return true
 10233  		}
 10234  		return false
 10235  	}
 10236  }
 10237  
 10238  // Range calls f sequentially for each key and value present in the skipmap.
 10239  // If f returns false, range stops the iteration.
 10240  //
 10241  // Range does not necessarily correspond to any consistent snapshot of the Map's
 10242  // contents: no key will be visited more than once, but if the value for any key
 10243  // is stored or deleted concurrently, Range may reflect any mapping for that key
 10244  // from any point during the Range call.
 10245  func (s *Uint8MapDesc) Range(f func(key uint8, value interface{}) bool) {
 10246  	x := s.header.atomicLoadNext(0)
 10247  	for x != nil {
 10248  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 10249  			x = x.atomicLoadNext(0)
 10250  			continue
 10251  		}
 10252  		if !f(x.key, x.loadVal()) {
 10253  			break
 10254  		}
 10255  		x = x.atomicLoadNext(0)
 10256  	}
 10257  }
 10258  
 10259  // Len returns the length of this skipmap.
 10260  func (s *Uint8MapDesc) Len() int {
 10261  	return int(atomic.LoadInt64(&s.length))
 10262  }
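
        // sketchLenVsRange is an illustrative, non-generated note in code form:
        // Len reads an O(1) counter, while counting via Range walks the bottom
        // level; under concurrent writers the two may legitimately disagree.
        func sketchLenVsRange(m *Uint8MapDesc) (fast int, walked int) {
        	fast = m.Len()
        	m.Range(func(k uint8, v interface{}) bool {
        		walked++
        		return true
        	})
        	return fast, walked // equal only in the absence of concurrent updates
        }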
 10263  
 10264  // Uint16Map represents a map based on skip list in ascending order.
 10265  type Uint16Map struct {
 10266  	header       *uint16Node
 10267  	length       int64
 10268  	highestLevel int64 // highest level for now
 10269  }
 10270  
 10271  type uint16Node struct {
 10272  	key   uint16
 10273  	value unsafe.Pointer // *interface{}
 10274  	next  optionalArray  // [level]*uint16Node
 10275  	mu    sync.Mutex
 10276  	flags bitflag
 10277  	level uint32
 10278  }
 10279  
 10280  func newUint16Node(key uint16, value interface{}, level int) *uint16Node {
 10281  	node := &uint16Node{
 10282  		key:   key,
 10283  		level: uint32(level),
 10284  	}
 10285  	node.storeVal(value)
 10286  	if level > op1 {
 10287  		node.next.extra = new([op2]unsafe.Pointer)
 10288  	}
 10289  	return node
 10290  }
 10291  
 10292  func (n *uint16Node) storeVal(value interface{}) {
 10293  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 10294  }
 10295  
 10296  func (n *uint16Node) loadVal() interface{} {
 10297  	return *(*interface{})(atomic.LoadPointer(&n.value))
 10298  }
 10299  
 10300  func (n *uint16Node) loadNext(i int) *uint16Node {
 10301  	return (*uint16Node)(n.next.load(i))
 10302  }
 10303  
 10304  func (n *uint16Node) storeNext(i int, node *uint16Node) {
 10305  	n.next.store(i, unsafe.Pointer(node))
 10306  }
 10307  
 10308  func (n *uint16Node) atomicLoadNext(i int) *uint16Node {
 10309  	return (*uint16Node)(n.next.atomicLoad(i))
 10310  }
 10311  
 10312  func (n *uint16Node) atomicStoreNext(i int, node *uint16Node) {
 10313  	n.next.atomicStore(i, unsafe.Pointer(node))
 10314  }
 10315  
 10316  func (n *uint16Node) lessthan(key uint16) bool {
 10317  	return n.key < key
 10318  }
 10319  
 10320  func (n *uint16Node) equal(key uint16) bool {
 10321  	return n.key == key
 10322  }
 10323  
 10324  // NewUint16 returns an empty uint16 skipmap.
 10325  func NewUint16() *Uint16Map {
 10326  	h := newUint16Node(0, "", maxLevel)
 10327  	h.flags.SetTrue(fullyLinked)
 10328  	return &Uint16Map{
 10329  		header:       h,
 10330  		highestLevel: defaultHighestLevel,
 10331  	}
 10332  }
 10333  
 10334  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
 10335  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
 10336  // (Unlike a full-path search, it returns immediately once the node is found.)
 10337  func (s *Uint16Map) findNode(key uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) *uint16Node {
 10338  	x := s.header
 10339  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 10340  		succ := x.atomicLoadNext(i)
 10341  		for succ != nil && succ.lessthan(key) {
 10342  			x = succ
 10343  			succ = x.atomicLoadNext(i)
 10344  		}
 10345  		preds[i] = x
 10346  		succs[i] = succ
 10347  
 10348  		// Check if the key is already in the skipmap.
 10349  		if succ != nil && succ.equal(key) {
 10350  			return succ
 10351  		}
 10352  	}
 10353  	return nil
 10354  }
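
        // sketchAssertSearchInvariant is an illustrative, non-generated checker
        // for the postcondition documented on findNode above: at every level it
        // filled, the predecessor is the header or strictly below key, and the
        // successor is nil or at/above key. findNode may return early, so
        // unfilled levels are nil and skipped here.
        func sketchAssertSearchInvariant(s *Uint16Map, key uint16, preds, succs *[maxLevel]*uint16Node) bool {
        	for i := 0; i < maxLevel; i++ {
        		if p := preds[i]; p != nil && p != s.header && !p.lessthan(key) {
        			return false // predecessor must satisfy p.key < key
        		}
        		if n := succs[i]; n != nil && n.lessthan(key) {
        			return false // successor must satisfy n.key >= key
        		}
        	}
        	return true
        }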
 10355  
 10356  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
 10357  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
 10358  func (s *Uint16Map) findNodeDelete(key uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) int {
 10359  	// lFound represents the index of the first layer at which it found a node.
 10360  	lFound, x := -1, s.header
 10361  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 10362  		succ := x.atomicLoadNext(i)
 10363  		for succ != nil && succ.lessthan(key) {
 10364  			x = succ
 10365  			succ = x.atomicLoadNext(i)
 10366  		}
 10367  		preds[i] = x
 10368  		succs[i] = succ
 10369  
 10370  		// Check if the key is already in the skip list.
 10371  		if lFound == -1 && succ != nil && succ.equal(key) {
 10372  			lFound = i
 10373  		}
 10374  	}
 10375  	return lFound
 10376  }
 10377  
 10378  func unlockUint16(preds [maxLevel]*uint16Node, highestLevel int) {
 10379  	var prevPred *uint16Node
 10380  	for i := highestLevel; i >= 0; i-- {
 10381  		if preds[i] != prevPred { // the node may already have been unlocked in a previous iteration
 10382  			preds[i].mu.Unlock()
 10383  			prevPred = preds[i]
 10384  		}
 10385  	}
 10386  }
 10387  
 10388  // Store sets the value for a key.
 10389  func (s *Uint16Map) Store(key uint16, value interface{}) {
 10390  	level := s.randomlevel()
 10391  	var preds, succs [maxLevel]*uint16Node
 10392  	for {
 10393  		nodeFound := s.findNode(key, &preds, &succs)
 10394  		if nodeFound != nil { // indicating the key is already in the skip-list
 10395  			if !nodeFound.flags.Get(marked) {
 10396  				// We don't need to care about whether or not the node is fully linked,
 10397  				// just replace the value.
 10398  				nodeFound.storeVal(value)
 10399  				return
 10400  			}
 10401  			// If the node is marked, some other goroutine is in the process of deleting it,
 10402  			// so we retry the insertion in the next loop.
 10403  			continue
 10404  		}
 10405  
 10406  		// Add this node into skip list.
 10407  		var (
 10408  			highestLocked        = -1 // the highest level being locked by this process
 10409  			valid                = true
 10410  			pred, succ, prevPred *uint16Node
 10411  		)
 10412  		for layer := 0; valid && layer < level; layer++ {
 10413  			pred = preds[layer]   // target node's previous node
 10414  			succ = succs[layer]   // target node's next node
 10415  			if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10416  				pred.mu.Lock()
 10417  				highestLocked = layer
 10418  				prevPred = pred
 10419  			}
 10420  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
 10421  			// It is valid if:
 10422  			// 1. Neither the previous node nor the next node is marked.
 10423  			// 2. The previous node's next node is still succ in this layer.
 10424  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 10425  		}
 10426  		if !valid {
 10427  			unlockUint16(preds, highestLocked)
 10428  			continue
 10429  		}
 10430  
 10431  		nn := newUint16Node(key, value, level)
 10432  		for layer := 0; layer < level; layer++ {
 10433  			nn.storeNext(layer, succs[layer])
 10434  			preds[layer].atomicStoreNext(layer, nn)
 10435  		}
 10436  		nn.flags.SetTrue(fullyLinked)
 10437  		unlockUint16(preds, highestLocked)
 10438  		atomic.AddInt64(&s.length, 1)
 10439  		return
 10440  	}
 10441  }
 10442  
 10443  func (s *Uint16Map) randomlevel() int {
 10444  	// Generate random level.
 10445  	level := randomLevel()
 10446  	// Update highest level if possible.
 10447  	for {
 10448  		hl := atomic.LoadInt64(&s.highestLevel)
 10449  		if int64(level) <= hl {
 10450  			break
 10451  		}
 10452  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 10453  			break
 10454  		}
 10455  	}
 10456  	return level
 10457  }
 10458  
 10459  // Load returns the value stored in the map for a key, or nil if no
 10460  // value is present.
 10461  // The ok result indicates whether value was found in the map.
 10462  func (s *Uint16Map) Load(key uint16) (value interface{}, ok bool) {
 10463  	x := s.header
 10464  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 10465  		nex := x.atomicLoadNext(i)
 10466  		for nex != nil && nex.lessthan(key) {
 10467  			x = nex
 10468  			nex = x.atomicLoadNext(i)
 10469  		}
 10470  
 10471  		// Check if the key is already in the skip list.
 10472  		if nex != nil && nex.equal(key) {
 10473  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 10474  				return nex.loadVal(), true
 10475  			}
 10476  			return nil, false
 10477  		}
 10478  	}
 10479  	return nil, false
 10480  }
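
        // sketchLoadTyped is an illustrative, non-generated helper: values are
        // stored as interface{}, so callers typically recover a concrete type
        // with a checked assertion after Load.
        func sketchLoadTyped(m *Uint16Map, k uint16) (string, bool) {
        	v, ok := m.Load(k)
        	if !ok {
        		return "", false // key absent (or not yet fully linked)
        	}
        	s, ok := v.(string) // ok is false if a non-string was stored
        	return s, ok
        }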
 10481  
 10482  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 10483  // The loaded result reports whether the key was present.
 10484  // (Modified from Delete)
 10485  func (s *Uint16Map) LoadAndDelete(key uint16) (value interface{}, loaded bool) {
 10486  	var (
 10487  		nodeToDelete *uint16Node
 10488  		isMarked     bool // reports whether this operation marked the node
 10489  		topLayer     = -1
 10490  		preds, succs [maxLevel]*uint16Node
 10491  	)
 10492  	for {
 10493  		lFound := s.findNodeDelete(key, &preds, &succs)
 10494  		if isMarked || // this process marked the node, or the node was found in the skip list
 10495  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 10496  			if !isMarked { // the node has not been marked by this process yet
 10497  				nodeToDelete = succs[lFound]
 10498  				topLayer = lFound
 10499  				nodeToDelete.mu.Lock()
 10500  				if nodeToDelete.flags.Get(marked) {
 10501  					// The node is marked by another process,
 10502  					// the physical deletion will be accomplished by another process.
 10503  					nodeToDelete.mu.Unlock()
 10504  					return nil, false
 10505  				}
 10506  				nodeToDelete.flags.SetTrue(marked)
 10507  				isMarked = true
 10508  			}
 10509  			// Accomplish the physical deletion.
 10510  			var (
 10511  				highestLocked        = -1 // the highest level being locked by this process
 10512  				valid                = true
 10513  				pred, succ, prevPred *uint16Node
 10514  			)
 10515  			for layer := 0; valid && (layer <= topLayer); layer++ {
 10516  				pred, succ = preds[layer], succs[layer]
 10517  				if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10518  					pred.mu.Lock()
 10519  					highestLocked = layer
 10520  					prevPred = pred
 10521  				}
 10522  				// valid checks whether another node has been inserted into the skip list in this layer
 10523  				// during this process, or whether the previous node has been deleted by another process.
 10524  				// It is valid if:
 10525  				// 1. the previous node still exists (is not marked).
 10526  				// 2. no other node has been inserted into the skip list in this layer.
 10527  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 10528  			}
 10529  			if !valid {
 10530  				unlockUint16(preds, highestLocked)
 10531  				continue
 10532  			}
 10533  			for i := topLayer; i >= 0; i-- {
 10534  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 10535  				// so we don't need `nodeToDelete.atomicLoadNext`.
 10536  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 10537  			}
 10538  			nodeToDelete.mu.Unlock()
 10539  			unlockUint16(preds, highestLocked)
 10540  			atomic.AddInt64(&s.length, -1)
 10541  			return nodeToDelete.loadVal(), true
 10542  		}
 10543  		return nil, false
 10544  	}
 10545  }
 10546  
 10547  // LoadOrStore returns the existing value for the key if present.
 10548  // Otherwise, it stores and returns the given value.
 10549  // The loaded result is true if the value was loaded, false if stored.
 10550  // (Modified from Store)
 10551  func (s *Uint16Map) LoadOrStore(key uint16, value interface{}) (actual interface{}, loaded bool) {
 10552  	var (
 10553  		level        int
 10554  		preds, succs [maxLevel]*uint16Node
 10555  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 10556  	)
 10557  	for {
 10558  		nodeFound := s.findNode(key, &preds, &succs)
 10559  		if nodeFound != nil { // indicating the key is already in the skip-list
 10560  			if !nodeFound.flags.Get(marked) {
 10561  				// We don't need to care about whether or not the node is fully linked,
 10562  				// just return the value.
 10563  				return nodeFound.loadVal(), true
 10564  			}
 10565  			// If the node is marked, some other goroutine is in the process of deleting it,
 10566  			// so we retry in the next loop.
 10567  			continue
 10568  		}
 10569  
 10570  		// Add this node into skip list.
 10571  		var (
 10572  			highestLocked        = -1 // the highest level being locked by this process
 10573  			valid                = true
 10574  			pred, succ, prevPred *uint16Node
 10575  		)
 10576  		if level == 0 {
 10577  			level = s.randomlevel()
 10578  			if level > hl {
 10579  				// If the highest level is updated, usually means that many goroutines
 10580  				// are inserting items. Hopefully we can find a better path in next loop.
 10581  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 10582  				// but this strategy's performance is almost the same as the existing method.
 10583  				continue
 10584  			}
 10585  		}
 10586  		for layer := 0; valid && layer < level; layer++ {
 10587  			pred = preds[layer]   // target node's previous node
 10588  			succ = succs[layer]   // target node's next node
 10589  			if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10590  				pred.mu.Lock()
 10591  				highestLocked = layer
 10592  				prevPred = pred
 10593  			}
 10594  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
 10595  			// It is valid if:
 10596  			// 1. Neither the previous node nor the next node is marked.
 10597  			// 2. The previous node's next node is still succ in this layer.
 10598  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 10599  		}
 10600  		if !valid {
 10601  			unlockUint16(preds, highestLocked)
 10602  			continue
 10603  		}
 10604  
 10605  		nn := newUint16Node(key, value, level)
 10606  		for layer := 0; layer < level; layer++ {
 10607  			nn.storeNext(layer, succs[layer])
 10608  			preds[layer].atomicStoreNext(layer, nn)
 10609  		}
 10610  		nn.flags.SetTrue(fullyLinked)
 10611  		unlockUint16(preds, highestLocked)
 10612  		atomic.AddInt64(&s.length, 1)
 10613  		return value, false
 10614  	}
 10615  }
 10616  
 10617  // LoadOrStoreLazy returns the existing value for the key if present.
 10618  // Otherwise, it computes the value with f, stores it, and returns it; f is invoked at most once.
 10619  // The loaded result is true if the value was loaded, false if stored.
 10620  // (Modified from LoadOrStore)
 10621  func (s *Uint16Map) LoadOrStoreLazy(key uint16, f func() interface{}) (actual interface{}, loaded bool) {
 10622  	var (
 10623  		level        int
 10624  		preds, succs [maxLevel]*uint16Node
 10625  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 10626  	)
 10627  	for {
 10628  		nodeFound := s.findNode(key, &preds, &succs)
 10629  		if nodeFound != nil { // indicating the key is already in the skip-list
 10630  			if !nodeFound.flags.Get(marked) {
 10631  				// We don't need to care about whether or not the node is fully linked,
 10632  				// just return the value.
 10633  				return nodeFound.loadVal(), true
 10634  			}
 10635  			// If the node is marked, some other goroutine is in the process of deleting it,
 10636  			// so we retry in the next loop.
 10637  			continue
 10638  		}
 10639  
 10640  		// Add this node into skip list.
 10641  		var (
 10642  			highestLocked        = -1 // the highest level being locked by this process
 10643  			valid                = true
 10644  			pred, succ, prevPred *uint16Node
 10645  		)
 10646  		if level == 0 {
 10647  			level = s.randomlevel()
 10648  			if level > hl {
 10649  				// If the highest level is updated, usually means that many goroutines
 10650  				// are inserting items. Hopefully we can find a better path in next loop.
 10651  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 10652  				// but this strategy's performance is almost the same as the existing method.
 10653  				continue
 10654  			}
 10655  		}
 10656  		for layer := 0; valid && layer < level; layer++ {
 10657  			pred = preds[layer]   // target node's previous node
 10658  			succ = succs[layer]   // target node's next node
 10659  			if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10660  				pred.mu.Lock()
 10661  				highestLocked = layer
 10662  				prevPred = pred
 10663  			}
 10664  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
 10665  			// It is valid if:
 10666  			// 1. Neither the previous node nor the next node is marked.
 10667  			// 2. The previous node's next node is still succ in this layer.
 10668  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 10669  		}
 10670  		if !valid {
 10671  			unlockUint16(preds, highestLocked)
 10672  			continue
 10673  		}
 10674  		value := f()
 10675  		nn := newUint16Node(key, value, level)
 10676  		for layer := 0; layer < level; layer++ {
 10677  			nn.storeNext(layer, succs[layer])
 10678  			preds[layer].atomicStoreNext(layer, nn)
 10679  		}
 10680  		nn.flags.SetTrue(fullyLinked)
 10681  		unlockUint16(preds, highestLocked)
 10682  		atomic.AddInt64(&s.length, 1)
 10683  		return value, false
 10684  	}
 10685  }
 10686  
 10687  // Delete deletes the value for a key.
 10688  func (s *Uint16Map) Delete(key uint16) bool {
 10689  	var (
 10690  		nodeToDelete *uint16Node
 10691  		isMarked     bool // reports whether this operation marked the node
 10692  		topLayer     = -1
 10693  		preds, succs [maxLevel]*uint16Node
 10694  	)
 10695  	for {
 10696  		lFound := s.findNodeDelete(key, &preds, &succs)
 10697  		if isMarked || // this process marked the node, or the node was found in the skip list
 10698  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 10699  			if !isMarked { // the node has not been marked by this process yet
 10700  				nodeToDelete = succs[lFound]
 10701  				topLayer = lFound
 10702  				nodeToDelete.mu.Lock()
 10703  				if nodeToDelete.flags.Get(marked) {
 10704  					// The node is marked by another process,
 10705  					// the physical deletion will be accomplished by another process.
 10706  					nodeToDelete.mu.Unlock()
 10707  					return false
 10708  				}
 10709  				nodeToDelete.flags.SetTrue(marked)
 10710  				isMarked = true
 10711  			}
 10712  			// Accomplish the physical deletion.
 10713  			var (
 10714  				highestLocked        = -1 // the highest level being locked by this process
 10715  				valid                = true
 10716  				pred, succ, prevPred *uint16Node
 10717  			)
 10718  			for layer := 0; valid && (layer <= topLayer); layer++ {
 10719  				pred, succ = preds[layer], succs[layer]
 10720  				if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10721  					pred.mu.Lock()
 10722  					highestLocked = layer
 10723  					prevPred = pred
 10724  				}
 10725  				// valid checks whether another node has been inserted into the skip list in this layer
 10726  				// during this process, or whether the previous node has been deleted by another process.
 10727  				// It is valid if:
 10728  				// 1. the previous node still exists (is not marked).
 10729  				// 2. no other node has been inserted into the skip list in this layer.
 10730  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 10731  			}
 10732  			if !valid {
 10733  				unlockUint16(preds, highestLocked)
 10734  				continue
 10735  			}
 10736  			for i := topLayer; i >= 0; i-- {
 10737  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 10738  				// so we don't need `nodeToDelete.atomicLoadNext`.
 10739  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 10740  			}
 10741  			nodeToDelete.mu.Unlock()
 10742  			unlockUint16(preds, highestLocked)
 10743  			atomic.AddInt64(&s.length, -1)
 10744  			return true
 10745  		}
 10746  		return false
 10747  	}
 10748  }
 10749  
 10750  // Range calls f sequentially for each key and value present in the skipmap.
 10751  // If f returns false, range stops the iteration.
 10752  //
 10753  // Range does not necessarily correspond to any consistent snapshot of the Map's
 10754  // contents: no key will be visited more than once, but if the value for any key
 10755  // is stored or deleted concurrently, Range may reflect any mapping for that key
 10756  // from any point during the Range call.
 10757  func (s *Uint16Map) Range(f func(key uint16, value interface{}) bool) {
 10758  	x := s.header.atomicLoadNext(0)
 10759  	for x != nil {
 10760  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 10761  			x = x.atomicLoadNext(0)
 10762  			continue
 10763  		}
 10764  		if !f(x.key, x.loadVal()) {
 10765  			break
 10766  		}
 10767  		x = x.atomicLoadNext(0)
 10768  	}
 10769  }
 10770  
 10771  // Len returns the length of this skipmap.
 10772  func (s *Uint16Map) Len() int {
 10773  	return int(atomic.LoadInt64(&s.length))
 10774  }
 10775  
 10776  // Uint16MapDesc represents a map based on skip list in descending order.
 10777  type Uint16MapDesc struct {
 10778  	header       *uint16NodeDesc
 10779  	length       int64
 10780  	highestLevel int64 // highest level for now
 10781  }
 10782  
 10783  type uint16NodeDesc struct {
 10784  	key   uint16
 10785  	value unsafe.Pointer // *interface{}
 10786  	next  optionalArray  // [level]*uint16NodeDesc
 10787  	mu    sync.Mutex
 10788  	flags bitflag
 10789  	level uint32
 10790  }
 10791  
 10792  func newUint16NodeDesc(key uint16, value interface{}, level int) *uint16NodeDesc {
 10793  	node := &uint16NodeDesc{
 10794  		key:   key,
 10795  		level: uint32(level),
 10796  	}
 10797  	node.storeVal(value)
 10798  	if level > op1 {
 10799  		node.next.extra = new([op2]unsafe.Pointer)
 10800  	}
 10801  	return node
 10802  }
 10803  
 10804  func (n *uint16NodeDesc) storeVal(value interface{}) {
 10805  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 10806  }
 10807  
 10808  func (n *uint16NodeDesc) loadVal() interface{} {
 10809  	return *(*interface{})(atomic.LoadPointer(&n.value))
 10810  }
 10811  
 10812  func (n *uint16NodeDesc) loadNext(i int) *uint16NodeDesc {
 10813  	return (*uint16NodeDesc)(n.next.load(i))
 10814  }
 10815  
 10816  func (n *uint16NodeDesc) storeNext(i int, node *uint16NodeDesc) {
 10817  	n.next.store(i, unsafe.Pointer(node))
 10818  }
 10819  
 10820  func (n *uint16NodeDesc) atomicLoadNext(i int) *uint16NodeDesc {
 10821  	return (*uint16NodeDesc)(n.next.atomicLoad(i))
 10822  }
 10823  
 10824  func (n *uint16NodeDesc) atomicStoreNext(i int, node *uint16NodeDesc) {
 10825  	n.next.atomicStore(i, unsafe.Pointer(node))
 10826  }
 10827  
 10828  func (n *uint16NodeDesc) lessthan(key uint16) bool {
 10829  	return n.key > key
 10830  }
 10831  
 10832  func (n *uint16NodeDesc) equal(key uint16) bool {
 10833  	return n.key == key
 10834  }
 10835  
 10836  // NewUint16Desc returns an empty uint16 skipmap in descending order.
 10837  func NewUint16Desc() *Uint16MapDesc {
 10838  	h := newUint16NodeDesc(0, "", maxLevel)
 10839  	h.flags.SetTrue(fullyLinked)
 10840  	return &Uint16MapDesc{
 10841  		header:       h,
 10842  		highestLevel: defaultHighestLevel,
 10843  	}
 10844  }
 10845  
 10846  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
 10847  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
 10848  // (Unlike a full-path search, it returns immediately once the node is found.)
 10849  func (s *Uint16MapDesc) findNode(key uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) *uint16NodeDesc {
 10850  	x := s.header
 10851  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 10852  		succ := x.atomicLoadNext(i)
 10853  		for succ != nil && succ.lessthan(key) {
 10854  			x = succ
 10855  			succ = x.atomicLoadNext(i)
 10856  		}
 10857  		preds[i] = x
 10858  		succs[i] = succ
 10859  
 10860  		// Check if the key is already in the skipmap.
 10861  		if succ != nil && succ.equal(key) {
 10862  			return succ
 10863  		}
 10864  	}
 10865  	return nil
 10866  }
 10867  
 10868  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
 10869  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
 10870  func (s *Uint16MapDesc) findNodeDelete(key uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) int {
 10871  	// lFound represents the index of the first layer at which it found a node.
 10872  	lFound, x := -1, s.header
 10873  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 10874  		succ := x.atomicLoadNext(i)
 10875  		for succ != nil && succ.lessthan(key) {
 10876  			x = succ
 10877  			succ = x.atomicLoadNext(i)
 10878  		}
 10879  		preds[i] = x
 10880  		succs[i] = succ
 10881  
 10882  		// Check if the key is already in the skip list.
 10883  		if lFound == -1 && succ != nil && succ.equal(key) {
 10884  			lFound = i
 10885  		}
 10886  	}
 10887  	return lFound
 10888  }
 10889  
 10890  func unlockUint16Desc(preds [maxLevel]*uint16NodeDesc, highestLevel int) {
 10891  	var prevPred *uint16NodeDesc
 10892  	for i := highestLevel; i >= 0; i-- {
 10893  		if preds[i] != prevPred { // the node may already have been unlocked in a previous iteration
 10894  			preds[i].mu.Unlock()
 10895  			prevPred = preds[i]
 10896  		}
 10897  	}
 10898  }
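
        // sketchLockDistinctPreds is an illustrative, non-generated mirror of the
        // locking convention that unlockUint16Desc undoes: a predecessor guarding
        // several consecutive layers is locked only once, so both the lock and
        // unlock passes must skip repeated entries.
        func sketchLockDistinctPreds(preds *[maxLevel]*uint16NodeDesc, top int) (highestLocked int) {
        	highestLocked = -1
        	var prevPred *uint16NodeDesc
        	for i := 0; i <= top; i++ {
        		if preds[i] != prevPred { // same pointer as the layer below: already locked
        			preds[i].mu.Lock()
        			highestLocked = i
        			prevPred = preds[i]
        		}
        	}
        	return highestLocked
        }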
 10899  
 10900  // Store sets the value for a key.
 10901  func (s *Uint16MapDesc) Store(key uint16, value interface{}) {
 10902  	level := s.randomlevel()
 10903  	var preds, succs [maxLevel]*uint16NodeDesc
 10904  	for {
 10905  		nodeFound := s.findNode(key, &preds, &succs)
 10906  		if nodeFound != nil { // indicating the key is already in the skip-list
 10907  			if !nodeFound.flags.Get(marked) {
 10908  				// We don't need to care about whether or not the node is fully linked,
 10909  				// just replace the value.
 10910  				nodeFound.storeVal(value)
 10911  				return
 10912  			}
 10913  			// If the node is marked, some other goroutine is in the process of deleting it,
 10914  			// so we retry the insertion in the next loop.
 10915  			continue
 10916  		}
 10917  
 10918  		// Add this node into skip list.
 10919  		var (
 10920  			highestLocked        = -1 // the highest level being locked by this process
 10921  			valid                = true
 10922  			pred, succ, prevPred *uint16NodeDesc
 10923  		)
 10924  		for layer := 0; valid && layer < level; layer++ {
 10925  			pred = preds[layer]   // target node's previous node
 10926  			succ = succs[layer]   // target node's next node
 10927  			if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 10928  				pred.mu.Lock()
 10929  				highestLocked = layer
 10930  				prevPred = pred
 10931  			}
 10932  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
 10933  			// It is valid if:
 10934  			// 1. Neither the previous node nor the next node is marked.
 10935  			// 2. The previous node's next node is still succ in this layer.
 10936  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 10937  		}
 10938  		if !valid {
 10939  			unlockUint16Desc(preds, highestLocked)
 10940  			continue
 10941  		}
 10942  
 10943  		nn := newUint16NodeDesc(key, value, level)
 10944  		for layer := 0; layer < level; layer++ {
 10945  			nn.storeNext(layer, succs[layer])
 10946  			preds[layer].atomicStoreNext(layer, nn)
 10947  		}
 10948  		nn.flags.SetTrue(fullyLinked)
 10949  		unlockUint16Desc(preds, highestLocked)
 10950  		atomic.AddInt64(&s.length, 1)
 10951  		return
 10952  	}
 10953  }
 10954  
 10955  func (s *Uint16MapDesc) randomlevel() int {
 10956  	// Generate random level.
 10957  	level := randomLevel()
 10958  	// Update highest level if possible.
 10959  	for {
 10960  		hl := atomic.LoadInt64(&s.highestLevel)
 10961  		if int64(level) <= hl {
 10962  			break
 10963  		}
 10964  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 10965  			break
 10966  		}
 10967  	}
 10968  	return level
 10969  }
 10970  
 10971  // Load returns the value stored in the map for a key, or nil if no
 10972  // value is present.
 10973  // The ok result indicates whether value was found in the map.
 10974  func (s *Uint16MapDesc) Load(key uint16) (value interface{}, ok bool) {
 10975  	x := s.header
 10976  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 10977  		nex := x.atomicLoadNext(i)
 10978  		for nex != nil && nex.lessthan(key) {
 10979  			x = nex
 10980  			nex = x.atomicLoadNext(i)
 10981  		}
 10982  
 10983  		// Check if the key is already in the skip list.
 10984  		if nex != nil && nex.equal(key) {
 10985  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 10986  				return nex.loadVal(), true
 10987  			}
 10988  			return nil, false
 10989  		}
 10990  	}
 10991  	return nil, false
 10992  }
 10993  
 10994  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 10995  // The loaded result reports whether the key was present.
 10996  // (Modified from Delete)
 10997  func (s *Uint16MapDesc) LoadAndDelete(key uint16) (value interface{}, loaded bool) {
 10998  	var (
 10999  		nodeToDelete *uint16NodeDesc
 11000  		isMarked     bool // reports whether this operation marked the node
 11001  		topLayer     = -1
 11002  		preds, succs [maxLevel]*uint16NodeDesc
 11003  	)
 11004  	for {
 11005  		lFound := s.findNodeDelete(key, &preds, &succs)
 11006  		if isMarked || // this process marked the node, or the node was found in the skip list
 11007  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 11008  			if !isMarked { // the node has not been marked by this process yet
 11009  				nodeToDelete = succs[lFound]
 11010  				topLayer = lFound
 11011  				nodeToDelete.mu.Lock()
 11012  				if nodeToDelete.flags.Get(marked) {
 11013  					// The node is marked by another process,
 11014  					// the physical deletion will be accomplished by another process.
 11015  					nodeToDelete.mu.Unlock()
 11016  					return nil, false
 11017  				}
 11018  				nodeToDelete.flags.SetTrue(marked)
 11019  				isMarked = true
 11020  			}
 11021  			// Accomplish the physical deletion.
 11022  			var (
 11023  				highestLocked        = -1 // the highest level being locked by this process
 11024  				valid                = true
 11025  				pred, succ, prevPred *uint16NodeDesc
 11026  			)
 11027  			for layer := 0; valid && (layer <= topLayer); layer++ {
 11028  				pred, succ = preds[layer], succs[layer]
 11029  				if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 11030  					pred.mu.Lock()
 11031  					highestLocked = layer
 11032  					prevPred = pred
 11033  				}
 11034  				// valid checks whether another node has been inserted into the skip list in this layer
 11035  				// during this process, or whether the previous node has been deleted by another process.
 11036  				// It is valid if:
 11037  				// 1. the previous node still exists (is not marked).
 11038  				// 2. no other node has been inserted into the skip list in this layer.
 11039  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 11040  			}
 11041  			if !valid {
 11042  				unlockUint16Desc(preds, highestLocked)
 11043  				continue
 11044  			}
 11045  			for i := topLayer; i >= 0; i-- {
 11046  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 11047  				// so we don't need `nodeToDelete.atomicLoadNext`.
 11048  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 11049  			}
 11050  			nodeToDelete.mu.Unlock()
 11051  			unlockUint16Desc(preds, highestLocked)
 11052  			atomic.AddInt64(&s.length, -1)
 11053  			return nodeToDelete.loadVal(), true
 11054  		}
 11055  		return nil, false
 11056  	}
 11057  }
 11058  
 11059  // LoadOrStore returns the existing value for the key if present.
 11060  // Otherwise, it stores and returns the given value.
 11061  // The loaded result is true if the value was loaded, false if stored.
 11062  // (Modified from Store)
 11063  func (s *Uint16MapDesc) LoadOrStore(key uint16, value interface{}) (actual interface{}, loaded bool) {
 11064  	var (
 11065  		level        int
 11066  		preds, succs [maxLevel]*uint16NodeDesc
 11067  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 11068  	)
 11069  	for {
 11070  		nodeFound := s.findNode(key, &preds, &succs)
 11071  		if nodeFound != nil { // indicating the key is already in the skip-list
 11072  			if !nodeFound.flags.Get(marked) {
 11073  				// We don't need to care about whether or not the node is fully linked,
 11074  				// just return the value.
 11075  				return nodeFound.loadVal(), true
 11076  			}
 11077  			// If the node is marked, some other goroutine is in the process of deleting it,
 11078  			// so we retry in the next loop.
 11079  			continue
 11080  		}
 11081  
 11082  		// Add this node into skip list.
 11083  		var (
 11084  			highestLocked        = -1 // the highest level being locked by this process
 11085  			valid                = true
 11086  			pred, succ, prevPred *uint16NodeDesc
 11087  		)
 11088  		if level == 0 {
 11089  			level = s.randomlevel()
 11090  			if level > hl {
 11091  				// If the highest level is updated, usually means that many goroutines
 11092  				// are inserting items. Hopefully we can find a better path in next loop.
 11093  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 11094  				// but this strategy's performance is almost the same as the existing method.
 11095  				continue
 11096  			}
 11097  		}
 11098  		for layer := 0; valid && layer < level; layer++ {
 11099  			pred = preds[layer]   // target node's previous node
 11100  			succ = succs[layer]   // target node's next node
 11101  			if pred != prevPred { // the node in this layer may already be locked from a previous iteration
 11102  				pred.mu.Lock()
 11103  				highestLocked = layer
 11104  				prevPred = pred
 11105  			}
 11106  			// valid checks whether another node has been inserted into the skip list in this layer during this process.
 11107  			// It is valid if:
 11108  			// 1. Neither the previous node nor the next node is marked.
 11109  			// 2. The previous node's next node is still succ in this layer.
 11110  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 11111  		}
 11112  		if !valid {
 11113  			unlockUint16Desc(preds, highestLocked)
 11114  			continue
 11115  		}
 11116  
 11117  		nn := newUint16NodeDesc(key, value, level)
 11118  		for layer := 0; layer < level; layer++ {
 11119  			nn.storeNext(layer, succs[layer])
 11120  			preds[layer].atomicStoreNext(layer, nn)
 11121  		}
 11122  		nn.flags.SetTrue(fullyLinked)
 11123  		unlockUint16Desc(preds, highestLocked)
 11124  		atomic.AddInt64(&s.length, 1)
 11125  		return value, false
 11126  	}
 11127  }
 11128  
 11129  // LoadOrStoreLazy returns the existing value for the key if present.
 11130  // Otherwise, it computes the value with f, stores it, and returns it; f is invoked at most once.
 11131  // The loaded result is true if the value was loaded, false if stored.
 11132  // (Modified from LoadOrStore)
 11133  func (s *Uint16MapDesc) LoadOrStoreLazy(key uint16, f func() interface{}) (actual interface{}, loaded bool) {
 11134  	var (
 11135  		level        int
 11136  		preds, succs [maxLevel]*uint16NodeDesc
 11137  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 11138  	)
 11139  	for {
 11140  		nodeFound := s.findNode(key, &preds, &succs)
 11141  		if nodeFound != nil { // indicating the key is already in the skip-list
 11142  			if !nodeFound.flags.Get(marked) {
 11143  				// We don't need to care about whether or not the node is fully linked,
 11144  				// just return the value.
 11145  				return nodeFound.loadVal(), true
 11146  			}
 11147  			// If the node is marked, some other goroutine is in the process of deleting it,
 11148  			// so we retry in the next loop.
 11149  			continue
 11150  		}
 11151  
 11152  		// Add this node into skip list.
 11153  		var (
 11154  			highestLocked        = -1 // the highest level being locked by this process
 11155  			valid                = true
 11156  			pred, succ, prevPred *uint16NodeDesc
 11157  		)
 11158  		if level == 0 {
 11159  			level = s.randomlevel()
 11160  			if level > hl {
 11161  				// If the highest level was updated, it usually means that many goroutines
 11162  				// are inserting items. Hopefully we can find a better path in the next loop.
 11163  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 11164  				// but this strategy's performance is almost the same as the existing method.
 11165  				continue
 11166  			}
 11167  		}
 11168  		for layer := 0; valid && layer < level; layer++ {
 11169  			pred = preds[layer]   // target node's previous node
 11170  			succ = succs[layer]   // target node's next node
 11171  			if pred != prevPred { // this node may already have been locked in a previous iteration
 11172  				pred.mu.Lock()
 11173  				highestLocked = layer
 11174  				prevPred = pred
 11175  			}
 11176  			// valid checks whether another node was inserted into the skip list in this layer during this process.
 11177  			// It is valid if:
 11178  			// 1. Neither the previous node nor the next node is marked.
 11179  			// 2. The previous node's next node is succ in this layer.
 11180  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 11181  		}
 11182  		if !valid {
 11183  			unlockUint16Desc(preds, highestLocked)
 11184  			continue
 11185  		}
 11186  		value := f()
 11187  		nn := newUint16NodeDesc(key, value, level)
 11188  		for layer := 0; layer < level; layer++ {
 11189  			nn.storeNext(layer, succs[layer])
 11190  			preds[layer].atomicStoreNext(layer, nn)
 11191  		}
 11192  		nn.flags.SetTrue(fullyLinked)
 11193  		unlockUint16Desc(preds, highestLocked)
 11194  		atomic.AddInt64(&s.length, 1)
 11195  		return value, false
 11196  	}
 11197  }
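
// Usage sketch for LoadOrStoreLazy (illustrative only; NewUint16Desc is the
// generated constructor for this type, and buildValue is a hypothetical,
// expensive initializer). f runs only when the key is absent, so its cost is
// skipped entirely on the load path:
//
//	m := NewUint16Desc()
//	v, loaded := m.LoadOrStoreLazy(7, func() interface{} {
//		return buildValue() // hypothetical; invoked at most once per successful store
//	})
//	_, _ = v, loaded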
 11198  
 11199  // Delete deletes the value for a key.
 11200  func (s *Uint16MapDesc) Delete(key uint16) bool {
 11201  	var (
 11202  		nodeToDelete *uint16NodeDesc
 11203  		isMarked     bool // reports whether this operation marked the node
 11204  		topLayer     = -1
 11205  		preds, succs [maxLevel]*uint16NodeDesc
 11206  	)
 11207  	for {
 11208  		lFound := s.findNodeDelete(key, &preds, &succs)
 11209  		if isMarked || // this process marked the node, or the node was found in the skip list
 11210  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 11211  			if !isMarked { // the node has not been marked by this operation yet
 11212  				nodeToDelete = succs[lFound]
 11213  				topLayer = lFound
 11214  				nodeToDelete.mu.Lock()
 11215  				if nodeToDelete.flags.Get(marked) {
 11216  					// The node is marked by another process,
 11217  					// the physical deletion will be accomplished by another process.
 11218  					nodeToDelete.mu.Unlock()
 11219  					return false
 11220  				}
 11221  				nodeToDelete.flags.SetTrue(marked)
 11222  				isMarked = true
 11223  			}
 11224  			// Accomplish the physical deletion.
 11225  			var (
 11226  				highestLocked        = -1 // the highest level being locked by this process
 11227  				valid                = true
 11228  				pred, succ, prevPred *uint16NodeDesc
 11229  			)
 11230  			for layer := 0; valid && (layer <= topLayer); layer++ {
 11231  				pred, succ = preds[layer], succs[layer]
 11232  				if pred != prevPred { // this node may already have been locked in a previous iteration
 11233  					pred.mu.Lock()
 11234  					highestLocked = layer
 11235  					prevPred = pred
 11236  				}
 11237  				// valid checks whether another node was inserted into the skip list in this layer
 11238  				// during this process, or whether the previous node was deleted by another process.
 11239  				// It is valid if:
 11240  				// 1. The previous node is not marked.
 11241  				// 2. No other node was inserted into the skip list in this layer.
 11242  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 11243  			}
 11244  			if !valid {
 11245  				unlockUint16Desc(preds, highestLocked)
 11246  				continue
 11247  			}
 11248  			for i := topLayer; i >= 0; i-- {
 11249  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 11250  				// so the plain (non-atomic) `nodeToDelete.loadNext` is sufficient.
 11251  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 11252  			}
 11253  			nodeToDelete.mu.Unlock()
 11254  			unlockUint16Desc(preds, highestLocked)
 11255  			atomic.AddInt64(&s.length, -1)
 11256  			return true
 11257  		}
 11258  		return false
 11259  	}
 11260  }
 11261  
 11262  // Range calls f sequentially for each key and value present in the skipmap.
 11263  // If f returns false, range stops the iteration.
 11264  //
 11265  // Range does not necessarily correspond to any consistent snapshot of the Map's
 11266  // contents: no key will be visited more than once, but if the value for any key
 11267  // is stored or deleted concurrently, Range may reflect any mapping for that key
 11268  // from any point during the Range call.
 11269  func (s *Uint16MapDesc) Range(f func(key uint16, value interface{}) bool) {
 11270  	x := s.header.atomicLoadNext(0)
 11271  	for x != nil {
 11272  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 11273  			x = x.atomicLoadNext(0)
 11274  			continue
 11275  		}
 11276  		if !f(x.key, x.loadVal()) {
 11277  			break
 11278  		}
 11279  		x = x.atomicLoadNext(0)
 11280  	}
 11281  }
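
// Range usage sketch (illustrative): returning false from f stops the
// iteration early, so a caller can take just the first few entries:
//
//	m := NewUint16Desc() // generated constructor for this type
//	count := 0
//	m.Range(func(key uint16, value interface{}) bool {
//		count++
//		return count < 3 // stop after three entries
//	})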
 11282  
 11283  // Len returns the length of this skipmap.
 11284  func (s *Uint16MapDesc) Len() int {
 11285  	return int(atomic.LoadInt64(&s.length))
 11286  }
 11287  
 11288  // Uint32Map represents a map based on a skip list, in ascending order.
 11289  type Uint32Map struct {
 11290  	header       *uint32Node
 11291  	length       int64
 11292  	highestLevel int64 // highest level for now
 11293  }
 11294  
 11295  type uint32Node struct {
 11296  	key   uint32
 11297  	value unsafe.Pointer // *interface{}
 11298  	next  optionalArray  // [level]*uint32Node
 11299  	mu    sync.Mutex
 11300  	flags bitflag
 11301  	level uint32
 11302  }
 11303  
 11304  func newUint32Node(key uint32, value interface{}, level int) *uint32Node {
 11305  	node := &uint32Node{
 11306  		key:   key,
 11307  		level: uint32(level),
 11308  	}
 11309  	node.storeVal(value)
 11310  	if level > op1 {
 11311  		node.next.extra = new([op2]unsafe.Pointer)
 11312  	}
 11313  	return node
 11314  }
 11315  
 11316  func (n *uint32Node) storeVal(value interface{}) {
 11317  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 11318  }
 11319  
 11320  func (n *uint32Node) loadVal() interface{} {
 11321  	return *(*interface{})(atomic.LoadPointer(&n.value))
 11322  }
 11323  
 11324  func (n *uint32Node) loadNext(i int) *uint32Node {
 11325  	return (*uint32Node)(n.next.load(i))
 11326  }
 11327  
 11328  func (n *uint32Node) storeNext(i int, node *uint32Node) {
 11329  	n.next.store(i, unsafe.Pointer(node))
 11330  }
 11331  
 11332  func (n *uint32Node) atomicLoadNext(i int) *uint32Node {
 11333  	return (*uint32Node)(n.next.atomicLoad(i))
 11334  }
 11335  
 11336  func (n *uint32Node) atomicStoreNext(i int, node *uint32Node) {
 11337  	n.next.atomicStore(i, unsafe.Pointer(node))
 11338  }
 11339  
 11340  func (n *uint32Node) lessthan(key uint32) bool {
 11341  	return n.key < key
 11342  }
 11343  
 11344  func (n *uint32Node) equal(key uint32) bool {
 11345  	return n.key == key
 11346  }
 11347  
 11348  // NewUint32 returns an empty uint32 skipmap.
 11349  func NewUint32() *Uint32Map {
 11350  	h := newUint32Node(0, "", maxLevel)
 11351  	h.flags.SetTrue(fullyLinked)
 11352  	return &Uint32Map{
 11353  		header:       h,
 11354  		highestLevel: defaultHighestLevel,
 11355  	}
 11356  }
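
// Basic usage sketch (illustrative):
//
//	m := NewUint32()
//	m.Store(1, "one")
//	if v, ok := m.Load(1); ok {
//		_ = v.(string) // values round-trip as interface{}
//	}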
 11357  
 11358  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
 11359  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
 11360  // (Unlike a full-path search, it returns immediately once the node is found.)
 11361  func (s *Uint32Map) findNode(key uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) *uint32Node {
 11362  	x := s.header
 11363  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 11364  		succ := x.atomicLoadNext(i)
 11365  		for succ != nil && succ.lessthan(key) {
 11366  			x = succ
 11367  			succ = x.atomicLoadNext(i)
 11368  		}
 11369  		preds[i] = x
 11370  		succs[i] = succ
 11371  
 11372  		// Check if the key is already in the skipmap.
 11373  		if succ != nil && succ.equal(key) {
 11374  			return succ
 11375  		}
 11376  	}
 11377  	return nil
 11378  }
 11379  
 11380  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
 11381  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
 11382  func (s *Uint32Map) findNodeDelete(key uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) int {
 11383  	// lFound represents the index of the first layer at which it found a node.
 11384  	lFound, x := -1, s.header
 11385  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 11386  		succ := x.atomicLoadNext(i)
 11387  		for succ != nil && succ.lessthan(key) {
 11388  			x = succ
 11389  			succ = x.atomicLoadNext(i)
 11390  		}
 11391  		preds[i] = x
 11392  		succs[i] = succ
 11393  
 11394  		// Check if the key is already in the skip list.
 11395  		if lFound == -1 && succ != nil && succ.equal(key) {
 11396  			lFound = i
 11397  		}
 11398  	}
 11399  	return lFound
 11400  }
 11401  
 11402  func unlockUint32(preds [maxLevel]*uint32Node, highestLevel int) {
 11403  	var prevPred *uint32Node
 11404  	for i := highestLevel; i >= 0; i-- {
 11405  		if preds[i] != prevPred { // the node may already have been unlocked in a previous iteration
 11406  			preds[i].mu.Unlock()
 11407  			prevPred = preds[i]
 11408  		}
 11409  	}
 11410  }
 11411  
 11412  // Store sets the value for a key.
 11413  func (s *Uint32Map) Store(key uint32, value interface{}) {
 11414  	level := s.randomlevel()
 11415  	var preds, succs [maxLevel]*uint32Node
 11416  	for {
 11417  		nodeFound := s.findNode(key, &preds, &succs)
 11418  		if nodeFound != nil { // indicating the key is already in the skip-list
 11419  			if !nodeFound.flags.Get(marked) {
 11420  				// We don't need to care about whether or not the node is fully linked,
 11421  				// just replace the value.
 11422  				nodeFound.storeVal(value)
 11423  				return
 11424  			}
 11425  			// If the node is marked, some other goroutine is in the process of deleting it,
 11426  			// so we retry in the next loop.
 11427  			continue
 11428  		}
 11429  
 11430  		// Add this node into the skip list.
 11431  		var (
 11432  			highestLocked        = -1 // the highest level being locked by this process
 11433  			valid                = true
 11434  			pred, succ, prevPred *uint32Node
 11435  		)
 11436  		for layer := 0; valid && layer < level; layer++ {
 11437  			pred = preds[layer]   // target node's previous node
 11438  			succ = succs[layer]   // target node's next node
 11439  			if pred != prevPred { // this node may already have been locked in a previous iteration
 11440  				pred.mu.Lock()
 11441  				highestLocked = layer
 11442  				prevPred = pred
 11443  			}
 11444  			// valid checks whether another node was inserted into the skip list in this layer during this process.
 11445  			// It is valid if:
 11446  			// 1. Neither the previous node nor the next node is marked.
 11447  			// 2. The previous node's next node is succ in this layer.
 11448  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 11449  		}
 11450  		if !valid {
 11451  			unlockUint32(preds, highestLocked)
 11452  			continue
 11453  		}
 11454  
 11455  		nn := newUint32Node(key, value, level)
 11456  		for layer := 0; layer < level; layer++ {
 11457  			nn.storeNext(layer, succs[layer])
 11458  			preds[layer].atomicStoreNext(layer, nn)
 11459  		}
 11460  		nn.flags.SetTrue(fullyLinked)
 11461  		unlockUint32(preds, highestLocked)
 11462  		atomic.AddInt64(&s.length, 1)
 11463  		return
 11464  	}
 11465  }
 11466  
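// randomlevel generates a random level for a new node and, when the result
// exceeds the map's current highest level, raises s.highestLevel with a CAS
// loop so that concurrent updates can only ever increase it.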
 11467  func (s *Uint32Map) randomlevel() int {
 11468  	// Generate random level.
 11469  	level := randomLevel()
 11470  	// Update highest level if possible.
 11471  	for {
 11472  		hl := atomic.LoadInt64(&s.highestLevel)
 11473  		if int64(level) <= hl {
 11474  			break
 11475  		}
 11476  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 11477  			break
 11478  		}
 11479  	}
 11480  	return level
 11481  }
 11482  
 11483  // Load returns the value stored in the map for a key, or nil if no
 11484  // value is present.
 11485  // The ok result indicates whether value was found in the map.
 11486  func (s *Uint32Map) Load(key uint32) (value interface{}, ok bool) {
 11487  	x := s.header
 11488  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 11489  		nex := x.atomicLoadNext(i)
 11490  		for nex != nil && nex.lessthan(key) {
 11491  			x = nex
 11492  			nex = x.atomicLoadNext(i)
 11493  		}
 11494  
 11495  		// Check if the key is already in the skip list.
 11496  		if nex != nil && nex.equal(key) {
 11497  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 11498  				return nex.loadVal(), true
 11499  			}
 11500  			return nil, false
 11501  		}
 11502  	}
 11503  	return nil, false
 11504  }
 11505  
 11506  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 11507  // The loaded result reports whether the key was present.
 11508  // (Modified from Delete)
 11509  func (s *Uint32Map) LoadAndDelete(key uint32) (value interface{}, loaded bool) {
 11510  	var (
 11511  		nodeToDelete *uint32Node
 11512  		isMarked     bool // reports whether this operation marked the node
 11513  		topLayer     = -1
 11514  		preds, succs [maxLevel]*uint32Node
 11515  	)
 11516  	for {
 11517  		lFound := s.findNodeDelete(key, &preds, &succs)
 11518  		if isMarked || // this process marked the node, or the node was found in the skip list
 11519  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 11520  			if !isMarked { // the node has not been marked by this operation yet
 11521  				nodeToDelete = succs[lFound]
 11522  				topLayer = lFound
 11523  				nodeToDelete.mu.Lock()
 11524  				if nodeToDelete.flags.Get(marked) {
 11525  					// The node is marked by another process,
 11526  					// the physical deletion will be accomplished by another process.
 11527  					nodeToDelete.mu.Unlock()
 11528  					return nil, false
 11529  				}
 11530  				nodeToDelete.flags.SetTrue(marked)
 11531  				isMarked = true
 11532  			}
 11533  			// Accomplish the physical deletion.
 11534  			var (
 11535  				highestLocked        = -1 // the highest level being locked by this process
 11536  				valid                = true
 11537  				pred, succ, prevPred *uint32Node
 11538  			)
 11539  			for layer := 0; valid && (layer <= topLayer); layer++ {
 11540  				pred, succ = preds[layer], succs[layer]
 11541  				if pred != prevPred { // this node may already have been locked in a previous iteration
 11542  					pred.mu.Lock()
 11543  					highestLocked = layer
 11544  					prevPred = pred
 11545  				}
 11546  				// valid checks whether another node was inserted into the skip list in this layer
 11547  				// during this process, or whether the previous node was deleted by another process.
 11548  				// It is valid if:
 11549  				// 1. The previous node is not marked.
 11550  				// 2. No other node was inserted into the skip list in this layer.
 11551  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 11552  			}
 11553  			if !valid {
 11554  				unlockUint32(preds, highestLocked)
 11555  				continue
 11556  			}
 11557  			for i := topLayer; i >= 0; i-- {
 11558  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 11559  				// so the plain (non-atomic) `nodeToDelete.loadNext` is sufficient.
 11560  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 11561  			}
 11562  			nodeToDelete.mu.Unlock()
 11563  			unlockUint32(preds, highestLocked)
 11564  			atomic.AddInt64(&s.length, -1)
 11565  			return nodeToDelete.loadVal(), true
 11566  		}
 11567  		return nil, false
 11568  	}
 11569  }
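
// Usage sketch (illustrative): LoadAndDelete removes the key and returns the
// old value in a single call, avoiding the race inherent in a Load followed
// by a Delete:
//
//	m := NewUint32()
//	m.Store(42, "answer")
//	if old, loaded := m.LoadAndDelete(42); loaded {
//		_ = old // the value that was just unlinked
//	}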
 11570  
 11571  // LoadOrStore returns the existing value for the key if present.
 11572  // Otherwise, it stores and returns the given value.
 11573  // The loaded result is true if the value was loaded, false if stored.
 11574  // (Modified from Store)
 11575  func (s *Uint32Map) LoadOrStore(key uint32, value interface{}) (actual interface{}, loaded bool) {
 11576  	var (
 11577  		level        int
 11578  		preds, succs [maxLevel]*uint32Node
 11579  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 11580  	)
 11581  	for {
 11582  		nodeFound := s.findNode(key, &preds, &succs)
 11583  		if nodeFound != nil { // indicating the key is already in the skip-list
 11584  			if !nodeFound.flags.Get(marked) {
 11585  				// We don't need to care about whether or not the node is fully linked,
 11586  				// just return the value.
 11587  				return nodeFound.loadVal(), true
 11588  			}
 11589  			// If the node is marked, some other goroutine is in the process of deleting it,
 11590  			// so we retry in the next loop.
 11591  			continue
 11592  		}
 11593  
 11594  		// Add this node into the skip list.
 11595  		var (
 11596  			highestLocked        = -1 // the highest level being locked by this process
 11597  			valid                = true
 11598  			pred, succ, prevPred *uint32Node
 11599  		)
 11600  		if level == 0 {
 11601  			level = s.randomlevel()
 11602  			if level > hl {
 11603  				// If the highest level was updated, it usually means that many goroutines
 11604  				// are inserting items. Hopefully we can find a better path in the next loop.
 11605  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 11606  				// but this strategy's performance is almost the same as the existing method.
 11607  				continue
 11608  			}
 11609  		}
 11610  		for layer := 0; valid && layer < level; layer++ {
 11611  			pred = preds[layer]   // target node's previous node
 11612  			succ = succs[layer]   // target node's next node
 11613  			if pred != prevPred { // this node may already have been locked in a previous iteration
 11614  				pred.mu.Lock()
 11615  				highestLocked = layer
 11616  				prevPred = pred
 11617  			}
 11618  			// valid checks whether another node was inserted into the skip list in this layer during this process.
 11619  			// It is valid if:
 11620  			// 1. Neither the previous node nor the next node is marked.
 11621  			// 2. The previous node's next node is succ in this layer.
 11622  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 11623  		}
 11624  		if !valid {
 11625  			unlockUint32(preds, highestLocked)
 11626  			continue
 11627  		}
 11628  
 11629  		nn := newUint32Node(key, value, level)
 11630  		for layer := 0; layer < level; layer++ {
 11631  			nn.storeNext(layer, succs[layer])
 11632  			preds[layer].atomicStoreNext(layer, nn)
 11633  		}
 11634  		nn.flags.SetTrue(fullyLinked)
 11635  		unlockUint32(preds, highestLocked)
 11636  		atomic.AddInt64(&s.length, 1)
 11637  		return value, false
 11638  	}
 11639  }
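
// Note that the value passed to LoadOrStore is constructed by the caller even
// when the key already exists; when construction is expensive, LoadOrStoreLazy
// below defers that work until the store actually happens. Sketch
// (illustrative):
//
//	m := NewUint32()
//	actual, loaded := m.LoadOrStore(9, "fallback")
//	_, _ = actual, loaded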
 11640  
 11641  // LoadOrStoreLazy returns the existing value for the key if present.
 11642  // Otherwise, it stores and returns the value produced by f; f is called only once.
 11643  // The loaded result is true if the value was loaded, false if stored.
 11644  // (Modified from LoadOrStore)
 11645  func (s *Uint32Map) LoadOrStoreLazy(key uint32, f func() interface{}) (actual interface{}, loaded bool) {
 11646  	var (
 11647  		level        int
 11648  		preds, succs [maxLevel]*uint32Node
 11649  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 11650  	)
 11651  	for {
 11652  		nodeFound := s.findNode(key, &preds, &succs)
 11653  		if nodeFound != nil { // indicating the key is already in the skip-list
 11654  			if !nodeFound.flags.Get(marked) {
 11655  				// We don't need to care about whether or not the node is fully linked,
 11656  				// just return the value.
 11657  				return nodeFound.loadVal(), true
 11658  			}
 11659  			// If the node is marked, some other goroutine is in the process of deleting it,
 11660  			// so we retry in the next loop.
 11661  			continue
 11662  		}
 11663  
 11664  		// Add this node into the skip list.
 11665  		var (
 11666  			highestLocked        = -1 // the highest level being locked by this process
 11667  			valid                = true
 11668  			pred, succ, prevPred *uint32Node
 11669  		)
 11670  		if level == 0 {
 11671  			level = s.randomlevel()
 11672  			if level > hl {
 11673  				// If the highest level was updated, it usually means that many goroutines
 11674  				// are inserting items. Hopefully we can find a better path in the next loop.
 11675  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 11676  				// but this strategy's performance is almost the same as the existing method.
 11677  				continue
 11678  			}
 11679  		}
 11680  		for layer := 0; valid && layer < level; layer++ {
 11681  			pred = preds[layer]   // target node's previous node
 11682  			succ = succs[layer]   // target node's next node
 11683  			if pred != prevPred { // this node may already have been locked in a previous iteration
 11684  				pred.mu.Lock()
 11685  				highestLocked = layer
 11686  				prevPred = pred
 11687  			}
 11688  			// valid checks whether another node was inserted into the skip list in this layer during this process.
 11689  			// It is valid if:
 11690  			// 1. Neither the previous node nor the next node is marked.
 11691  			// 2. The previous node's next node is succ in this layer.
 11692  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 11693  		}
 11694  		if !valid {
 11695  			unlockUint32(preds, highestLocked)
 11696  			continue
 11697  		}
 11698  		value := f()
 11699  		nn := newUint32Node(key, value, level)
 11700  		for layer := 0; layer < level; layer++ {
 11701  			nn.storeNext(layer, succs[layer])
 11702  			preds[layer].atomicStoreNext(layer, nn)
 11703  		}
 11704  		nn.flags.SetTrue(fullyLinked)
 11705  		unlockUint32(preds, highestLocked)
 11706  		atomic.AddInt64(&s.length, 1)
 11707  		return value, false
 11708  	}
 11709  }
 11710  
 11711  // Delete deletes the value for a key.
 11712  func (s *Uint32Map) Delete(key uint32) bool {
 11713  	var (
 11714  		nodeToDelete *uint32Node
 11715  		isMarked     bool // reports whether this operation marked the node
 11716  		topLayer     = -1
 11717  		preds, succs [maxLevel]*uint32Node
 11718  	)
 11719  	for {
 11720  		lFound := s.findNodeDelete(key, &preds, &succs)
 11721  		if isMarked || // this process marked the node, or the node was found in the skip list
 11722  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 11723  			if !isMarked { // the node has not been marked by this operation yet
 11724  				nodeToDelete = succs[lFound]
 11725  				topLayer = lFound
 11726  				nodeToDelete.mu.Lock()
 11727  				if nodeToDelete.flags.Get(marked) {
 11728  					// The node is marked by another process,
 11729  					// the physical deletion will be accomplished by another process.
 11730  					nodeToDelete.mu.Unlock()
 11731  					return false
 11732  				}
 11733  				nodeToDelete.flags.SetTrue(marked)
 11734  				isMarked = true
 11735  			}
 11736  			// Accomplish the physical deletion.
 11737  			var (
 11738  				highestLocked        = -1 // the highest level being locked by this process
 11739  				valid                = true
 11740  				pred, succ, prevPred *uint32Node
 11741  			)
 11742  			for layer := 0; valid && (layer <= topLayer); layer++ {
 11743  				pred, succ = preds[layer], succs[layer]
 11744  				if pred != prevPred { // this node may already have been locked in a previous iteration
 11745  					pred.mu.Lock()
 11746  					highestLocked = layer
 11747  					prevPred = pred
 11748  				}
 11749  				// valid checks whether another node was inserted into the skip list in this layer
 11750  				// during this process, or whether the previous node was deleted by another process.
 11751  				// It is valid if:
 11752  				// 1. The previous node is not marked.
 11753  				// 2. No other node was inserted into the skip list in this layer.
 11754  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 11755  			}
 11756  			if !valid {
 11757  				unlockUint32(preds, highestLocked)
 11758  				continue
 11759  			}
 11760  			for i := topLayer; i >= 0; i-- {
 11761  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 11762  				// so the plain (non-atomic) `nodeToDelete.loadNext` is sufficient.
 11763  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 11764  			}
 11765  			nodeToDelete.mu.Unlock()
 11766  			unlockUint32(preds, highestLocked)
 11767  			atomic.AddInt64(&s.length, -1)
 11768  			return true
 11769  		}
 11770  		return false
 11771  	}
 11772  }
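
// Delete reports whether this call removed the key: it returns false when the
// key is absent or when another goroutine has already marked the node for
// deletion. Sketch (illustrative):
//
//	m := NewUint32()
//	m.Store(9, "nine")
//	removed := m.Delete(9) // true only for the goroutine that wins the logical delete
//	_ = removed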
 11773  
 11774  // Range calls f sequentially for each key and value present in the skipmap.
 11775  // If f returns false, range stops the iteration.
 11776  //
 11777  // Range does not necessarily correspond to any consistent snapshot of the Map's
 11778  // contents: no key will be visited more than once, but if the value for any key
 11779  // is stored or deleted concurrently, Range may reflect any mapping for that key
 11780  // from any point during the Range call.
 11781  func (s *Uint32Map) Range(f func(key uint32, value interface{}) bool) {
 11782  	x := s.header.atomicLoadNext(0)
 11783  	for x != nil {
 11784  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 11785  			x = x.atomicLoadNext(0)
 11786  			continue
 11787  		}
 11788  		if !f(x.key, x.loadVal()) {
 11789  			break
 11790  		}
 11791  		x = x.atomicLoadNext(0)
 11792  	}
 11793  }
 11794  
 11795  // Len returns the length of this skipmap.
 11796  func (s *Uint32Map) Len() int {
 11797  	return int(atomic.LoadInt64(&s.length))
 11798  }
 11799  
 11800  // Uint32MapDesc represents a map based on a skip list, in descending order.
 11801  type Uint32MapDesc struct {
 11802  	header       *uint32NodeDesc
 11803  	length       int64
 11804  	highestLevel int64 // highest level for now
 11805  }
 11806  
 11807  type uint32NodeDesc struct {
 11808  	key   uint32
 11809  	value unsafe.Pointer // *interface{}
 11810  	next  optionalArray  // [level]*uint32NodeDesc
 11811  	mu    sync.Mutex
 11812  	flags bitflag
 11813  	level uint32
 11814  }
 11815  
 11816  func newUint32NodeDesc(key uint32, value interface{}, level int) *uint32NodeDesc {
 11817  	node := &uint32NodeDesc{
 11818  		key:   key,
 11819  		level: uint32(level),
 11820  	}
 11821  	node.storeVal(value)
 11822  	if level > op1 {
 11823  		node.next.extra = new([op2]unsafe.Pointer)
 11824  	}
 11825  	return node
 11826  }
 11827  
 11828  func (n *uint32NodeDesc) storeVal(value interface{}) {
 11829  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 11830  }
 11831  
 11832  func (n *uint32NodeDesc) loadVal() interface{} {
 11833  	return *(*interface{})(atomic.LoadPointer(&n.value))
 11834  }
 11835  
 11836  func (n *uint32NodeDesc) loadNext(i int) *uint32NodeDesc {
 11837  	return (*uint32NodeDesc)(n.next.load(i))
 11838  }
 11839  
 11840  func (n *uint32NodeDesc) storeNext(i int, node *uint32NodeDesc) {
 11841  	n.next.store(i, unsafe.Pointer(node))
 11842  }
 11843  
 11844  func (n *uint32NodeDesc) atomicLoadNext(i int) *uint32NodeDesc {
 11845  	return (*uint32NodeDesc)(n.next.atomicLoad(i))
 11846  }
 11847  
 11848  func (n *uint32NodeDesc) atomicStoreNext(i int, node *uint32NodeDesc) {
 11849  	n.next.atomicStore(i, unsafe.Pointer(node))
 11850  }
 11851  
 11852  func (n *uint32NodeDesc) lessthan(key uint32) bool {
 11853  	return n.key > key
 11854  }
 11855  
 11856  func (n *uint32NodeDesc) equal(key uint32) bool {
 11857  	return n.key == key
 11858  }
 11859  
 11860  // NewUint32Desc returns an empty uint32 skipmap in descending order.
 11861  func NewUint32Desc() *Uint32MapDesc {
 11862  	h := newUint32NodeDesc(0, "", maxLevel)
 11863  	h.flags.SetTrue(fullyLinked)
 11864  	return &Uint32MapDesc{
 11865  		header:       h,
 11866  		highestLevel: defaultHighestLevel,
 11867  	}
 11868  }
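
// In the descending map, iteration visits keys from largest to smallest.
// Sketch (illustrative):
//
//	d := NewUint32Desc()
//	d.Store(1, "a")
//	d.Store(2, "b")
//	d.Range(func(key uint32, value interface{}) bool {
//		return true // visits key 2 before key 1
//	})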
 11869  
 11870  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
 11871  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
 11872  // (Unlike a full-path search, it returns immediately once the node is found.)
 11873  func (s *Uint32MapDesc) findNode(key uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) *uint32NodeDesc {
 11874  	x := s.header
 11875  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 11876  		succ := x.atomicLoadNext(i)
 11877  		for succ != nil && succ.lessthan(key) {
 11878  			x = succ
 11879  			succ = x.atomicLoadNext(i)
 11880  		}
 11881  		preds[i] = x
 11882  		succs[i] = succ
 11883  
 11884  		// Check if the key is already in the skipmap.
 11885  		if succ != nil && succ.equal(key) {
 11886  			return succ
 11887  		}
 11888  	}
 11889  	return nil
 11890  }
 11891  
 11892  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
 11893  // The returned preds and succs always satisfy preds[i].key > key >= succs[i].key.
 11894  func (s *Uint32MapDesc) findNodeDelete(key uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) int {
 11895  	// lFound represents the index of the first layer at which it found a node.
 11896  	lFound, x := -1, s.header
 11897  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 11898  		succ := x.atomicLoadNext(i)
 11899  		for succ != nil && succ.lessthan(key) {
 11900  			x = succ
 11901  			succ = x.atomicLoadNext(i)
 11902  		}
 11903  		preds[i] = x
 11904  		succs[i] = succ
 11905  
 11906  		// Check if the key is already in the skip list.
 11907  		if lFound == -1 && succ != nil && succ.equal(key) {
 11908  			lFound = i
 11909  		}
 11910  	}
 11911  	return lFound
 11912  }
 11913  
 11914  func unlockUint32Desc(preds [maxLevel]*uint32NodeDesc, highestLevel int) {
 11915  	var prevPred *uint32NodeDesc
 11916  	for i := highestLevel; i >= 0; i-- {
 11917  		if preds[i] != prevPred { // the node may already have been unlocked in a previous iteration
 11918  			preds[i].mu.Unlock()
 11919  			prevPred = preds[i]
 11920  		}
 11921  	}
 11922  }
 11923  
 11924  // Store sets the value for a key.
 11925  func (s *Uint32MapDesc) Store(key uint32, value interface{}) {
 11926  	level := s.randomlevel()
 11927  	var preds, succs [maxLevel]*uint32NodeDesc
 11928  	for {
 11929  		nodeFound := s.findNode(key, &preds, &succs)
 11930  		if nodeFound != nil { // indicating the key is already in the skip-list
 11931  			if !nodeFound.flags.Get(marked) {
 11932  				// We don't need to care about whether or not the node is fully linked,
 11933  				// just replace the value.
 11934  				nodeFound.storeVal(value)
 11935  				return
 11936  			}
 11937  			// If the node is marked, some other goroutine is in the process of deleting it,
 11938  			// so we retry in the next loop.
 11939  			continue
 11940  		}
 11941  
 11942  		// Add this node into the skip list.
 11943  		var (
 11944  			highestLocked        = -1 // the highest level being locked by this process
 11945  			valid                = true
 11946  			pred, succ, prevPred *uint32NodeDesc
 11947  		)
 11948  		for layer := 0; valid && layer < level; layer++ {
 11949  			pred = preds[layer]   // target node's previous node
 11950  			succ = succs[layer]   // target node's next node
 11951  			if pred != prevPred { // this node may already have been locked in a previous iteration
 11952  				pred.mu.Lock()
 11953  				highestLocked = layer
 11954  				prevPred = pred
 11955  			}
 11956  			// valid checks whether another node was inserted into the skip list in this layer during this process.
 11957  			// It is valid if:
 11958  			// 1. Neither the previous node nor the next node is marked.
 11959  			// 2. The previous node's next node is succ in this layer.
 11960  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 11961  		}
 11962  		if !valid {
 11963  			unlockUint32Desc(preds, highestLocked)
 11964  			continue
 11965  		}
 11966  
 11967  		nn := newUint32NodeDesc(key, value, level)
 11968  		for layer := 0; layer < level; layer++ {
 11969  			nn.storeNext(layer, succs[layer])
 11970  			preds[layer].atomicStoreNext(layer, nn)
 11971  		}
 11972  		nn.flags.SetTrue(fullyLinked)
 11973  		unlockUint32Desc(preds, highestLocked)
 11974  		atomic.AddInt64(&s.length, 1)
 11975  		return
 11976  	}
 11977  }
 11978  
 11979  func (s *Uint32MapDesc) randomlevel() int {
 11980  	// Generate random level.
 11981  	level := randomLevel()
 11982  	// Update highest level if possible.
 11983  	for {
 11984  		hl := atomic.LoadInt64(&s.highestLevel)
 11985  		if int64(level) <= hl {
 11986  			break
 11987  		}
 11988  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 11989  			break
 11990  		}
 11991  	}
 11992  	return level
 11993  }
 11994  
 11995  // Load returns the value stored in the map for a key, or nil if no
 11996  // value is present.
 11997  // The ok result indicates whether value was found in the map.
 11998  func (s *Uint32MapDesc) Load(key uint32) (value interface{}, ok bool) {
 11999  	x := s.header
 12000  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 12001  		nex := x.atomicLoadNext(i)
 12002  		for nex != nil && nex.lessthan(key) {
 12003  			x = nex
 12004  			nex = x.atomicLoadNext(i)
 12005  		}
 12006  
 12007  		// Check if the key is already in the skip list.
 12008  		if nex != nil && nex.equal(key) {
 12009  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 12010  				return nex.loadVal(), true
 12011  			}
 12012  			return nil, false
 12013  		}
 12014  	}
 12015  	return nil, false
 12016  }
 12017  
 12018  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 12019  // The loaded result reports whether the key was present.
 12020  // (Modified from Delete)
 12021  func (s *Uint32MapDesc) LoadAndDelete(key uint32) (value interface{}, loaded bool) {
 12022  	var (
 12023  		nodeToDelete *uint32NodeDesc
 12024  		isMarked     bool // reports whether this operation marked the node
 12025  		topLayer     = -1
 12026  		preds, succs [maxLevel]*uint32NodeDesc
 12027  	)
 12028  	for {
 12029  		lFound := s.findNodeDelete(key, &preds, &succs)
 12030  		if isMarked || // this process marked the node, or the node was found in the skip list
 12031  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 12032  			if !isMarked { // the node has not been marked by this operation yet
 12033  				nodeToDelete = succs[lFound]
 12034  				topLayer = lFound
 12035  				nodeToDelete.mu.Lock()
 12036  				if nodeToDelete.flags.Get(marked) {
 12037  					// The node is marked by another process,
 12038  					// the physical deletion will be accomplished by another process.
 12039  					nodeToDelete.mu.Unlock()
 12040  					return nil, false
 12041  				}
 12042  				nodeToDelete.flags.SetTrue(marked)
 12043  				isMarked = true
 12044  			}
 12045  			// Accomplish the physical deletion.
 12046  			var (
 12047  				highestLocked        = -1 // the highest level being locked by this process
 12048  				valid                = true
 12049  				pred, succ, prevPred *uint32NodeDesc
 12050  			)
 12051  			for layer := 0; valid && (layer <= topLayer); layer++ {
 12052  				pred, succ = preds[layer], succs[layer]
 12053  				if pred != prevPred { // this node may already have been locked in a previous iteration
 12054  					pred.mu.Lock()
 12055  					highestLocked = layer
 12056  					prevPred = pred
 12057  				}
 12058  				// valid checks whether another node was inserted into the skip list in this layer
 12059  				// during this process, or whether the previous node was deleted by another process.
 12060  				// It is valid if:
 12061  				// 1. The previous node is not marked.
 12062  				// 2. No other node was inserted into the skip list in this layer.
 12063  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 12064  			}
 12065  			if !valid {
 12066  				unlockUint32Desc(preds, highestLocked)
 12067  				continue
 12068  			}
 12069  			for i := topLayer; i >= 0; i-- {
 12070  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 12071  				// so the plain (non-atomic) `nodeToDelete.loadNext` is sufficient.
 12072  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 12073  			}
 12074  			nodeToDelete.mu.Unlock()
 12075  			unlockUint32Desc(preds, highestLocked)
 12076  			atomic.AddInt64(&s.length, -1)
 12077  			return nodeToDelete.loadVal(), true
 12078  		}
 12079  		return nil, false
 12080  	}
 12081  }
 12082  
 12083  // LoadOrStore returns the existing value for the key if present.
 12084  // Otherwise, it stores and returns the given value.
 12085  // The loaded result is true if the value was loaded, false if stored.
 12086  // (Modified from Store)
 12087  func (s *Uint32MapDesc) LoadOrStore(key uint32, value interface{}) (actual interface{}, loaded bool) {
 12088  	var (
 12089  		level        int
 12090  		preds, succs [maxLevel]*uint32NodeDesc
 12091  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 12092  	)
 12093  	for {
 12094  		nodeFound := s.findNode(key, &preds, &succs)
 12095  		if nodeFound != nil { // indicating the key is already in the skip-list
 12096  			if !nodeFound.flags.Get(marked) {
 12097  				// We don't need to care about whether or not the node is fully linked,
 12098  				// just return the value.
 12099  				return nodeFound.loadVal(), true
 12100  			}
 12101  			// If the node is marked, some other goroutine is in the process of deleting it,
 12102  			// so we retry in the next loop.
 12103  			continue
 12104  		}
 12105  
 12106  		// Add this node into the skip list.
 12107  		var (
 12108  			highestLocked        = -1 // the highest level being locked by this process
 12109  			valid                = true
 12110  			pred, succ, prevPred *uint32NodeDesc
 12111  		)
 12112  		if level == 0 {
 12113  			level = s.randomlevel()
 12114  			if level > hl {
 12115  				// If the highest level was updated, it usually means that many goroutines
 12116  				// are inserting items. Hopefully we can find a better path in the next loop.
 12117  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 12118  				// but this strategy's performance is almost the same as the existing method.
 12119  				continue
 12120  			}
 12121  		}
 12122  		for layer := 0; valid && layer < level; layer++ {
 12123  			pred = preds[layer]   // target node's previous node
 12124  			succ = succs[layer]   // target node's next node
 12125  			if pred != prevPred { // this node may already have been locked in a previous iteration
 12126  				pred.mu.Lock()
 12127  				highestLocked = layer
 12128  				prevPred = pred
 12129  			}
 12130  			// valid checks whether another node was inserted into the skip list in this layer during this process.
 12131  			// It is valid if:
 12132  			// 1. Neither the previous node nor the next node is marked.
 12133  			// 2. The previous node's next node is succ in this layer.
 12134  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 12135  		}
 12136  		if !valid {
 12137  			unlockUint32Desc(preds, highestLocked)
 12138  			continue
 12139  		}
 12140  
 12141  		nn := newUint32NodeDesc(key, value, level)
 12142  		for layer := 0; layer < level; layer++ {
 12143  			nn.storeNext(layer, succs[layer])
 12144  			preds[layer].atomicStoreNext(layer, nn)
 12145  		}
 12146  		nn.flags.SetTrue(fullyLinked)
 12147  		unlockUint32Desc(preds, highestLocked)
 12148  		atomic.AddInt64(&s.length, 1)
 12149  		return value, false
 12150  	}
 12151  }
 12152  
 12153  // LoadOrStoreLazy returns the existing value for the key if present.
 12154  // Otherwise, it stores and returns the value produced by f; f is called only once.
 12155  // The loaded result is true if the value was loaded, false if stored.
 12156  // (Modified from LoadOrStore)
 12157  func (s *Uint32MapDesc) LoadOrStoreLazy(key uint32, f func() interface{}) (actual interface{}, loaded bool) {
 12158  	var (
 12159  		level        int
 12160  		preds, succs [maxLevel]*uint32NodeDesc
 12161  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 12162  	)
 12163  	for {
 12164  		nodeFound := s.findNode(key, &preds, &succs)
 12165  		if nodeFound != nil { // indicating the key is already in the skip-list
 12166  			if !nodeFound.flags.Get(marked) {
 12167  				// We don't need to care about whether or not the node is fully linked,
 12168  				// just return the value.
 12169  				return nodeFound.loadVal(), true
 12170  			}
 12171  			// If the node is marked, some other goroutine is in the process of deleting it,
 12172  			// so we retry in the next loop.
 12173  			continue
 12174  		}
 12175  
 12176  		// Add this node into the skip list.
 12177  		var (
 12178  			highestLocked        = -1 // the highest level being locked by this process
 12179  			valid                = true
 12180  			pred, succ, prevPred *uint32NodeDesc
 12181  		)
 12182  		if level == 0 {
 12183  			level = s.randomlevel()
 12184  			if level > hl {
 12185  				// If the highest level was updated, it usually means that many goroutines
 12186  				// are inserting items. Hopefully we can find a better path in the next loop.
 12187  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 12188  				// but this strategy's performance is almost the same as the existing method.
 12189  				continue
 12190  			}
 12191  		}
 12192  		for layer := 0; valid && layer < level; layer++ {
 12193  			pred = preds[layer]   // target node's previous node
 12194  			succ = succs[layer]   // target node's next node
 12195  			if pred != prevPred { // this node may already have been locked in a previous iteration
 12196  				pred.mu.Lock()
 12197  				highestLocked = layer
 12198  				prevPred = pred
 12199  			}
 12200  			// valid checks whether another node was inserted into the skip list in this layer during this process.
 12201  			// It is valid if:
 12202  			// 1. Neither the previous node nor the next node is marked.
 12203  			// 2. The previous node's next node is succ in this layer.
 12204  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 12205  		}
 12206  		if !valid {
 12207  			unlockUint32Desc(preds, highestLocked)
 12208  			continue
 12209  		}
 12210  		value := f()
 12211  		nn := newUint32NodeDesc(key, value, level)
 12212  		for layer := 0; layer < level; layer++ {
 12213  			nn.storeNext(layer, succs[layer])
 12214  			preds[layer].atomicStoreNext(layer, nn)
 12215  		}
 12216  		nn.flags.SetTrue(fullyLinked)
 12217  		unlockUint32Desc(preds, highestLocked)
 12218  		atomic.AddInt64(&s.length, 1)
 12219  		return value, false
 12220  	}
 12221  }
 12222  
 12223  // Delete deletes the value for a key.
 12224  func (s *Uint32MapDesc) Delete(key uint32) bool {
 12225  	var (
 12226  		nodeToDelete *uint32NodeDesc
 12227  		isMarked     bool // reports whether this operation marked the node
 12228  		topLayer     = -1
 12229  		preds, succs [maxLevel]*uint32NodeDesc
 12230  	)
 12231  	for {
 12232  		lFound := s.findNodeDelete(key, &preds, &succs)
 12233  		if isMarked || // this process marked the node, or the node was found in the skip list
 12234  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 12235  			if !isMarked { // the node has not been marked by this operation yet
 12236  				nodeToDelete = succs[lFound]
 12237  				topLayer = lFound
 12238  				nodeToDelete.mu.Lock()
 12239  				if nodeToDelete.flags.Get(marked) {
 12240  					// The node is marked by another process,
 12241  					// the physical deletion will be accomplished by another process.
 12242  					nodeToDelete.mu.Unlock()
 12243  					return false
 12244  				}
 12245  				nodeToDelete.flags.SetTrue(marked)
 12246  				isMarked = true
 12247  			}
 12248  			// Accomplish the physical deletion.
 12249  			var (
 12250  				highestLocked        = -1 // the highest level being locked by this process
 12251  				valid                = true
 12252  				pred, succ, prevPred *uint32NodeDesc
 12253  			)
 12254  			for layer := 0; valid && (layer <= topLayer); layer++ {
 12255  				pred, succ = preds[layer], succs[layer]
 12256  				if pred != prevPred { // this node may already have been locked in a previous iteration
 12257  					pred.mu.Lock()
 12258  					highestLocked = layer
 12259  					prevPred = pred
 12260  				}
 12261  				// valid checks whether another node was inserted into the skip list in this layer
 12262  				// during this process, or whether the previous node was deleted by another process.
 12263  				// It is valid if:
 12264  				// 1. The previous node is not marked.
 12265  				// 2. No other node was inserted into the skip list in this layer.
 12266  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 12267  			}
 12268  			if !valid {
 12269  				unlockUint32Desc(preds, highestLocked)
 12270  				continue
 12271  			}
 12272  			for i := topLayer; i >= 0; i-- {
 12273  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 12274  				// so the plain (non-atomic) `nodeToDelete.loadNext` is sufficient.
 12275  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 12276  			}
 12277  			nodeToDelete.mu.Unlock()
 12278  			unlockUint32Desc(preds, highestLocked)
 12279  			atomic.AddInt64(&s.length, -1)
 12280  			return true
 12281  		}
 12282  		return false
 12283  	}
 12284  }
 12285  
 12286  // Range calls f sequentially for each key and value present in the skipmap.
 12287  // If f returns false, range stops the iteration.
 12288  //
 12289  // Range does not necessarily correspond to any consistent snapshot of the Map's
 12290  // contents: no key will be visited more than once, but if the value for any key
 12291  // is stored or deleted concurrently, Range may reflect any mapping for that key
 12292  // from any point during the Range call.
 12293  func (s *Uint32MapDesc) Range(f func(key uint32, value interface{}) bool) {
 12294  	x := s.header.atomicLoadNext(0)
 12295  	for x != nil {
 12296  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 12297  			x = x.atomicLoadNext(0)
 12298  			continue
 12299  		}
 12300  		if !f(x.key, x.loadVal()) {
 12301  			break
 12302  		}
 12303  		x = x.atomicLoadNext(0)
 12304  	}
 12305  }
 12306  
 12307  // Len returns the length of this skipmap.
 12308  func (s *Uint32MapDesc) Len() int {
 12309  	return int(atomic.LoadInt64(&s.length))
 12310  }
 12311  
 12312  // Uint64Map represents a map based on a skip list, in ascending order.
 12313  type Uint64Map struct {
 12314  	header       *uint64Node
 12315  	length       int64
 12316  	highestLevel int64 // highest level for now
 12317  }
 12318  
 12319  type uint64Node struct {
 12320  	key   uint64
 12321  	value unsafe.Pointer // *interface{}
 12322  	next  optionalArray  // [level]*uint64Node
 12323  	mu    sync.Mutex
 12324  	flags bitflag
 12325  	level uint32
 12326  }
 12327  
 12328  func newUuint64Node(key uint64, value interface{}, level int) *uint64Node {
 12329  	node := &uint64Node{
 12330  		key:   key,
 12331  		level: uint32(level),
 12332  	}
 12333  	node.storeVal(value)
 12334  	if level > op1 {
 12335  		node.next.extra = new([op2]unsafe.Pointer)
 12336  	}
 12337  	return node
 12338  }
 12339  
 12340  func (n *uint64Node) storeVal(value interface{}) {
 12341  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 12342  }
 12343  
 12344  func (n *uint64Node) loadVal() interface{} {
 12345  	return *(*interface{})(atomic.LoadPointer(&n.value))
 12346  }
 12347  
 12348  func (n *uint64Node) loadNext(i int) *uint64Node {
 12349  	return (*uint64Node)(n.next.load(i))
 12350  }
 12351  
 12352  func (n *uint64Node) storeNext(i int, node *uint64Node) {
 12353  	n.next.store(i, unsafe.Pointer(node))
 12354  }
 12355  
 12356  func (n *uint64Node) atomicLoadNext(i int) *uint64Node {
 12357  	return (*uint64Node)(n.next.atomicLoad(i))
 12358  }
 12359  
 12360  func (n *uint64Node) atomicStoreNext(i int, node *uint64Node) {
 12361  	n.next.atomicStore(i, unsafe.Pointer(node))
 12362  }
 12363  
 12364  func (n *uint64Node) lessthan(key uint64) bool {
 12365  	return n.key < key
 12366  }
 12367  
 12368  func (n *uint64Node) equal(key uint64) bool {
 12369  	return n.key == key
 12370  }
 12371  
 12372  // NewUint64 returns an empty uint64 skipmap.
 12373  func NewUint64() *Uint64Map {
 12374  	h := newUuint64Node(0, "", maxLevel)
 12375  	h.flags.SetTrue(fullyLinked)
 12376  	return &Uint64Map{
 12377  		header:       h,
 12378  		highestLevel: defaultHighestLevel,
 12379  	}
 12380  }
 12381  
 12382  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
 12383  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
 12384  // (Unlike a full-path search, it returns immediately once the node is found.)
 12385  func (s *Uint64Map) findNode(key uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) *uint64Node {
 12386  	x := s.header
 12387  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 12388  		succ := x.atomicLoadNext(i)
 12389  		for succ != nil && succ.lessthan(key) {
 12390  			x = succ
 12391  			succ = x.atomicLoadNext(i)
 12392  		}
 12393  		preds[i] = x
 12394  		succs[i] = succ
 12395  
 12396  		// Check if the key is already in the skipmap.
 12397  		if succ != nil && succ.equal(key) {
 12398  			return succ
 12399  		}
 12400  	}
 12401  	return nil
 12402  }
 12403  
 12404  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip-list.
 12405  // The returned preds and succs always satisfy preds[i].key < key <= succs[i].key.
 12406  func (s *Uint64Map) findNodeDelete(key uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) int {
 12407  	// lFound represents the index of the first layer at which it found a node.
 12408  	lFound, x := -1, s.header
 12409  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 12410  		succ := x.atomicLoadNext(i)
 12411  		for succ != nil && succ.lessthan(key) {
 12412  			x = succ
 12413  			succ = x.atomicLoadNext(i)
 12414  		}
 12415  		preds[i] = x
 12416  		succs[i] = succ
 12417  
 12418  		// Check if the key is already in the skip list.
 12419  		if lFound == -1 && succ != nil && succ.equal(key) {
 12420  			lFound = i
 12421  		}
 12422  	}
 12423  	return lFound
 12424  }
 12425  
 12426  func unlockUint64(preds [maxLevel]*uint64Node, highestLevel int) {
 12427  	var prevPred *uint64Node
 12428  	for i := highestLevel; i >= 0; i-- {
 12429  		if preds[i] != prevPred { // the node may already have been unlocked in a previous iteration
 12430  			preds[i].mu.Unlock()
 12431  			prevPred = preds[i]
 12432  		}
 12433  	}
 12434  }
 12435  
 12436  // Store sets the value for a key.
 12437  func (s *Uint64Map) Store(key uint64, value interface{}) {
 12438  	level := s.randomlevel()
 12439  	var preds, succs [maxLevel]*uint64Node
 12440  	for {
 12441  		nodeFound := s.findNode(key, &preds, &succs)
 12442  		if nodeFound != nil { // indicating the key is already in the skip-list
 12443  			if !nodeFound.flags.Get(marked) {
 12444  				// We don't need to care about whether or not the node is fully linked,
 12445  				// just replace the value.
 12446  				nodeFound.storeVal(value)
 12447  				return
 12448  			}
 12449  			// If the node is marked, some other goroutine is in the process of deleting it,
 12450  			// so we retry in the next loop.
 12451  			continue
 12452  		}
 12453  
 12454  		// Add this node into the skip list.
 12455  		var (
 12456  			highestLocked        = -1 // the highest level being locked by this process
 12457  			valid                = true
 12458  			pred, succ, prevPred *uint64Node
 12459  		)
 12460  		for layer := 0; valid && layer < level; layer++ {
 12461  			pred = preds[layer]   // target node's previous node
 12462  			succ = succs[layer]   // target node's next node
 12463  			if pred != prevPred { // this node may already have been locked in a previous iteration
 12464  				pred.mu.Lock()
 12465  				highestLocked = layer
 12466  				prevPred = pred
 12467  			}
 12468  			// valid checks whether another node was inserted into the skip list in this layer during this process.
 12469  			// It is valid if:
 12470  			// 1. Neither the previous node nor the next node is marked.
 12471  			// 2. The previous node's next node is succ in this layer.
 12472  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 12473  		}
 12474  		if !valid {
 12475  			unlockUint64(preds, highestLocked)
 12476  			continue
 12477  		}
 12478  
 12479  		nn := newUuint64Node(key, value, level)
 12480  		for layer := 0; layer < level; layer++ {
 12481  			nn.storeNext(layer, succs[layer])
 12482  			preds[layer].atomicStoreNext(layer, nn)
 12483  		}
 12484  		nn.flags.SetTrue(fullyLinked)
 12485  		unlockUint64(preds, highestLocked)
 12486  		atomic.AddInt64(&s.length, 1)
 12487  		return
 12488  	}
 12489  }
 12490  
 12491  func (s *Uint64Map) randomlevel() int {
 12492  	// Generate random level.
 12493  	level := randomLevel()
 12494  	// Update highest level if possible.
 12495  	for {
 12496  		hl := atomic.LoadInt64(&s.highestLevel)
 12497  		if int64(level) <= hl {
 12498  			break
 12499  		}
 12500  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 12501  			break
 12502  		}
 12503  	}
 12504  	return level
 12505  }
 12506  
 12507  // Load returns the value stored in the map for a key, or nil if no
 12508  // value is present.
 12509  // The ok result indicates whether value was found in the map.
 12510  func (s *Uint64Map) Load(key uint64) (value interface{}, ok bool) {
 12511  	x := s.header
 12512  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 12513  		nex := x.atomicLoadNext(i)
 12514  		for nex != nil && nex.lessthan(key) {
 12515  			x = nex
 12516  			nex = x.atomicLoadNext(i)
 12517  		}
 12518  
 12519  		// Check if the key is already in the skip list.
 12520  		if nex != nil && nex.equal(key) {
 12521  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 12522  				return nex.loadVal(), true
 12523  			}
 12524  			return nil, false
 12525  		}
 12526  	}
 12527  	return nil, false
 12528  }
 12529  
 12530  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 12531  // The loaded result reports whether the key was present.
 12532  // (Modified from Delete)
 12533  func (s *Uint64Map) LoadAndDelete(key uint64) (value interface{}, loaded bool) {
 12534  	var (
 12535  		nodeToDelete *uint64Node
 12536  		isMarked     bool // reports whether this operation marked the node
 12537  		topLayer     = -1
 12538  		preds, succs [maxLevel]*uint64Node
 12539  	)
 12540  	for {
 12541  		lFound := s.findNodeDelete(key, &preds, &succs)
 12542  		if isMarked || // this process marked the node, or the node was found in the skip list
 12543  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 12544  			if !isMarked { // the node has not been marked by this operation yet
 12545  				nodeToDelete = succs[lFound]
 12546  				topLayer = lFound
 12547  				nodeToDelete.mu.Lock()
 12548  				if nodeToDelete.flags.Get(marked) {
 12549  					// The node is marked by another process,
 12550  					// the physical deletion will be accomplished by another process.
 12551  					nodeToDelete.mu.Unlock()
 12552  					return nil, false
 12553  				}
 12554  				nodeToDelete.flags.SetTrue(marked)
 12555  				isMarked = true
 12556  			}
 12557  			// Accomplish the physical deletion.
 12558  			var (
 12559  				highestLocked        = -1 // the highest level being locked by this process
 12560  				valid                = true
 12561  				pred, succ, prevPred *uint64Node
 12562  			)
 12563  			for layer := 0; valid && (layer <= topLayer); layer++ {
 12564  				pred, succ = preds[layer], succs[layer]
 12565  				if pred != prevPred { // the node in this layer could be locked by previous loop
 12566  					pred.mu.Lock()
 12567  					highestLocked = layer
 12568  					prevPred = pred
 12569  				}
12570  				// valid checks whether another node has been inserted into this layer
12571  				// during this process, or whether the previous node has been deleted by another process.
12572  				// It is valid if:
12573  				// 1. the previous node still exists (it is not marked).
12574  				// 2. no other node has been inserted into this layer.
 12575  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 12576  			}
 12577  			if !valid {
 12578  				unlockUint64(preds, highestLocked)
 12579  				continue
 12580  			}
 12581  			for i := topLayer; i >= 0; i-- {
12582  				// Now we own `nodeToDelete`; no other goroutine will modify it,
12583  				// so the non-atomic loadNext is sufficient here.
 12584  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 12585  			}
 12586  			nodeToDelete.mu.Unlock()
 12587  			unlockUint64(preds, highestLocked)
 12588  			atomic.AddInt64(&s.length, -1)
 12589  			return nodeToDelete.loadVal(), true
 12590  		}
 12591  		return nil, false
 12592  	}
 12593  }
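
// Usage sketch (illustrative): LoadAndDelete combines the lookup and the
// removal into one linearizable step, avoiding the check-then-delete race of
// a separate Load followed by Delete:
//
//	m := skipmap.NewUint64()
//	m.Store(1, "one")
//	if v, loaded := m.LoadAndDelete(1); loaded {
//		fmt.Println("removed:", v) // removed: one
//	}
//	if _, loaded := m.LoadAndDelete(1); !loaded {
//		fmt.Println("already gone")
//	}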
 12594  
 12595  // LoadOrStore returns the existing value for the key if present.
 12596  // Otherwise, it stores and returns the given value.
 12597  // The loaded result is true if the value was loaded, false if stored.
 12598  // (Modified from Store)
 12599  func (s *Uint64Map) LoadOrStore(key uint64, value interface{}) (actual interface{}, loaded bool) {
 12600  	var (
 12601  		level        int
 12602  		preds, succs [maxLevel]*uint64Node
 12603  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 12604  	)
 12605  	for {
 12606  		nodeFound := s.findNode(key, &preds, &succs)
 12607  		if nodeFound != nil { // indicating the key is already in the skip-list
 12608  			if !nodeFound.flags.Get(marked) {
 12609  				// We don't need to care about whether or not the node is fully linked,
 12610  				// just return the value.
 12611  				return nodeFound.loadVal(), true
 12612  			}
12613  			// If the node is marked, some other goroutine is in the process of deleting it,
12614  			// so retry in the next loop.
 12615  			continue
 12616  		}
 12617  
 12618  		// Add this node into skip list.
 12619  		var (
 12620  			highestLocked        = -1 // the highest level being locked by this process
 12621  			valid                = true
 12622  			pred, succ, prevPred *uint64Node
 12623  		)
 12624  		if level == 0 {
 12625  			level = s.randomlevel()
 12626  			if level > hl {
12627  				// If the highest level was updated, it usually means that many goroutines
12628  				// are inserting items. Hopefully we can find a better path in the next loop.
 12629  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 12630  				// but this strategy's performance is almost the same as the existing method.
 12631  				continue
 12632  			}
 12633  		}
 12634  		for layer := 0; valid && layer < level; layer++ {
 12635  			pred = preds[layer]   // target node's previous node
 12636  			succ = succs[layer]   // target node's next node
 12637  			if pred != prevPred { // the node in this layer could be locked by previous loop
 12638  				pred.mu.Lock()
 12639  				highestLocked = layer
 12640  				prevPred = pred
 12641  			}
12642  			// valid checks whether another node has been inserted into this layer during this process.
 12643  			// It is valid if:
 12644  			// 1. The previous node and next node both are not marked.
 12645  			// 2. The previous node's next node is succ in this layer.
 12646  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 12647  		}
 12648  		if !valid {
 12649  			unlockUint64(preds, highestLocked)
 12650  			continue
 12651  		}
 12652  
 12653  		nn := newUuint64Node(key, value, level)
 12654  		for layer := 0; layer < level; layer++ {
 12655  			nn.storeNext(layer, succs[layer])
 12656  			preds[layer].atomicStoreNext(layer, nn)
 12657  		}
 12658  		nn.flags.SetTrue(fullyLinked)
 12659  		unlockUint64(preds, highestLocked)
 12660  		atomic.AddInt64(&s.length, 1)
 12661  		return value, false
 12662  	}
 12663  }
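
// Usage sketch (illustrative): under concurrent callers exactly one store
// wins, and every caller observes the same actual value:
//
//	m := skipmap.NewUint64()
//	actual, loaded := m.LoadOrStore(100, "first")  // stores: actual == "first", loaded == false
//	actual, loaded = m.LoadOrStore(100, "second")  // loads:  actual == "first", loaded == true
//	_, _ = actual, loaded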
 12664  
 12665  // LoadOrStoreLazy returns the existing value for the key if present.
12666  // Otherwise, it stores and returns the value computed by f; f is invoked at most once.
 12667  // The loaded result is true if the value was loaded, false if stored.
 12668  // (Modified from LoadOrStore)
 12669  func (s *Uint64Map) LoadOrStoreLazy(key uint64, f func() interface{}) (actual interface{}, loaded bool) {
 12670  	var (
 12671  		level        int
 12672  		preds, succs [maxLevel]*uint64Node
 12673  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 12674  	)
 12675  	for {
 12676  		nodeFound := s.findNode(key, &preds, &succs)
 12677  		if nodeFound != nil { // indicating the key is already in the skip-list
 12678  			if !nodeFound.flags.Get(marked) {
 12679  				// We don't need to care about whether or not the node is fully linked,
 12680  				// just return the value.
 12681  				return nodeFound.loadVal(), true
 12682  			}
12683  			// If the node is marked, some other goroutine is in the process of deleting it,
12684  			// so retry in the next loop.
 12685  			continue
 12686  		}
 12687  
 12688  		// Add this node into skip list.
 12689  		var (
 12690  			highestLocked        = -1 // the highest level being locked by this process
 12691  			valid                = true
 12692  			pred, succ, prevPred *uint64Node
 12693  		)
 12694  		if level == 0 {
 12695  			level = s.randomlevel()
 12696  			if level > hl {
12697  				// If the highest level was updated, it usually means that many goroutines
12698  				// are inserting items. Hopefully we can find a better path in the next loop.
 12699  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 12700  				// but this strategy's performance is almost the same as the existing method.
 12701  				continue
 12702  			}
 12703  		}
 12704  		for layer := 0; valid && layer < level; layer++ {
 12705  			pred = preds[layer]   // target node's previous node
 12706  			succ = succs[layer]   // target node's next node
 12707  			if pred != prevPred { // the node in this layer could be locked by previous loop
 12708  				pred.mu.Lock()
 12709  				highestLocked = layer
 12710  				prevPred = pred
 12711  			}
12712  			// valid checks whether another node has been inserted into this layer during this process.
 12713  			// It is valid if:
 12714  			// 1. The previous node and next node both are not marked.
 12715  			// 2. The previous node's next node is succ in this layer.
 12716  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 12717  		}
 12718  		if !valid {
 12719  			unlockUint64(preds, highestLocked)
 12720  			continue
 12721  		}
 12722  		value := f()
 12723  		nn := newUuint64Node(key, value, level)
 12724  		for layer := 0; layer < level; layer++ {
 12725  			nn.storeNext(layer, succs[layer])
 12726  			preds[layer].atomicStoreNext(layer, nn)
 12727  		}
 12728  		nn.flags.SetTrue(fullyLinked)
 12729  		unlockUint64(preds, highestLocked)
 12730  		atomic.AddInt64(&s.length, 1)
 12731  		return value, false
 12732  	}
 12733  }
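
// Usage sketch (illustrative): LoadOrStoreLazy defers building the value
// until the insertion position has been validated and locked, so an
// expensive constructor only runs when the key is actually stored
// (buildExpensiveValue is a hypothetical helper):
//
//	m := skipmap.NewUint64()
//	v, loaded := m.LoadOrStoreLazy(7, func() interface{} {
//		return buildExpensiveValue()
//	})
//	_, _ = v, loaded
//
// Note that f runs while the predecessor nodes are still locked, so f should
// not call back into the same skipmap.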
 12734  
 12735  // Delete deletes the value for a key.
 12736  func (s *Uint64Map) Delete(key uint64) bool {
 12737  	var (
 12738  		nodeToDelete *uint64Node
12739  		isMarked     bool // reports whether this operation marked the node
 12740  		topLayer     = -1
 12741  		preds, succs [maxLevel]*uint64Node
 12742  	)
 12743  	for {
 12744  		lFound := s.findNodeDelete(key, &preds, &succs)
12745  		if isMarked || // this process marked the node, or the node was found in the skip list
12746  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
12747  			if !isMarked { // the node has not been marked yet
 12748  				nodeToDelete = succs[lFound]
 12749  				topLayer = lFound
 12750  				nodeToDelete.mu.Lock()
 12751  				if nodeToDelete.flags.Get(marked) {
12752  					// The node has been marked by another process,
12753  					// which will accomplish the physical deletion.
 12754  					nodeToDelete.mu.Unlock()
 12755  					return false
 12756  				}
 12757  				nodeToDelete.flags.SetTrue(marked)
 12758  				isMarked = true
 12759  			}
 12760  			// Accomplish the physical deletion.
 12761  			var (
 12762  				highestLocked        = -1 // the highest level being locked by this process
 12763  				valid                = true
 12764  				pred, succ, prevPred *uint64Node
 12765  			)
 12766  			for layer := 0; valid && (layer <= topLayer); layer++ {
 12767  				pred, succ = preds[layer], succs[layer]
 12768  				if pred != prevPred { // the node in this layer could be locked by previous loop
 12769  					pred.mu.Lock()
 12770  					highestLocked = layer
 12771  					prevPred = pred
 12772  				}
12773  				// valid checks whether another node has been inserted into this layer
12774  				// during this process, or whether the previous node has been deleted by another process.
12775  				// It is valid if:
12776  				// 1. the previous node still exists (it is not marked).
12777  				// 2. no other node has been inserted into this layer.
 12778  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 12779  			}
 12780  			if !valid {
 12781  				unlockUint64(preds, highestLocked)
 12782  				continue
 12783  			}
 12784  			for i := topLayer; i >= 0; i-- {
12785  				// Now we own `nodeToDelete`; no other goroutine will modify it,
12786  				// so the non-atomic loadNext is sufficient here.
 12787  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 12788  			}
 12789  			nodeToDelete.mu.Unlock()
 12790  			unlockUint64(preds, highestLocked)
 12791  			atomic.AddInt64(&s.length, -1)
 12792  			return true
 12793  		}
 12794  		return false
 12795  	}
 12796  }
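
// Usage sketch (illustrative): Delete reports whether this call removed the
// key, so concurrent deleters can tell the single winner apart:
//
//	m := skipmap.NewUint64()
//	m.Store(9, "nine")
//	fmt.Println(m.Delete(9)) // true
//	fmt.Println(m.Delete(9)) // false: the key is already gone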
 12797  
 12798  // Range calls f sequentially for each key and value present in the skipmap.
 12799  // If f returns false, range stops the iteration.
 12800  //
 12801  // Range does not necessarily correspond to any consistent snapshot of the Map's
 12802  // contents: no key will be visited more than once, but if the value for any key
 12803  // is stored or deleted concurrently, Range may reflect any mapping for that key
 12804  // from any point during the Range call.
 12805  func (s *Uint64Map) Range(f func(key uint64, value interface{}) bool) {
 12806  	x := s.header.atomicLoadNext(0)
 12807  	for x != nil {
 12808  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 12809  			x = x.atomicLoadNext(0)
 12810  			continue
 12811  		}
 12812  		if !f(x.key, x.loadVal()) {
 12813  			break
 12814  		}
 12815  		x = x.atomicLoadNext(0)
 12816  	}
 12817  }
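
// Usage sketch (illustrative): keys are visited in ascending order, and
// returning false stops the iteration early:
//
//	m := skipmap.NewUint64()
//	for _, k := range []uint64{3, 1, 2} {
//		m.Store(k, k*10)
//	}
//	m.Range(func(key uint64, value interface{}) bool {
//		fmt.Println(key, value) // prints "1 10", then "2 20"
//		return key < 2          // returning false at key 2 stops the walk
//	})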
 12818  
12819  // Len returns the length of this skipmap.
 12820  func (s *Uint64Map) Len() int {
 12821  	return int(atomic.LoadInt64(&s.length))
 12822  }
 12823  
12824  // Uint64MapDesc represents a map based on a skip list, in descending order.
 12825  type Uint64MapDesc struct {
 12826  	header       *uint64NodeDesc
 12827  	length       int64
 12828  	highestLevel int64 // highest level for now
 12829  }
 12830  
 12831  type uint64NodeDesc struct {
 12832  	key   uint64
 12833  	value unsafe.Pointer // *interface{}
 12834  	next  optionalArray  // [level]*uint64NodeDesc
 12835  	mu    sync.Mutex
 12836  	flags bitflag
 12837  	level uint32
 12838  }
 12839  
12840  func newUint64NodeDesc(key uint64, value interface{}, level int) *uint64NodeDesc {
 12841  	node := &uint64NodeDesc{
 12842  		key:   key,
 12843  		level: uint32(level),
 12844  	}
 12845  	node.storeVal(value)
 12846  	if level > op1 {
 12847  		node.next.extra = new([op2]unsafe.Pointer)
 12848  	}
 12849  	return node
 12850  }
 12851  
 12852  func (n *uint64NodeDesc) storeVal(value interface{}) {
 12853  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 12854  }
 12855  
 12856  func (n *uint64NodeDesc) loadVal() interface{} {
 12857  	return *(*interface{})(atomic.LoadPointer(&n.value))
 12858  }
 12859  
 12860  func (n *uint64NodeDesc) loadNext(i int) *uint64NodeDesc {
 12861  	return (*uint64NodeDesc)(n.next.load(i))
 12862  }
 12863  
 12864  func (n *uint64NodeDesc) storeNext(i int, node *uint64NodeDesc) {
 12865  	n.next.store(i, unsafe.Pointer(node))
 12866  }
 12867  
 12868  func (n *uint64NodeDesc) atomicLoadNext(i int) *uint64NodeDesc {
 12869  	return (*uint64NodeDesc)(n.next.atomicLoad(i))
 12870  }
 12871  
 12872  func (n *uint64NodeDesc) atomicStoreNext(i int, node *uint64NodeDesc) {
 12873  	n.next.atomicStore(i, unsafe.Pointer(node))
 12874  }
 12875  
 12876  func (n *uint64NodeDesc) lessthan(key uint64) bool {
 12877  	return n.key > key
 12878  }
 12879  
 12880  func (n *uint64NodeDesc) equal(key uint64) bool {
 12881  	return n.key == key
 12882  }
 12883  
12884  // NewUint64Desc returns an empty uint64 skipmap in descending order.
 12885  func NewUint64Desc() *Uint64MapDesc {
12886  	h := newUint64NodeDesc(0, "", maxLevel)
 12887  	h.flags.SetTrue(fullyLinked)
 12888  	return &Uint64MapDesc{
 12889  		header:       h,
 12890  		highestLevel: defaultHighestLevel,
 12891  	}
 12892  }
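
// Usage sketch (illustrative): the Desc variant keeps keys from largest to
// smallest, and Range follows that order:
//
//	m := skipmap.NewUint64Desc()
//	m.Store(1, "a")
//	m.Store(3, "c")
//	m.Range(func(key uint64, value interface{}) bool {
//		fmt.Println(key) // 3, then 1
//		return true
//	})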
 12893  
 12894  // findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap.
 12895  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
12896  // (It does not fill the full path: it returns as soon as the node is found.)
 12897  func (s *Uint64MapDesc) findNode(key uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) *uint64NodeDesc {
 12898  	x := s.header
 12899  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 12900  		succ := x.atomicLoadNext(i)
 12901  		for succ != nil && succ.lessthan(key) {
 12902  			x = succ
 12903  			succ = x.atomicLoadNext(i)
 12904  		}
 12905  		preds[i] = x
 12906  		succs[i] = succ
 12907  
12908  		// Check if the key is already in the skipmap.
 12909  		if succ != nil && succ.equal(key) {
 12910  			return succ
 12911  		}
 12912  	}
 12913  	return nil
 12914  }
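
// Worked example (editorial): suppose a descending map holds keys {9, 5, 2}
// and findNode(5, ...) is called. The search advances while succ.key > 5, so
// on every layer it fills, succ lands on the first node whose key is <= 5
// (the node with key 5 here) and pred stays on its predecessor (the node
// with key 9, or the header), matching the invariant documented above.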
 12915  
 12916  // findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list.
 12917  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
 12918  func (s *Uint64MapDesc) findNodeDelete(key uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) int {
 12919  	// lFound represents the index of the first layer at which it found a node.
 12920  	lFound, x := -1, s.header
 12921  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 12922  		succ := x.atomicLoadNext(i)
 12923  		for succ != nil && succ.lessthan(key) {
 12924  			x = succ
 12925  			succ = x.atomicLoadNext(i)
 12926  		}
 12927  		preds[i] = x
 12928  		succs[i] = succ
 12929  
12930  		// Check if the key is already in the skip list.
 12931  		if lFound == -1 && succ != nil && succ.equal(key) {
 12932  			lFound = i
 12933  		}
 12934  	}
 12935  	return lFound
 12936  }
 12937  
 12938  func unlockUint64Desc(preds [maxLevel]*uint64NodeDesc, highestLevel int) {
 12939  	var prevPred *uint64NodeDesc
 12940  	for i := highestLevel; i >= 0; i-- {
 12941  		if preds[i] != prevPred { // the node could be unlocked by previous loop
 12942  			preds[i].mu.Unlock()
 12943  			prevPred = preds[i]
 12944  		}
 12945  	}
 12946  }
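
// Editorial note: unlockUint64Desc must mirror how the insert and delete
// paths acquire locks. One node can serve as the predecessor on several
// adjacent layers, and its mutex is locked only once (the prevPred guard);
// unlocking must skip the same duplicates, since unlocking an already
// unlocked sync.Mutex is a fatal runtime error. A sketch of the pairing
// (illustrative):
//
//	// lock phase:   if preds[layer] != prevPred { preds[layer].mu.Lock() }
//	// unlock phase: if preds[i] != prevPred { preds[i].mu.Unlock() }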
 12947  
 12948  // Store sets the value for a key.
 12949  func (s *Uint64MapDesc) Store(key uint64, value interface{}) {
 12950  	level := s.randomlevel()
 12951  	var preds, succs [maxLevel]*uint64NodeDesc
 12952  	for {
 12953  		nodeFound := s.findNode(key, &preds, &succs)
 12954  		if nodeFound != nil { // indicating the key is already in the skip-list
 12955  			if !nodeFound.flags.Get(marked) {
 12956  				// We don't need to care about whether or not the node is fully linked,
 12957  				// just replace the value.
 12958  				nodeFound.storeVal(value)
 12959  				return
 12960  			}
12961  			// If the node is marked, some other goroutine is in the process of deleting it,
12962  			// so retry in the next loop.
 12963  			continue
 12964  		}
 12965  
 12966  		// Add this node into skip list.
 12967  		var (
 12968  			highestLocked        = -1 // the highest level being locked by this process
 12969  			valid                = true
 12970  			pred, succ, prevPred *uint64NodeDesc
 12971  		)
 12972  		for layer := 0; valid && layer < level; layer++ {
 12973  			pred = preds[layer]   // target node's previous node
 12974  			succ = succs[layer]   // target node's next node
 12975  			if pred != prevPred { // the node in this layer could be locked by previous loop
 12976  				pred.mu.Lock()
 12977  				highestLocked = layer
 12978  				prevPred = pred
 12979  			}
12980  			// valid checks whether another node has been inserted into this layer during this process.
 12981  			// It is valid if:
 12982  			// 1. The previous node and next node both are not marked.
 12983  			// 2. The previous node's next node is succ in this layer.
 12984  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 12985  		}
 12986  		if !valid {
 12987  			unlockUint64Desc(preds, highestLocked)
 12988  			continue
 12989  		}
 12990  
12991  		nn := newUint64NodeDesc(key, value, level)
 12992  		for layer := 0; layer < level; layer++ {
 12993  			nn.storeNext(layer, succs[layer])
 12994  			preds[layer].atomicStoreNext(layer, nn)
 12995  		}
 12996  		nn.flags.SetTrue(fullyLinked)
 12997  		unlockUint64Desc(preds, highestLocked)
 12998  		atomic.AddInt64(&s.length, 1)
 12999  		return
 13000  	}
 13001  }
 13002  
 13003  func (s *Uint64MapDesc) randomlevel() int {
 13004  	// Generate random level.
 13005  	level := randomLevel()
 13006  	// Update highest level if possible.
 13007  	for {
 13008  		hl := atomic.LoadInt64(&s.highestLevel)
 13009  		if int64(level) <= hl {
 13010  			break
 13011  		}
 13012  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 13013  			break
 13014  		}
 13015  	}
 13016  	return level
 13017  }
 13018  
 13019  // Load returns the value stored in the map for a key, or nil if no
 13020  // value is present.
 13021  // The ok result indicates whether value was found in the map.
 13022  func (s *Uint64MapDesc) Load(key uint64) (value interface{}, ok bool) {
 13023  	x := s.header
 13024  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 13025  		nex := x.atomicLoadNext(i)
 13026  		for nex != nil && nex.lessthan(key) {
 13027  			x = nex
 13028  			nex = x.atomicLoadNext(i)
 13029  		}
 13030  
13031  		// Check if the key is already in the skip list.
 13032  		if nex != nil && nex.equal(key) {
 13033  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 13034  				return nex.loadVal(), true
 13035  			}
 13036  			return nil, false
 13037  		}
 13038  	}
 13039  	return nil, false
 13040  }
 13041  
 13042  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 13043  // The loaded result reports whether the key was present.
 13044  // (Modified from Delete)
 13045  func (s *Uint64MapDesc) LoadAndDelete(key uint64) (value interface{}, loaded bool) {
 13046  	var (
 13047  		nodeToDelete *uint64NodeDesc
13048  		isMarked     bool // reports whether this operation marked the node
 13049  		topLayer     = -1
 13050  		preds, succs [maxLevel]*uint64NodeDesc
 13051  	)
 13052  	for {
 13053  		lFound := s.findNodeDelete(key, &preds, &succs)
13054  		if isMarked || // this process marked the node, or the node was found in the skip list
13055  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
13056  			if !isMarked { // the node has not been marked yet
 13057  				nodeToDelete = succs[lFound]
 13058  				topLayer = lFound
 13059  				nodeToDelete.mu.Lock()
 13060  				if nodeToDelete.flags.Get(marked) {
13061  					// The node has been marked by another process,
13062  					// which will accomplish the physical deletion.
 13063  					nodeToDelete.mu.Unlock()
 13064  					return nil, false
 13065  				}
 13066  				nodeToDelete.flags.SetTrue(marked)
 13067  				isMarked = true
 13068  			}
 13069  			// Accomplish the physical deletion.
 13070  			var (
 13071  				highestLocked        = -1 // the highest level being locked by this process
 13072  				valid                = true
 13073  				pred, succ, prevPred *uint64NodeDesc
 13074  			)
 13075  			for layer := 0; valid && (layer <= topLayer); layer++ {
 13076  				pred, succ = preds[layer], succs[layer]
 13077  				if pred != prevPred { // the node in this layer could be locked by previous loop
 13078  					pred.mu.Lock()
 13079  					highestLocked = layer
 13080  					prevPred = pred
 13081  				}
13082  				// valid checks whether another node has been inserted into this layer
13083  				// during this process, or whether the previous node has been deleted by another process.
13084  				// It is valid if:
13085  				// 1. the previous node still exists (it is not marked).
13086  				// 2. no other node has been inserted into this layer.
 13087  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 13088  			}
 13089  			if !valid {
 13090  				unlockUint64Desc(preds, highestLocked)
 13091  				continue
 13092  			}
 13093  			for i := topLayer; i >= 0; i-- {
13094  				// Now we own `nodeToDelete`; no other goroutine will modify it,
13095  				// so the non-atomic loadNext is sufficient here.
 13096  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 13097  			}
 13098  			nodeToDelete.mu.Unlock()
 13099  			unlockUint64Desc(preds, highestLocked)
 13100  			atomic.AddInt64(&s.length, -1)
 13101  			return nodeToDelete.loadVal(), true
 13102  		}
 13103  		return nil, false
 13104  	}
 13105  }
 13106  
 13107  // LoadOrStore returns the existing value for the key if present.
 13108  // Otherwise, it stores and returns the given value.
 13109  // The loaded result is true if the value was loaded, false if stored.
 13110  // (Modified from Store)
 13111  func (s *Uint64MapDesc) LoadOrStore(key uint64, value interface{}) (actual interface{}, loaded bool) {
 13112  	var (
 13113  		level        int
 13114  		preds, succs [maxLevel]*uint64NodeDesc
 13115  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 13116  	)
 13117  	for {
 13118  		nodeFound := s.findNode(key, &preds, &succs)
 13119  		if nodeFound != nil { // indicating the key is already in the skip-list
 13120  			if !nodeFound.flags.Get(marked) {
 13121  				// We don't need to care about whether or not the node is fully linked,
 13122  				// just return the value.
 13123  				return nodeFound.loadVal(), true
 13124  			}
13125  			// If the node is marked, some other goroutine is in the process of deleting it,
13126  			// so retry in the next loop.
 13127  			continue
 13128  		}
 13129  
 13130  		// Add this node into skip list.
 13131  		var (
 13132  			highestLocked        = -1 // the highest level being locked by this process
 13133  			valid                = true
 13134  			pred, succ, prevPred *uint64NodeDesc
 13135  		)
 13136  		if level == 0 {
 13137  			level = s.randomlevel()
 13138  			if level > hl {
13139  				// If the highest level was updated, it usually means that many goroutines
13140  				// are inserting items. Hopefully we can find a better path in the next loop.
 13141  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 13142  				// but this strategy's performance is almost the same as the existing method.
 13143  				continue
 13144  			}
 13145  		}
 13146  		for layer := 0; valid && layer < level; layer++ {
 13147  			pred = preds[layer]   // target node's previous node
 13148  			succ = succs[layer]   // target node's next node
 13149  			if pred != prevPred { // the node in this layer could be locked by previous loop
 13150  				pred.mu.Lock()
 13151  				highestLocked = layer
 13152  				prevPred = pred
 13153  			}
13154  			// valid checks whether another node has been inserted into this layer during this process.
 13155  			// It is valid if:
 13156  			// 1. The previous node and next node both are not marked.
 13157  			// 2. The previous node's next node is succ in this layer.
 13158  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 13159  		}
 13160  		if !valid {
 13161  			unlockUint64Desc(preds, highestLocked)
 13162  			continue
 13163  		}
 13164  
13165  		nn := newUint64NodeDesc(key, value, level)
 13166  		for layer := 0; layer < level; layer++ {
 13167  			nn.storeNext(layer, succs[layer])
 13168  			preds[layer].atomicStoreNext(layer, nn)
 13169  		}
 13170  		nn.flags.SetTrue(fullyLinked)
 13171  		unlockUint64Desc(preds, highestLocked)
 13172  		atomic.AddInt64(&s.length, 1)
 13173  		return value, false
 13174  	}
 13175  }
 13176  
 13177  // LoadOrStoreLazy returns the existing value for the key if present.
13178  // Otherwise, it stores and returns the value computed by f; f is invoked at most once.
 13179  // The loaded result is true if the value was loaded, false if stored.
 13180  // (Modified from LoadOrStore)
 13181  func (s *Uint64MapDesc) LoadOrStoreLazy(key uint64, f func() interface{}) (actual interface{}, loaded bool) {
 13182  	var (
 13183  		level        int
 13184  		preds, succs [maxLevel]*uint64NodeDesc
 13185  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 13186  	)
 13187  	for {
 13188  		nodeFound := s.findNode(key, &preds, &succs)
 13189  		if nodeFound != nil { // indicating the key is already in the skip-list
 13190  			if !nodeFound.flags.Get(marked) {
 13191  				// We don't need to care about whether or not the node is fully linked,
 13192  				// just return the value.
 13193  				return nodeFound.loadVal(), true
 13194  			}
13195  			// If the node is marked, some other goroutine is in the process of deleting it,
13196  			// so retry in the next loop.
 13197  			continue
 13198  		}
 13199  
 13200  		// Add this node into skip list.
 13201  		var (
 13202  			highestLocked        = -1 // the highest level being locked by this process
 13203  			valid                = true
 13204  			pred, succ, prevPred *uint64NodeDesc
 13205  		)
 13206  		if level == 0 {
 13207  			level = s.randomlevel()
 13208  			if level > hl {
13209  				// If the highest level was updated, it usually means that many goroutines
13210  				// are inserting items. Hopefully we can find a better path in the next loop.
 13211  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 13212  				// but this strategy's performance is almost the same as the existing method.
 13213  				continue
 13214  			}
 13215  		}
 13216  		for layer := 0; valid && layer < level; layer++ {
 13217  			pred = preds[layer]   // target node's previous node
 13218  			succ = succs[layer]   // target node's next node
 13219  			if pred != prevPred { // the node in this layer could be locked by previous loop
 13220  				pred.mu.Lock()
 13221  				highestLocked = layer
 13222  				prevPred = pred
 13223  			}
13224  			// valid checks whether another node has been inserted into this layer during this process.
 13225  			// It is valid if:
 13226  			// 1. The previous node and next node both are not marked.
 13227  			// 2. The previous node's next node is succ in this layer.
 13228  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 13229  		}
 13230  		if !valid {
 13231  			unlockUint64Desc(preds, highestLocked)
 13232  			continue
 13233  		}
 13234  		value := f()
13235  		nn := newUint64NodeDesc(key, value, level)
 13236  		for layer := 0; layer < level; layer++ {
 13237  			nn.storeNext(layer, succs[layer])
 13238  			preds[layer].atomicStoreNext(layer, nn)
 13239  		}
 13240  		nn.flags.SetTrue(fullyLinked)
 13241  		unlockUint64Desc(preds, highestLocked)
 13242  		atomic.AddInt64(&s.length, 1)
 13243  		return value, false
 13244  	}
 13245  }
 13246  
 13247  // Delete deletes the value for a key.
 13248  func (s *Uint64MapDesc) Delete(key uint64) bool {
 13249  	var (
 13250  		nodeToDelete *uint64NodeDesc
13251  		isMarked     bool // reports whether this operation marked the node
 13252  		topLayer     = -1
 13253  		preds, succs [maxLevel]*uint64NodeDesc
 13254  	)
 13255  	for {
 13256  		lFound := s.findNodeDelete(key, &preds, &succs)
13257  		if isMarked || // this process marked the node, or the node was found in the skip list
13258  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
13259  			if !isMarked { // the node has not been marked yet
 13260  				nodeToDelete = succs[lFound]
 13261  				topLayer = lFound
 13262  				nodeToDelete.mu.Lock()
 13263  				if nodeToDelete.flags.Get(marked) {
13264  					// The node has been marked by another process,
13265  					// which will accomplish the physical deletion.
 13266  					nodeToDelete.mu.Unlock()
 13267  					return false
 13268  				}
 13269  				nodeToDelete.flags.SetTrue(marked)
 13270  				isMarked = true
 13271  			}
 13272  			// Accomplish the physical deletion.
 13273  			var (
 13274  				highestLocked        = -1 // the highest level being locked by this process
 13275  				valid                = true
 13276  				pred, succ, prevPred *uint64NodeDesc
 13277  			)
 13278  			for layer := 0; valid && (layer <= topLayer); layer++ {
 13279  				pred, succ = preds[layer], succs[layer]
 13280  				if pred != prevPred { // the node in this layer could be locked by previous loop
 13281  					pred.mu.Lock()
 13282  					highestLocked = layer
 13283  					prevPred = pred
 13284  				}
13285  				// valid checks whether another node has been inserted into this layer
13286  				// during this process, or whether the previous node has been deleted by another process.
13287  				// It is valid if:
13288  				// 1. the previous node still exists (it is not marked).
13289  				// 2. no other node has been inserted into this layer.
 13290  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 13291  			}
 13292  			if !valid {
 13293  				unlockUint64Desc(preds, highestLocked)
 13294  				continue
 13295  			}
 13296  			for i := topLayer; i >= 0; i-- {
13297  				// Now we own `nodeToDelete`; no other goroutine will modify it,
13298  				// so the non-atomic loadNext is sufficient here.
 13299  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 13300  			}
 13301  			nodeToDelete.mu.Unlock()
 13302  			unlockUint64Desc(preds, highestLocked)
 13303  			atomic.AddInt64(&s.length, -1)
 13304  			return true
 13305  		}
 13306  		return false
 13307  	}
 13308  }
 13309  
 13310  // Range calls f sequentially for each key and value present in the skipmap.
 13311  // If f returns false, range stops the iteration.
 13312  //
 13313  // Range does not necessarily correspond to any consistent snapshot of the Map's
 13314  // contents: no key will be visited more than once, but if the value for any key
 13315  // is stored or deleted concurrently, Range may reflect any mapping for that key
 13316  // from any point during the Range call.
 13317  func (s *Uint64MapDesc) Range(f func(key uint64, value interface{}) bool) {
 13318  	x := s.header.atomicLoadNext(0)
 13319  	for x != nil {
 13320  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 13321  			x = x.atomicLoadNext(0)
 13322  			continue
 13323  		}
 13324  		if !f(x.key, x.loadVal()) {
 13325  			break
 13326  		}
 13327  		x = x.atomicLoadNext(0)
 13328  	}
 13329  }
 13330  
13331  // Len returns the length of this skipmap.
 13332  func (s *Uint64MapDesc) Len() int {
 13333  	return int(atomic.LoadInt64(&s.length))
 13334  }
 13335  
13336  // UintptrMap represents a map based on a skip list, in ascending order.
 13337  type UintptrMap struct {
 13338  	header       *uintptrNode
 13339  	length       int64
 13340  	highestLevel int64 // highest level for now
 13341  }
 13342  
 13343  type uintptrNode struct {
 13344  	key   uintptr
 13345  	value unsafe.Pointer // *interface{}
 13346  	next  optionalArray  // [level]*uintptrNode
 13347  	mu    sync.Mutex
 13348  	flags bitflag
 13349  	level uint32
 13350  }
 13351  
 13352  func newUintptrNode(key uintptr, value interface{}, level int) *uintptrNode {
 13353  	node := &uintptrNode{
 13354  		key:   key,
 13355  		level: uint32(level),
 13356  	}
 13357  	node.storeVal(value)
 13358  	if level > op1 {
 13359  		node.next.extra = new([op2]unsafe.Pointer)
 13360  	}
 13361  	return node
 13362  }
 13363  
 13364  func (n *uintptrNode) storeVal(value interface{}) {
 13365  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 13366  }
 13367  
 13368  func (n *uintptrNode) loadVal() interface{} {
 13369  	return *(*interface{})(atomic.LoadPointer(&n.value))
 13370  }
 13371  
 13372  func (n *uintptrNode) loadNext(i int) *uintptrNode {
 13373  	return (*uintptrNode)(n.next.load(i))
 13374  }
 13375  
 13376  func (n *uintptrNode) storeNext(i int, node *uintptrNode) {
 13377  	n.next.store(i, unsafe.Pointer(node))
 13378  }
 13379  
 13380  func (n *uintptrNode) atomicLoadNext(i int) *uintptrNode {
 13381  	return (*uintptrNode)(n.next.atomicLoad(i))
 13382  }
 13383  
 13384  func (n *uintptrNode) atomicStoreNext(i int, node *uintptrNode) {
 13385  	n.next.atomicStore(i, unsafe.Pointer(node))
 13386  }
 13387  
 13388  func (n *uintptrNode) lessthan(key uintptr) bool {
 13389  	return n.key < key
 13390  }
 13391  
 13392  func (n *uintptrNode) equal(key uintptr) bool {
 13393  	return n.key == key
 13394  }
 13395  
13396  // NewUintptr returns an empty uintptr skipmap.
 13397  func NewUintptr() *UintptrMap {
 13398  	h := newUintptrNode(0, "", maxLevel)
 13399  	h.flags.SetTrue(fullyLinked)
 13400  	return &UintptrMap{
 13401  		header:       h,
 13402  		highestLevel: defaultHighestLevel,
 13403  	}
 13404  }
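
// Usage caveat (editorial): a uintptr key is just an integer to the garbage
// collector, so converting a pointer to uintptr does not keep the pointee
// alive. Keys derived from live pointers are only meaningful while the
// caller retains the original pointer elsewhere (myObject is a hypothetical
// type):
//
//	m := skipmap.NewUintptr()
//	obj := new(myObject)
//	m.Store(uintptr(unsafe.Pointer(obj)), "metadata")
//	// obj must stay referenced for the key to remain meaningful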
 13405  
 13406  // findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap.
13407  // The returned preds and succs always satisfy preds[i] < key <= succs[i].
13408  // (It does not fill the full path: it returns as soon as the node is found.)
 13409  func (s *UintptrMap) findNode(key uintptr, preds *[maxLevel]*uintptrNode, succs *[maxLevel]*uintptrNode) *uintptrNode {
 13410  	x := s.header
 13411  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 13412  		succ := x.atomicLoadNext(i)
 13413  		for succ != nil && succ.lessthan(key) {
 13414  			x = succ
 13415  			succ = x.atomicLoadNext(i)
 13416  		}
 13417  		preds[i] = x
 13418  		succs[i] = succ
 13419  
13420  		// Check if the key is already in the skipmap.
 13421  		if succ != nil && succ.equal(key) {
 13422  			return succ
 13423  		}
 13424  	}
 13425  	return nil
 13426  }
 13427  
 13428  // findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list.
13429  // The returned preds and succs always satisfy preds[i] < key <= succs[i].
 13430  func (s *UintptrMap) findNodeDelete(key uintptr, preds *[maxLevel]*uintptrNode, succs *[maxLevel]*uintptrNode) int {
 13431  	// lFound represents the index of the first layer at which it found a node.
 13432  	lFound, x := -1, s.header
 13433  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 13434  		succ := x.atomicLoadNext(i)
 13435  		for succ != nil && succ.lessthan(key) {
 13436  			x = succ
 13437  			succ = x.atomicLoadNext(i)
 13438  		}
 13439  		preds[i] = x
 13440  		succs[i] = succ
 13441  
13442  		// Check if the key is already in the skip list.
 13443  		if lFound == -1 && succ != nil && succ.equal(key) {
 13444  			lFound = i
 13445  		}
 13446  	}
 13447  	return lFound
 13448  }
 13449  
 13450  func unlockUintptr(preds [maxLevel]*uintptrNode, highestLevel int) {
 13451  	var prevPred *uintptrNode
 13452  	for i := highestLevel; i >= 0; i-- {
 13453  		if preds[i] != prevPred { // the node could be unlocked by previous loop
 13454  			preds[i].mu.Unlock()
 13455  			prevPred = preds[i]
 13456  		}
 13457  	}
 13458  }
 13459  
 13460  // Store sets the value for a key.
 13461  func (s *UintptrMap) Store(key uintptr, value interface{}) {
 13462  	level := s.randomlevel()
 13463  	var preds, succs [maxLevel]*uintptrNode
 13464  	for {
 13465  		nodeFound := s.findNode(key, &preds, &succs)
 13466  		if nodeFound != nil { // indicating the key is already in the skip-list
 13467  			if !nodeFound.flags.Get(marked) {
 13468  				// We don't need to care about whether or not the node is fully linked,
 13469  				// just replace the value.
 13470  				nodeFound.storeVal(value)
 13471  				return
 13472  			}
13473  			// If the node is marked, some other goroutine is in the process of deleting it,
13474  			// so retry in the next loop.
 13475  			continue
 13476  		}
 13477  
 13478  		// Add this node into skip list.
 13479  		var (
 13480  			highestLocked        = -1 // the highest level being locked by this process
 13481  			valid                = true
 13482  			pred, succ, prevPred *uintptrNode
 13483  		)
 13484  		for layer := 0; valid && layer < level; layer++ {
 13485  			pred = preds[layer]   // target node's previous node
 13486  			succ = succs[layer]   // target node's next node
 13487  			if pred != prevPred { // the node in this layer could be locked by previous loop
 13488  				pred.mu.Lock()
 13489  				highestLocked = layer
 13490  				prevPred = pred
 13491  			}
13492  			// valid checks whether another node has been inserted into this layer during this process.
 13493  			// It is valid if:
 13494  			// 1. The previous node and next node both are not marked.
 13495  			// 2. The previous node's next node is succ in this layer.
 13496  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 13497  		}
 13498  		if !valid {
 13499  			unlockUintptr(preds, highestLocked)
 13500  			continue
 13501  		}
 13502  
 13503  		nn := newUintptrNode(key, value, level)
 13504  		for layer := 0; layer < level; layer++ {
 13505  			nn.storeNext(layer, succs[layer])
 13506  			preds[layer].atomicStoreNext(layer, nn)
 13507  		}
 13508  		nn.flags.SetTrue(fullyLinked)
 13509  		unlockUintptr(preds, highestLocked)
 13510  		atomic.AddInt64(&s.length, 1)
 13511  		return
 13512  	}
 13513  }
 13514  
 13515  func (s *UintptrMap) randomlevel() int {
 13516  	// Generate random level.
 13517  	level := randomLevel()
 13518  	// Update highest level if possible.
 13519  	for {
 13520  		hl := atomic.LoadInt64(&s.highestLevel)
 13521  		if int64(level) <= hl {
 13522  			break
 13523  		}
 13524  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 13525  			break
 13526  		}
 13527  	}
 13528  	return level
 13529  }
 13530  
 13531  // Load returns the value stored in the map for a key, or nil if no
 13532  // value is present.
 13533  // The ok result indicates whether value was found in the map.
 13534  func (s *UintptrMap) Load(key uintptr) (value interface{}, ok bool) {
 13535  	x := s.header
 13536  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 13537  		nex := x.atomicLoadNext(i)
 13538  		for nex != nil && nex.lessthan(key) {
 13539  			x = nex
 13540  			nex = x.atomicLoadNext(i)
 13541  		}
 13542  
13543  		// Check if the key is already in the skip list.
 13544  		if nex != nil && nex.equal(key) {
 13545  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 13546  				return nex.loadVal(), true
 13547  			}
 13548  			return nil, false
 13549  		}
 13550  	}
 13551  	return nil, false
 13552  }
 13553  
 13554  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 13555  // The loaded result reports whether the key was present.
 13556  // (Modified from Delete)
 13557  func (s *UintptrMap) LoadAndDelete(key uintptr) (value interface{}, loaded bool) {
 13558  	var (
 13559  		nodeToDelete *uintptrNode
13560  		isMarked     bool // reports whether this operation marked the node
 13561  		topLayer     = -1
 13562  		preds, succs [maxLevel]*uintptrNode
 13563  	)
 13564  	for {
 13565  		lFound := s.findNodeDelete(key, &preds, &succs)
13566  		if isMarked || // this process marked the node, or the node was found in the skip list
13567  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
13568  			if !isMarked { // the node has not been marked yet
 13569  				nodeToDelete = succs[lFound]
 13570  				topLayer = lFound
 13571  				nodeToDelete.mu.Lock()
 13572  				if nodeToDelete.flags.Get(marked) {
13573  					// The node has been marked by another process,
13574  					// which will accomplish the physical deletion.
 13575  					nodeToDelete.mu.Unlock()
 13576  					return nil, false
 13577  				}
 13578  				nodeToDelete.flags.SetTrue(marked)
 13579  				isMarked = true
 13580  			}
 13581  			// Accomplish the physical deletion.
 13582  			var (
 13583  				highestLocked        = -1 // the highest level being locked by this process
 13584  				valid                = true
 13585  				pred, succ, prevPred *uintptrNode
 13586  			)
 13587  			for layer := 0; valid && (layer <= topLayer); layer++ {
 13588  				pred, succ = preds[layer], succs[layer]
 13589  				if pred != prevPred { // the node in this layer could be locked by previous loop
 13590  					pred.mu.Lock()
 13591  					highestLocked = layer
 13592  					prevPred = pred
 13593  				}
13594  				// valid checks whether another node has been inserted into this layer
13595  				// during this process, or whether the previous node has been deleted by another process.
13596  				// It is valid if:
13597  				// 1. the previous node still exists (it is not marked).
13598  				// 2. no other node has been inserted into this layer.
 13599  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 13600  			}
 13601  			if !valid {
 13602  				unlockUintptr(preds, highestLocked)
 13603  				continue
 13604  			}
 13605  			for i := topLayer; i >= 0; i-- {
13606  				// Now we own `nodeToDelete`; no other goroutine will modify it,
13607  				// so the non-atomic loadNext is sufficient here.
 13608  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 13609  			}
 13610  			nodeToDelete.mu.Unlock()
 13611  			unlockUintptr(preds, highestLocked)
 13612  			atomic.AddInt64(&s.length, -1)
 13613  			return nodeToDelete.loadVal(), true
 13614  		}
 13615  		return nil, false
 13616  	}
 13617  }
 13618  
 13619  // LoadOrStore returns the existing value for the key if present.
 13620  // Otherwise, it stores and returns the given value.
 13621  // The loaded result is true if the value was loaded, false if stored.
 13622  // (Modified from Store)
 13623  func (s *UintptrMap) LoadOrStore(key uintptr, value interface{}) (actual interface{}, loaded bool) {
 13624  	var (
 13625  		level        int
 13626  		preds, succs [maxLevel]*uintptrNode
 13627  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 13628  	)
 13629  	for {
 13630  		nodeFound := s.findNode(key, &preds, &succs)
 13631  		if nodeFound != nil { // indicating the key is already in the skip-list
 13632  			if !nodeFound.flags.Get(marked) {
 13633  				// We don't need to care about whether or not the node is fully linked,
 13634  				// just return the value.
 13635  				return nodeFound.loadVal(), true
 13636  			}
13637  			// If the node is marked, some other goroutine is in the process of deleting it,
13638  			// so retry in the next loop.
 13639  			continue
 13640  		}
 13641  
 13642  		// Add this node into skip list.
 13643  		var (
 13644  			highestLocked        = -1 // the highest level being locked by this process
 13645  			valid                = true
 13646  			pred, succ, prevPred *uintptrNode
 13647  		)
 13648  		if level == 0 {
 13649  			level = s.randomlevel()
 13650  			if level > hl {
13651  				// If the highest level was updated, it usually means that many goroutines
13652  				// are inserting items. Hopefully we can find a better path in the next loop.
 13653  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 13654  				// but this strategy's performance is almost the same as the existing method.
 13655  				continue
 13656  			}
 13657  		}
 13658  		for layer := 0; valid && layer < level; layer++ {
 13659  			pred = preds[layer]   // target node's previous node
 13660  			succ = succs[layer]   // target node's next node
 13661  			if pred != prevPred { // the node in this layer could be locked by previous loop
 13662  				pred.mu.Lock()
 13663  				highestLocked = layer
 13664  				prevPred = pred
 13665  			}
13666  			// valid checks whether another node has been inserted into this layer during this process.
 13667  			// It is valid if:
 13668  			// 1. The previous node and next node both are not marked.
 13669  			// 2. The previous node's next node is succ in this layer.
 13670  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 13671  		}
 13672  		if !valid {
 13673  			unlockUintptr(preds, highestLocked)
 13674  			continue
 13675  		}
 13676  
 13677  		nn := newUintptrNode(key, value, level)
 13678  		for layer := 0; layer < level; layer++ {
 13679  			nn.storeNext(layer, succs[layer])
 13680  			preds[layer].atomicStoreNext(layer, nn)
 13681  		}
 13682  		nn.flags.SetTrue(fullyLinked)
 13683  		unlockUintptr(preds, highestLocked)
 13684  		atomic.AddInt64(&s.length, 1)
 13685  		return value, false
 13686  	}
 13687  }
 13688  
 13689  // LoadOrStoreLazy returns the existing value for the key if present.
13690  // Otherwise, it stores and returns the value computed by f; f is invoked at most once.
 13691  // The loaded result is true if the value was loaded, false if stored.
 13692  // (Modified from LoadOrStore)
 13693  func (s *UintptrMap) LoadOrStoreLazy(key uintptr, f func() interface{}) (actual interface{}, loaded bool) {
 13694  	var (
 13695  		level        int
 13696  		preds, succs [maxLevel]*uintptrNode
 13697  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 13698  	)
 13699  	for {
 13700  		nodeFound := s.findNode(key, &preds, &succs)
 13701  		if nodeFound != nil { // indicating the key is already in the skip-list
 13702  			if !nodeFound.flags.Get(marked) {
 13703  				// We don't need to care about whether or not the node is fully linked,
 13704  				// just return the value.
 13705  				return nodeFound.loadVal(), true
 13706  			}
13707  			// If the node is marked, some other goroutine is in the process of deleting it,
13708  			// so retry in the next loop.
 13709  			continue
 13710  		}
 13711  
 13712  		// Add this node into skip list.
 13713  		var (
 13714  			highestLocked        = -1 // the highest level being locked by this process
 13715  			valid                = true
 13716  			pred, succ, prevPred *uintptrNode
 13717  		)
 13718  		if level == 0 {
 13719  			level = s.randomlevel()
 13720  			if level > hl {
13721  				// If the highest level was updated, it usually means that many goroutines
13722  				// are inserting items. Hopefully we can find a better path in the next loop.
 13723  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 13724  				// but this strategy's performance is almost the same as the existing method.
 13725  				continue
 13726  			}
 13727  		}
 13728  		for layer := 0; valid && layer < level; layer++ {
 13729  			pred = preds[layer]   // target node's previous node
 13730  			succ = succs[layer]   // target node's next node
 13731  			if pred != prevPred { // the node in this layer could be locked by previous loop
 13732  				pred.mu.Lock()
 13733  				highestLocked = layer
 13734  				prevPred = pred
 13735  			}
13736  			// valid checks whether another node has been inserted into this layer during this process.
 13737  			// It is valid if:
 13738  			// 1. The previous node and next node both are not marked.
 13739  			// 2. The previous node's next node is succ in this layer.
 13740  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 13741  		}
 13742  		if !valid {
 13743  			unlockUintptr(preds, highestLocked)
 13744  			continue
 13745  		}
 13746  		value := f()
 13747  		nn := newUintptrNode(key, value, level)
 13748  		for layer := 0; layer < level; layer++ {
 13749  			nn.storeNext(layer, succs[layer])
 13750  			preds[layer].atomicStoreNext(layer, nn)
 13751  		}
 13752  		nn.flags.SetTrue(fullyLinked)
 13753  		unlockUintptr(preds, highestLocked)
 13754  		atomic.AddInt64(&s.length, 1)
 13755  		return value, false
 13756  	}
 13757  }
 13758  
 13759  // Delete deletes the value for a key.
 13760  func (s *UintptrMap) Delete(key uintptr) bool {
 13761  	var (
 13762  		nodeToDelete *uintptrNode
13763  		isMarked     bool // reports whether this operation marked the node
 13764  		topLayer     = -1
 13765  		preds, succs [maxLevel]*uintptrNode
 13766  	)
 13767  	for {
 13768  		lFound := s.findNodeDelete(key, &preds, &succs)
13769  		if isMarked || // this process marked the node, or the node was found in the skip list
13770  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
13771  			if !isMarked { // the node has not been marked yet
 13772  				nodeToDelete = succs[lFound]
 13773  				topLayer = lFound
 13774  				nodeToDelete.mu.Lock()
 13775  				if nodeToDelete.flags.Get(marked) {
13776  					// The node has been marked by another process,
13777  					// which will accomplish the physical deletion.
 13778  					nodeToDelete.mu.Unlock()
 13779  					return false
 13780  				}
 13781  				nodeToDelete.flags.SetTrue(marked)
 13782  				isMarked = true
 13783  			}
 13784  			// Accomplish the physical deletion.
 13785  			var (
 13786  				highestLocked        = -1 // the highest level being locked by this process
 13787  				valid                = true
 13788  				pred, succ, prevPred *uintptrNode
 13789  			)
 13790  			for layer := 0; valid && (layer <= topLayer); layer++ {
 13791  				pred, succ = preds[layer], succs[layer]
 13792  				if pred != prevPred { // the node in this layer could be locked by previous loop
 13793  					pred.mu.Lock()
 13794  					highestLocked = layer
 13795  					prevPred = pred
 13796  				}
13797  				// valid checks whether another node has been inserted into this layer
13798  				// during this process, or whether the previous node has been deleted by another process.
13799  				// It is valid if:
13800  				// 1. the previous node still exists (it is not marked).
13801  				// 2. no other node has been inserted into this layer.
 13802  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 13803  			}
 13804  			if !valid {
 13805  				unlockUintptr(preds, highestLocked)
 13806  				continue
 13807  			}
 13808  			for i := topLayer; i >= 0; i-- {
13809  				// Now we own `nodeToDelete`; no other goroutine will modify it,
13810  				// so the non-atomic loadNext is sufficient here.
 13811  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 13812  			}
 13813  			nodeToDelete.mu.Unlock()
 13814  			unlockUintptr(preds, highestLocked)
 13815  			atomic.AddInt64(&s.length, -1)
 13816  			return true
 13817  		}
 13818  		return false
 13819  	}
 13820  }
 13821  
 13822  // Range calls f sequentially for each key and value present in the skipmap.
 13823  // If f returns false, range stops the iteration.
 13824  //
 13825  // Range does not necessarily correspond to any consistent snapshot of the Map's
 13826  // contents: no key will be visited more than once, but if the value for any key
 13827  // is stored or deleted concurrently, Range may reflect any mapping for that key
 13828  // from any point during the Range call.
 13829  func (s *UintptrMap) Range(f func(key uintptr, value interface{}) bool) {
 13830  	x := s.header.atomicLoadNext(0)
 13831  	for x != nil {
 13832  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 13833  			x = x.atomicLoadNext(0)
 13834  			continue
 13835  		}
 13836  		if !f(x.key, x.loadVal()) {
 13837  			break
 13838  		}
 13839  		x = x.atomicLoadNext(0)
 13840  	}
 13841  }
 13842  
13843  // Len returns the length of this skipmap.
 13844  func (s *UintptrMap) Len() int {
 13845  	return int(atomic.LoadInt64(&s.length))
 13846  }
 13847  
13848  // UintptrMapDesc represents a map based on a skip list, in descending order.
 13849  type UintptrMapDesc struct {
 13850  	header       *uintptrNodeDesc
 13851  	length       int64
 13852  	highestLevel int64 // highest level for now
 13853  }
 13854  
 13855  type uintptrNodeDesc struct {
 13856  	key   uintptr
 13857  	value unsafe.Pointer // *interface{}
 13858  	next  optionalArray  // [level]*uintptrNodeDesc
 13859  	mu    sync.Mutex
 13860  	flags bitflag
 13861  	level uint32
 13862  }
 13863  
 13864  func newUintptrNodeDesc(key uintptr, value interface{}, level int) *uintptrNodeDesc {
 13865  	node := &uintptrNodeDesc{
 13866  		key:   key,
 13867  		level: uint32(level),
 13868  	}
 13869  	node.storeVal(value)
 13870  	if level > op1 {
 13871  		node.next.extra = new([op2]unsafe.Pointer)
 13872  	}
 13873  	return node
 13874  }
 13875  
 13876  func (n *uintptrNodeDesc) storeVal(value interface{}) {
 13877  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 13878  }
 13879  
 13880  func (n *uintptrNodeDesc) loadVal() interface{} {
 13881  	return *(*interface{})(atomic.LoadPointer(&n.value))
 13882  }
 13883  
 13884  func (n *uintptrNodeDesc) loadNext(i int) *uintptrNodeDesc {
 13885  	return (*uintptrNodeDesc)(n.next.load(i))
 13886  }
 13887  
 13888  func (n *uintptrNodeDesc) storeNext(i int, node *uintptrNodeDesc) {
 13889  	n.next.store(i, unsafe.Pointer(node))
 13890  }
 13891  
 13892  func (n *uintptrNodeDesc) atomicLoadNext(i int) *uintptrNodeDesc {
 13893  	return (*uintptrNodeDesc)(n.next.atomicLoad(i))
 13894  }
 13895  
 13896  func (n *uintptrNodeDesc) atomicStoreNext(i int, node *uintptrNodeDesc) {
 13897  	n.next.atomicStore(i, unsafe.Pointer(node))
 13898  }
 13899  
 13900  func (n *uintptrNodeDesc) lessthan(key uintptr) bool {
 13901  	return n.key > key
 13902  }
 13903  
 13904  func (n *uintptrNodeDesc) equal(key uintptr) bool {
 13905  	return n.key == key
 13906  }
 13907  
13908  // NewUintptrDesc returns an empty uintptr skipmap in descending order.
 13909  func NewUintptrDesc() *UintptrMapDesc {
 13910  	h := newUintptrNodeDesc(0, "", maxLevel)
 13911  	h.flags.SetTrue(fullyLinked)
 13912  	return &UintptrMapDesc{
 13913  		header:       h,
 13914  		highestLevel: defaultHighestLevel,
 13915  	}
 13916  }
 13917  
 13918  // findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap.
 13919  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
13920  // (It does not fill the full path: it returns as soon as the node is found.)
 13921  func (s *UintptrMapDesc) findNode(key uintptr, preds *[maxLevel]*uintptrNodeDesc, succs *[maxLevel]*uintptrNodeDesc) *uintptrNodeDesc {
 13922  	x := s.header
 13923  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 13924  		succ := x.atomicLoadNext(i)
 13925  		for succ != nil && succ.lessthan(key) {
 13926  			x = succ
 13927  			succ = x.atomicLoadNext(i)
 13928  		}
 13929  		preds[i] = x
 13930  		succs[i] = succ
 13931  
13932  		// Check if the key is already in the skipmap.
 13933  		if succ != nil && succ.equal(key) {
 13934  			return succ
 13935  		}
 13936  	}
 13937  	return nil
 13938  }
 13939  
 13940  // findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list.
 13941  // The returned preds and succs always satisfy preds[i] > key >= succs[i].
 13942  func (s *UintptrMapDesc) findNodeDelete(key uintptr, preds *[maxLevel]*uintptrNodeDesc, succs *[maxLevel]*uintptrNodeDesc) int {
 13943  	// lFound is the index of the first (highest) layer at which the node was found.
 13944  	lFound, x := -1, s.header
 13945  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 13946  		succ := x.atomicLoadNext(i)
 13947  		for succ != nil && succ.lessthan(key) {
 13948  			x = succ
 13949  			succ = x.atomicLoadNext(i)
 13950  		}
 13951  		preds[i] = x
 13952  		succs[i] = succ
 13953  
 13954  		// Check if the key is already in the skip list.
 13955  		if lFound == -1 && succ != nil && succ.equal(key) {
 13956  			lFound = i
 13957  		}
 13958  	}
 13959  	return lFound
 13960  }
 13961  
 13962  func unlockUintptrDesc(preds [maxLevel]*uintptrNodeDesc, highestLevel int) {
 13963  	var prevPred *uintptrNodeDesc
 13964  	for i := highestLevel; i >= 0; i-- {
 13965  		if preds[i] != prevPred { // the node could be unlocked by previous loop
 13966  			preds[i].mu.Unlock()
 13967  			prevPred = preds[i]
 13968  		}
 13969  	}
 13970  }
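
        // Editorial note: a single tall node can be the predecessor on several
        // adjacent layers, and the locking loops below lock it only once, so the
        // unlock loop must skip duplicates. For example, with preds = [A, A, B]
        // and highestLevel = 2, it unlocks B once and A once, never A twice.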
 13971  
 13972  // Store sets the value for a key.
 13973  func (s *UintptrMapDesc) Store(key uintptr, value interface{}) {
 13974  	level := s.randomlevel()
 13975  	var preds, succs [maxLevel]*uintptrNodeDesc
 13976  	for {
 13977  		nodeFound := s.findNode(key, &preds, &succs)
 13978  		if nodeFound != nil { // indicating the key is already in the skip-list
 13979  			if !nodeFound.flags.Get(marked) {
 13980  				// We don't need to care about whether or not the node is fully linked,
 13981  				// just replace the value.
 13982  				nodeFound.storeVal(value)
 13983  				return
 13984  			}
 13985  			// If the node is marked, another goroutine is in the process of deleting it,
 13986  			// so we retry and add the node in the next loop.
 13987  			continue
 13988  		}
 13989  
 13990  		// Add this node into the skip list.
 13991  		var (
 13992  			highestLocked        = -1 // the highest level being locked by this process
 13993  			valid                = true
 13994  			pred, succ, prevPred *uintptrNodeDesc
 13995  		)
 13996  		for layer := 0; valid && layer < level; layer++ {
 13997  			pred = preds[layer]   // target node's previous node
 13998  			succ = succs[layer]   // target node's next node
 13999  			if pred != prevPred { // the node in this layer could be locked by previous loop
 14000  				pred.mu.Lock()
 14001  				highestLocked = layer
 14002  				prevPred = pred
 14003  			}
 14004  			// valid checks whether another node has been inserted into the skip list in this layer during this operation.
 14005  			// It is valid if:
 14006  			// 1. Neither the previous node nor the next node is marked.
 14007  			// 2. The previous node's next node is succ in this layer.
 14008  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 14009  		}
 14010  		if !valid {
 14011  			unlockUintptrDesc(preds, highestLocked)
 14012  			continue
 14013  		}
 14014  
 14015  		nn := newUintptrNodeDesc(key, value, level)
 14016  		for layer := 0; layer < level; layer++ {
 14017  			nn.storeNext(layer, succs[layer])
 14018  			preds[layer].atomicStoreNext(layer, nn)
 14019  		}
 14020  		nn.flags.SetTrue(fullyLinked)
 14021  		unlockUintptrDesc(preds, highestLocked)
 14022  		atomic.AddInt64(&s.length, 1)
 14023  		return
 14024  	}
 14025  }
 14026  
 14027  func (s *UintptrMapDesc) randomlevel() int {
 14028  	// Generate random level.
 14029  	level := randomLevel()
 14030  	// Update highest level if possible.
 14031  	for {
 14032  		hl := atomic.LoadInt64(&s.highestLevel)
 14033  		if int64(level) <= hl {
 14034  			break
 14035  		}
 14036  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 14037  			break
 14038  		}
 14039  	}
 14040  	return level
 14041  }
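
        // Editorial note: the CAS loop above is the usual lock-free "store the
        // maximum" pattern. A self-contained sketch of the same idea (storeMax is
        // a hypothetical helper, not part of this package):
        //
        //	func storeMax(addr *int64, v int64) {
        //		for {
        //			cur := atomic.LoadInt64(addr)
        //			if v <= cur || atomic.CompareAndSwapInt64(addr, cur, v) {
        //				return // already large enough, or our CAS won
        //			}
        //		}
        //	}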
 14042  
 14043  // Load returns the value stored in the map for a key, or nil if no
 14044  // value is present.
 14045  // The ok result indicates whether value was found in the map.
 14046  func (s *UintptrMapDesc) Load(key uintptr) (value interface{}, ok bool) {
 14047  	x := s.header
 14048  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 14049  		nex := x.atomicLoadNext(i)
 14050  		for nex != nil && nex.lessthan(key) {
 14051  			x = nex
 14052  			nex = x.atomicLoadNext(i)
 14053  		}
 14054  
 14055  		// Check if the key is already in the skip list.
 14056  		if nex != nil && nex.equal(key) {
 14057  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 14058  				return nex.loadVal(), true
 14059  			}
 14060  			return nil, false
 14061  		}
 14062  	}
 14063  	return nil, false
 14064  }
 14065  
 14066  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 14067  // The loaded result reports whether the key was present.
 14068  // (Modified from Delete)
 14069  func (s *UintptrMapDesc) LoadAndDelete(key uintptr) (value interface{}, loaded bool) {
 14070  	var (
 14071  		nodeToDelete *uintptrNodeDesc
 14072  		isMarked     bool // reports whether this operation marked the node
 14073  		topLayer     = -1
 14074  		preds, succs [maxLevel]*uintptrNodeDesc
 14075  	)
 14076  	for {
 14077  		lFound := s.findNodeDelete(key, &preds, &succs)
 14078  		if isMarked || // this operation marked the node, or the node was found in the skip list
 14079  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 14080  			if !isMarked { // the node has not been marked yet
 14081  				nodeToDelete = succs[lFound]
 14082  				topLayer = lFound
 14083  				nodeToDelete.mu.Lock()
 14084  				if nodeToDelete.flags.Get(marked) {
 14085  					// The node is marked by another process,
 14086  					// the physical deletion will be accomplished by another process.
 14087  					nodeToDelete.mu.Unlock()
 14088  					return nil, false
 14089  				}
 14090  				nodeToDelete.flags.SetTrue(marked)
 14091  				isMarked = true
 14092  			}
 14093  			// Accomplish the physical deletion.
 14094  			var (
 14095  				highestLocked        = -1 // the highest level being locked by this process
 14096  				valid                = true
 14097  				pred, succ, prevPred *uintptrNodeDesc
 14098  			)
 14099  			for layer := 0; valid && (layer <= topLayer); layer++ {
 14100  				pred, succ = preds[layer], succs[layer]
 14101  				if pred != prevPred { // the node in this layer could be locked by previous loop
 14102  					pred.mu.Lock()
 14103  					highestLocked = layer
 14104  					prevPred = pred
 14105  				}
 14106  				// valid checks whether another node has been inserted into the skip list in this
 14107  				// layer during this operation, or whether pred has been deleted by another goroutine.
 14108  				// It is valid if:
 14109  				// 1. The previous node is not marked (not being deleted).
 14110  				// 2. No other node has been inserted into the skip list in this layer.
 14111  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 14112  			}
 14113  			if !valid {
 14114  				unlockUintptrDesc(preds, highestLocked)
 14115  				continue
 14116  			}
 14117  			for i := topLayer; i >= 0; i-- {
 14118  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 14119  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
 14120  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 14121  			}
 14122  			nodeToDelete.mu.Unlock()
 14123  			unlockUintptrDesc(preds, highestLocked)
 14124  			atomic.AddInt64(&s.length, -1)
 14125  			return nodeToDelete.loadVal(), true
 14126  		}
 14127  		return nil, false
 14128  	}
 14129  }
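
        // Illustrative usage (editorial sketch; assumes fmt is available):
        //
        //	if v, loaded := m.LoadAndDelete(42); loaded {
        //		fmt.Println("removed:", v)
        //	} else {
        //		fmt.Println("key 42 was absent")
        //	}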
 14130  
 14131  // LoadOrStore returns the existing value for the key if present.
 14132  // Otherwise, it stores and returns the given value.
 14133  // The loaded result is true if the value was loaded, false if stored.
 14134  // (Modified from Store)
 14135  func (s *UintptrMapDesc) LoadOrStore(key uintptr, value interface{}) (actual interface{}, loaded bool) {
 14136  	var (
 14137  		level        int
 14138  		preds, succs [maxLevel]*uintptrNodeDesc
 14139  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 14140  	)
 14141  	for {
 14142  		nodeFound := s.findNode(key, &preds, &succs)
 14143  		if nodeFound != nil { // indicating the key is already in the skip-list
 14144  			if !nodeFound.flags.Get(marked) {
 14145  				// We don't need to care about whether or not the node is fully linked,
 14146  				// just return the value.
 14147  				return nodeFound.loadVal(), true
 14148  			}
 14149  			// If the node is marked, another goroutine is in the process of deleting it,
 14150  			// so we retry and add the node in the next loop.
 14151  			continue
 14152  		}
 14153  
 14154  		// Add this node into the skip list.
 14155  		var (
 14156  			highestLocked        = -1 // the highest level being locked by this process
 14157  			valid                = true
 14158  			pred, succ, prevPred *uintptrNodeDesc
 14159  		)
 14160  		if level == 0 {
 14161  			level = s.randomlevel()
 14162  			if level > hl {
 14163  				// If the highest level was raised, it usually means that many goroutines
 14164  				// are inserting items. Hopefully we can find a better path in the next loop.
 14165  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 14166  				// but this strategy's performance is almost the same as the existing method.
 14167  				continue
 14168  			}
 14169  		}
 14170  		for layer := 0; valid && layer < level; layer++ {
 14171  			pred = preds[layer]   // target node's previous node
 14172  			succ = succs[layer]   // target node's next node
 14173  			if pred != prevPred { // the node in this layer could be locked by previous loop
 14174  				pred.mu.Lock()
 14175  				highestLocked = layer
 14176  				prevPred = pred
 14177  			}
 14178  			// valid checks whether another node has been inserted into the skip list in this layer during this operation.
 14179  			// It is valid if:
 14180  			// 1. Neither the previous node nor the next node is marked.
 14181  			// 2. The previous node's next node is succ in this layer.
 14182  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 14183  		}
 14184  		if !valid {
 14185  			unlockUintptrDesc(preds, highestLocked)
 14186  			continue
 14187  		}
 14188  
 14189  		nn := newUintptrNodeDesc(key, value, level)
 14190  		for layer := 0; layer < level; layer++ {
 14191  			nn.storeNext(layer, succs[layer])
 14192  			preds[layer].atomicStoreNext(layer, nn)
 14193  		}
 14194  		nn.flags.SetTrue(fullyLinked)
 14195  		unlockUintptrDesc(preds, highestLocked)
 14196  		atomic.AddInt64(&s.length, 1)
 14197  		return value, false
 14198  	}
 14199  }
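
        // Illustrative usage (editorial sketch): only the first LoadOrStore for a
        // key stores; later calls load the existing value.
        //
        //	actual, loaded := m.LoadOrStore(7, "first")  // actual == "first", loaded == false
        //	actual, loaded = m.LoadOrStore(7, "second")  // actual == "first", loaded == true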
 14200  
 14201  // LoadOrStoreLazy returns the existing value for the key if present.
 14202  // Otherwise, it stores and returns the value computed by f; f is invoked at most once, and only when the value is stored.
 14203  // The loaded result is true if the value was loaded, false if stored.
 14204  // (Modified from LoadOrStore)
 14205  func (s *UintptrMapDesc) LoadOrStoreLazy(key uintptr, f func() interface{}) (actual interface{}, loaded bool) {
 14206  	var (
 14207  		level        int
 14208  		preds, succs [maxLevel]*uintptrNodeDesc
 14209  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 14210  	)
 14211  	for {
 14212  		nodeFound := s.findNode(key, &preds, &succs)
 14213  		if nodeFound != nil { // indicating the key is already in the skip-list
 14214  			if !nodeFound.flags.Get(marked) {
 14215  				// We don't need to care about whether or not the node is fully linked,
 14216  				// just return the value.
 14217  				return nodeFound.loadVal(), true
 14218  			}
 14219  			// If the node is marked, another goroutine is in the process of deleting it,
 14220  			// so we retry and add the node in the next loop.
 14221  			continue
 14222  		}
 14223  
 14224  		// Add this node into the skip list.
 14225  		var (
 14226  			highestLocked        = -1 // the highest level being locked by this process
 14227  			valid                = true
 14228  			pred, succ, prevPred *uintptrNodeDesc
 14229  		)
 14230  		if level == 0 {
 14231  			level = s.randomlevel()
 14232  			if level > hl {
 14233  				// If the highest level was raised, it usually means that many goroutines
 14234  				// are inserting items. Hopefully we can find a better path in the next loop.
 14235  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 14236  				// but this strategy's performance is almost the same as the existing method.
 14237  				continue
 14238  			}
 14239  		}
 14240  		for layer := 0; valid && layer < level; layer++ {
 14241  			pred = preds[layer]   // target node's previous node
 14242  			succ = succs[layer]   // target node's next node
 14243  			if pred != prevPred { // the node in this layer could be locked by previous loop
 14244  				pred.mu.Lock()
 14245  				highestLocked = layer
 14246  				prevPred = pred
 14247  			}
 14248  			// valid checks whether another node has been inserted into the skip list in this layer during this operation.
 14249  			// It is valid if:
 14250  			// 1. Neither the previous node nor the next node is marked.
 14251  			// 2. The previous node's next node is succ in this layer.
 14252  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 14253  		}
 14254  		if !valid {
 14255  			unlockUintptrDesc(preds, highestLocked)
 14256  			continue
 14257  		}
 14258  		value := f()
 14259  		nn := newUintptrNodeDesc(key, value, level)
 14260  		for layer := 0; layer < level; layer++ {
 14261  			nn.storeNext(layer, succs[layer])
 14262  			preds[layer].atomicStoreNext(layer, nn)
 14263  		}
 14264  		nn.flags.SetTrue(fullyLinked)
 14265  		unlockUintptrDesc(preds, highestLocked)
 14266  		atomic.AddInt64(&s.length, 1)
 14267  		return value, false
 14268  	}
 14269  }
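
        // Illustrative usage (editorial sketch; buildExpensiveValue is a
        // hypothetical constructor): f runs only if the key is absent, so the
        // expensive work is skipped on the load path.
        //
        //	actual, loaded := m.LoadOrStoreLazy(7, func() interface{} {
        //		return buildExpensiveValue() // called at most once
        //	})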
 14270  
 14271  // Delete deletes the value for a key.
 14272  func (s *UintptrMapDesc) Delete(key uintptr) bool {
 14273  	var (
 14274  		nodeToDelete *uintptrNodeDesc
 14275  		isMarked     bool // reports whether this operation marked the node
 14276  		topLayer     = -1
 14277  		preds, succs [maxLevel]*uintptrNodeDesc
 14278  	)
 14279  	for {
 14280  		lFound := s.findNodeDelete(key, &preds, &succs)
 14281  		if isMarked || // this operation marked the node, or the node was found in the skip list
 14282  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 14283  			if !isMarked { // the node has not been marked yet
 14284  				nodeToDelete = succs[lFound]
 14285  				topLayer = lFound
 14286  				nodeToDelete.mu.Lock()
 14287  				if nodeToDelete.flags.Get(marked) {
 14288  					// The node is marked by another process,
 14289  					// the physical deletion will be accomplished by another process.
 14290  					nodeToDelete.mu.Unlock()
 14291  					return false
 14292  				}
 14293  				nodeToDelete.flags.SetTrue(marked)
 14294  				isMarked = true
 14295  			}
 14296  			// Accomplish the physical deletion.
 14297  			var (
 14298  				highestLocked        = -1 // the highest level being locked by this process
 14299  				valid                = true
 14300  				pred, succ, prevPred *uintptrNodeDesc
 14301  			)
 14302  			for layer := 0; valid && (layer <= topLayer); layer++ {
 14303  				pred, succ = preds[layer], succs[layer]
 14304  				if pred != prevPred { // the node in this layer could be locked by previous loop
 14305  					pred.mu.Lock()
 14306  					highestLocked = layer
 14307  					prevPred = pred
 14308  				}
 14309  				// valid checks whether another node has been inserted into the skip list in this
 14310  				// layer during this operation, or whether pred has been deleted by another goroutine.
 14311  				// It is valid if:
 14312  				// 1. The previous node is not marked (not being deleted).
 14313  				// 2. No other node has been inserted into the skip list in this layer.
 14314  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 14315  			}
 14316  			if !valid {
 14317  				unlockUintptrDesc(preds, highestLocked)
 14318  				continue
 14319  			}
 14320  			for i := topLayer; i >= 0; i-- {
 14321  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 14322  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
 14323  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 14324  			}
 14325  			nodeToDelete.mu.Unlock()
 14326  			unlockUintptrDesc(preds, highestLocked)
 14327  			atomic.AddInt64(&s.length, -1)
 14328  			return true
 14329  		}
 14330  		return false
 14331  	}
 14332  }
 14333  
 14334  // Range calls f sequentially for each key and value present in the skipmap.
 14335  // If f returns false, range stops the iteration.
 14336  //
 14337  // Range does not necessarily correspond to any consistent snapshot of the Map's
 14338  // contents: no key will be visited more than once, but if the value for any key
 14339  // is stored or deleted concurrently, Range may reflect any mapping for that key
 14340  // from any point during the Range call.
 14341  func (s *UintptrMapDesc) Range(f func(key uintptr, value interface{}) bool) {
 14342  	x := s.header.atomicLoadNext(0)
 14343  	for x != nil {
 14344  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 14345  			x = x.atomicLoadNext(0)
 14346  			continue
 14347  		}
 14348  		if !f(x.key, x.loadVal()) {
 14349  			break
 14350  		}
 14351  		x = x.atomicLoadNext(0)
 14352  	}
 14353  }
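
        // Illustrative usage (editorial sketch): returning false stops the
        // iteration early, e.g. to collect only the three largest keys.
        //
        //	keys := make([]uintptr, 0, 3)
        //	m.Range(func(key uintptr, value interface{}) bool {
        //		keys = append(keys, key)
        //		return len(keys) < 3
        //	})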
 14354  
 14355  // Len returns the number of elements in this skipmap.
 14356  func (s *UintptrMapDesc) Len() int {
 14357  	return int(atomic.LoadInt64(&s.length))
 14358  }
 14359  
 14360  // StringMap represents a map based on skip list, in ascending order of hash(key) with ties broken by the key itself.
 14361  type StringMap struct {
 14362  	header       *stringNode
 14363  	length       int64
 14364  	highestLevel int64 // highest level for now
 14365  }
 14366  
 14367  type stringNode struct {
 14368  	key   string
 14369  	score uint64
 14370  	value unsafe.Pointer // *interface{}
 14371  	next  optionalArray  // [level]*stringNode
 14372  	mu    sync.Mutex
 14373  	flags bitflag
 14374  	level uint32
 14375  }
 14376  
 14377  func newStringNode(key string, value interface{}, level int) *stringNode {
 14378  	node := &stringNode{
 14379  		score: hash(key),
 14380  		key:   key,
 14381  		level: uint32(level),
 14382  	}
 14383  	node.storeVal(value)
 14384  	if level > op1 {
 14385  		node.next.extra = new([op2]unsafe.Pointer)
 14386  	}
 14387  	return node
 14388  }
 14389  
 14390  func (n *stringNode) storeVal(value interface{}) {
 14391  	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
 14392  }
 14393  
 14394  func (n *stringNode) loadVal() interface{} {
 14395  	return *(*interface{})(atomic.LoadPointer(&n.value))
 14396  }
 14397  
 14398  func (n *stringNode) loadNext(i int) *stringNode {
 14399  	return (*stringNode)(n.next.load(i))
 14400  }
 14401  
 14402  func (n *stringNode) storeNext(i int, node *stringNode) {
 14403  	n.next.store(i, unsafe.Pointer(node))
 14404  }
 14405  
 14406  func (n *stringNode) atomicLoadNext(i int) *stringNode {
 14407  	return (*stringNode)(n.next.atomicLoad(i))
 14408  }
 14409  
 14410  func (n *stringNode) atomicStoreNext(i int, node *stringNode) {
 14411  	n.next.atomicStore(i, unsafe.Pointer(node))
 14412  }
 14413  
 14414  // NewString returns an empty string skipmap.
 14415  func NewString() *StringMap {
 14416  	h := newStringNode("", "", maxLevel)
 14417  	h.flags.SetTrue(fullyLinked)
 14418  	return &StringMap{
 14419  		header:       h,
 14420  		highestLevel: defaultHighestLevel,
 14421  	}
 14422  }
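
        // Illustrative usage (editorial sketch): values round-trip as interface{}.
        // Note that StringMap orders nodes by hash(key), not lexicographically.
        //
        //	m := skipmap.NewString()
        //	m.Store("foo", 1)
        //	m.Store("bar", 2)
        //	if v, ok := m.Load("foo"); ok {
        //		n := v.(int) // n == 1
        //		_ = n
        //	}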
 14423  
 14424  // findNode takes a key and two maximal-height arrays, then searches exactly as in a sequential skipmap.
 14425  // The returned preds and succs always satisfy preds[i] < key <= succs[i] under the (hash(key), key) ordering.
 14426  // Unlike findNodeDelete, findNode returns immediately once the node is found, without recording the full path.
 14427  func (s *StringMap) findNode(key string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) *stringNode {
 14428  	score := hash(key)
 14429  	x := s.header
 14430  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 14431  		succ := x.atomicLoadNext(i)
 14432  		for succ != nil && succ.cmp(score, key) < 0 {
 14433  			x = succ
 14434  			succ = x.atomicLoadNext(i)
 14435  		}
 14436  		preds[i] = x
 14437  		succs[i] = succ
 14438  
 14439  		// Check if the key is already in the skipmap.
 14440  		if succ != nil && succ.cmp(score, key) == 0 {
 14441  			return succ
 14442  		}
 14443  	}
 14444  	return nil
 14445  }
 14446  
 14447  // findNodeDelete takes a key and two maximal-height arrays, then searches exactly as in a sequential skip list.
 14448  // The returned preds and succs always satisfy preds[i] < key <= succs[i] under the (hash(key), key) ordering.
 14449  func (s *StringMap) findNodeDelete(key string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) int {
 14450  	score := hash(key)
 14451  	// lFound is the index of the first (highest) layer at which the node was found.
 14452  	lFound, x := -1, s.header
 14453  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 14454  		succ := x.atomicLoadNext(i)
 14455  		for succ != nil && succ.cmp(score, key) < 0 {
 14456  			x = succ
 14457  			succ = x.atomicLoadNext(i)
 14458  		}
 14459  		preds[i] = x
 14460  		succs[i] = succ
 14461  
 14462  		// Check if the key is already in the skip list.
 14463  		if lFound == -1 && succ != nil && succ.cmp(score, key) == 0 {
 14464  			lFound = i
 14465  		}
 14466  	}
 14467  	return lFound
 14468  }
 14469  
 14470  func unlockString(preds [maxLevel]*stringNode, highestLevel int) {
 14471  	var prevPred *stringNode
 14472  	for i := highestLevel; i >= 0; i-- {
 14473  		if preds[i] != prevPred { // the node could be unlocked by previous loop
 14474  			preds[i].mu.Unlock()
 14475  			prevPred = preds[i]
 14476  		}
 14477  	}
 14478  }
 14479  
 14480  // Store sets the value for a key.
 14481  func (s *StringMap) Store(key string, value interface{}) {
 14482  	level := s.randomlevel()
 14483  	var preds, succs [maxLevel]*stringNode
 14484  	for {
 14485  		nodeFound := s.findNode(key, &preds, &succs)
 14486  		if nodeFound != nil { // indicating the key is already in the skip-list
 14487  			if !nodeFound.flags.Get(marked) {
 14488  				// We don't need to care about whether or not the node is fully linked,
 14489  				// just replace the value.
 14490  				nodeFound.storeVal(value)
 14491  				return
 14492  			}
 14493  			// If the node is marked, another goroutine is in the process of deleting it,
 14494  			// so we retry and add the node in the next loop.
 14495  			continue
 14496  		}
 14497  
 14498  		// Add this node into the skip list.
 14499  		var (
 14500  			highestLocked        = -1 // the highest level being locked by this process
 14501  			valid                = true
 14502  			pred, succ, prevPred *stringNode
 14503  		)
 14504  		for layer := 0; valid && layer < level; layer++ {
 14505  			pred = preds[layer]   // target node's previous node
 14506  			succ = succs[layer]   // target node's next node
 14507  			if pred != prevPred { // the node in this layer could be locked by previous loop
 14508  				pred.mu.Lock()
 14509  				highestLocked = layer
 14510  				prevPred = pred
 14511  			}
 14512  			// valid checks whether another node has been inserted into the skip list in this layer during this operation.
 14513  			// It is valid if:
 14514  			// 1. Neither the previous node nor the next node is marked.
 14515  			// 2. The previous node's next node is succ in this layer.
 14516  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 14517  		}
 14518  		if !valid {
 14519  			unlockString(preds, highestLocked)
 14520  			continue
 14521  		}
 14522  
 14523  		nn := newStringNode(key, value, level)
 14524  		for layer := 0; layer < level; layer++ {
 14525  			nn.storeNext(layer, succs[layer])
 14526  			preds[layer].atomicStoreNext(layer, nn)
 14527  		}
 14528  		nn.flags.SetTrue(fullyLinked)
 14529  		unlockString(preds, highestLocked)
 14530  		atomic.AddInt64(&s.length, 1)
 14531  		return
 14532  	}
 14533  }
 14534  
 14535  func (s *StringMap) randomlevel() int {
 14536  	// Generate random level.
 14537  	level := randomLevel()
 14538  	// Update highest level if possible.
 14539  	for {
 14540  		hl := atomic.LoadInt64(&s.highestLevel)
 14541  		if int64(level) <= hl {
 14542  			break
 14543  		}
 14544  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
 14545  			break
 14546  		}
 14547  	}
 14548  	return level
 14549  }
 14550  
 14551  // Load returns the value stored in the map for a key, or nil if no
 14552  // value is present.
 14553  // The ok result indicates whether value was found in the map.
 14554  func (s *StringMap) Load(key string) (value interface{}, ok bool) {
 14555  	score := hash(key)
 14556  	x := s.header
 14557  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
 14558  		nex := x.atomicLoadNext(i)
 14559  		for nex != nil && nex.cmp(score, key) < 0 {
 14560  			x = nex
 14561  			nex = x.atomicLoadNext(i)
 14562  		}
 14563  
 14564  		// Check if the key is already in the skip list.
 14565  		if nex != nil && nex.cmp(score, key) == 0 {
 14566  			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
 14567  				return nex.loadVal(), true
 14568  			}
 14569  			return nil, false
 14570  		}
 14571  	}
 14572  	return nil, false
 14573  }
 14574  
 14575  // LoadAndDelete deletes the value for a key, returning the previous value if any.
 14576  // The loaded result reports whether the key was present.
 14577  // (Modified from Delete)
 14578  func (s *StringMap) LoadAndDelete(key string) (value interface{}, loaded bool) {
 14579  	var (
 14580  		nodeToDelete *stringNode
 14581  		isMarked     bool // reports whether this operation marked the node
 14582  		topLayer     = -1
 14583  		preds, succs [maxLevel]*stringNode
 14584  	)
 14585  	for {
 14586  		lFound := s.findNodeDelete(key, &preds, &succs)
 14587  		if isMarked || // this operation marked the node, or the node was found in the skip list
 14588  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 14589  			if !isMarked { // the node has not been marked yet
 14590  				nodeToDelete = succs[lFound]
 14591  				topLayer = lFound
 14592  				nodeToDelete.mu.Lock()
 14593  				if nodeToDelete.flags.Get(marked) {
 14594  					// The node is marked by another process,
 14595  					// the physical deletion will be accomplished by another process.
 14596  					nodeToDelete.mu.Unlock()
 14597  					return nil, false
 14598  				}
 14599  				nodeToDelete.flags.SetTrue(marked)
 14600  				isMarked = true
 14601  			}
 14602  			// Accomplish the physical deletion.
 14603  			var (
 14604  				highestLocked        = -1 // the highest level being locked by this process
 14605  				valid                = true
 14606  				pred, succ, prevPred *stringNode
 14607  			)
 14608  			for layer := 0; valid && (layer <= topLayer); layer++ {
 14609  				pred, succ = preds[layer], succs[layer]
 14610  				if pred != prevPred { // the node in this layer could be locked by previous loop
 14611  					pred.mu.Lock()
 14612  					highestLocked = layer
 14613  					prevPred = pred
 14614  				}
 14615  				// valid checks whether another node has been inserted into the skip list in this
 14616  				// layer during this operation, or whether pred has been deleted by another goroutine.
 14617  				// It is valid if:
 14618  				// 1. The previous node is not marked (not being deleted).
 14619  				// 2. No other node has been inserted into the skip list in this layer.
 14620  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
 14621  			}
 14622  			if !valid {
 14623  				unlockString(preds, highestLocked)
 14624  				continue
 14625  			}
 14626  			for i := topLayer; i >= 0; i-- {
 14627  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 14628  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
 14629  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 14630  			}
 14631  			nodeToDelete.mu.Unlock()
 14632  			unlockString(preds, highestLocked)
 14633  			atomic.AddInt64(&s.length, -1)
 14634  			return nodeToDelete.loadVal(), true
 14635  		}
 14636  		return nil, false
 14637  	}
 14638  }
 14639  
 14640  // LoadOrStore returns the existing value for the key if present.
 14641  // Otherwise, it stores and returns the given value.
 14642  // The loaded result is true if the value was loaded, false if stored.
 14643  // (Modified from Store)
 14644  func (s *StringMap) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) {
 14645  	var (
 14646  		level        int
 14647  		preds, succs [maxLevel]*stringNode
 14648  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 14649  	)
 14650  	for {
 14651  		nodeFound := s.findNode(key, &preds, &succs)
 14652  		if nodeFound != nil { // indicating the key is already in the skip-list
 14653  			if !nodeFound.flags.Get(marked) {
 14654  				// We don't need to care about whether or not the node is fully linked,
 14655  				// just return the value.
 14656  				return nodeFound.loadVal(), true
 14657  			}
 14658  			// If the node is marked, another goroutine is in the process of deleting it,
 14659  			// so we retry and add the node in the next loop.
 14660  			continue
 14661  		}
 14662  
 14663  		// Add this node into the skip list.
 14664  		var (
 14665  			highestLocked        = -1 // the highest level being locked by this process
 14666  			valid                = true
 14667  			pred, succ, prevPred *stringNode
 14668  		)
 14669  		if level == 0 {
 14670  			level = s.randomlevel()
 14671  			if level > hl {
 14672  				// If the highest level was raised, it usually means that many goroutines
 14673  				// are inserting items. Hopefully we can find a better path in the next loop.
 14674  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 14675  				// but this strategy's performance is almost the same as the existing method.
 14676  				continue
 14677  			}
 14678  		}
 14679  		for layer := 0; valid && layer < level; layer++ {
 14680  			pred = preds[layer]   // target node's previous node
 14681  			succ = succs[layer]   // target node's next node
 14682  			if pred != prevPred { // the node in this layer could be locked by previous loop
 14683  				pred.mu.Lock()
 14684  				highestLocked = layer
 14685  				prevPred = pred
 14686  			}
 14687  			// valid checks whether another node has been inserted into the skip list in this layer during this operation.
 14688  			// It is valid if:
 14689  			// 1. Neither the previous node nor the next node is marked.
 14690  			// 2. The previous node's next node is succ in this layer.
 14691  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
 14692  		}
 14693  		if !valid {
 14694  			unlockString(preds, highestLocked)
 14695  			continue
 14696  		}
 14697  
 14698  		nn := newStringNode(key, value, level)
 14699  		for layer := 0; layer < level; layer++ {
 14700  			nn.storeNext(layer, succs[layer])
 14701  			preds[layer].atomicStoreNext(layer, nn)
 14702  		}
 14703  		nn.flags.SetTrue(fullyLinked)
 14704  		unlockString(preds, highestLocked)
 14705  		atomic.AddInt64(&s.length, 1)
 14706  		return value, false
 14707  	}
 14708  }
 14709  
 14710  // LoadOrStoreLazy returns the existing value for the key if present.
 14711  // Otherwise, it stores and returns the value computed by f; f is invoked at most once, and only when the value is stored.
 14712  // The loaded result is true if the value was loaded, false if stored.
 14713  // (Modified from LoadOrStore)
 14714  func (s *StringMap) LoadOrStoreLazy(key string, f func() interface{}) (actual interface{}, loaded bool) {
 14715  	var (
 14716  		level        int
 14717  		preds, succs [maxLevel]*stringNode
 14718  		hl           = int(atomic.LoadInt64(&s.highestLevel))
 14719  	)
 14720  	for {
 14721  		nodeFound := s.findNode(key, &preds, &succs)
 14722  		if nodeFound != nil { // indicating the key is already in the skip-list
 14723  			if !nodeFound.flags.Get(marked) {
 14724  				// We don't need to care about whether or not the node is fully linked,
 14725  				// just return the value.
 14726  				return nodeFound.loadVal(), true
 14727  			}
 14728  			// If the node is marked, another goroutine is in the process of deleting it,
 14729  			// so we retry and add the node in the next loop.
 14730  			continue
 14731  		}
 14732  
 14733  		// Add this node into the skip list.
 14734  		var (
 14735  			highestLocked        = -1 // the highest level being locked by this process
 14736  			valid                = true
 14737  			pred, succ, prevPred *stringNode
 14738  		)
 14739  		if level == 0 {
 14740  			level = s.randomlevel()
 14741  			if level > hl {
 14742  				// If the highest level was raised, it usually means that many goroutines
 14743  				// are inserting items. Hopefully we can find a better path in the next loop.
 14744  				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
 14745  				// but this strategy's performance is almost the same as the existing method.
 14746  				continue
 14747  			}
 14748  		}
 14749  		for layer := 0; valid && layer < level; layer++ {
 14750  			pred = preds[layer]   // target node's previous node
 14751  			succ = succs[layer]   // target node's next node
 14752  			if pred != prevPred { // the node in this layer could be locked by previous loop
 14753  				pred.mu.Lock()
 14754  				highestLocked = layer
 14755  				prevPred = pred
 14756  			}
 14757  			// valid checks whether another node has been inserted into the skip list in this layer during this operation.
 14758  			// It is valid if:
 14759  			// 1. Neither the previous node nor the next node is marked.
 14760  			// 2. The previous node's next node is succ in this layer.
 14761  			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
 14762  		}
 14763  		if !valid {
 14764  			unlockString(preds, highestLocked)
 14765  			continue
 14766  		}
 14767  		value := f()
 14768  		nn := newStringNode(key, value, level)
 14769  		for layer := 0; layer < level; layer++ {
 14770  			nn.storeNext(layer, succs[layer])
 14771  			preds[layer].atomicStoreNext(layer, nn)
 14772  		}
 14773  		nn.flags.SetTrue(fullyLinked)
 14774  		unlockString(preds, highestLocked)
 14775  		atomic.AddInt64(&s.length, 1)
 14776  		return value, false
 14777  	}
 14778  }
 14779  
 14780  // Delete deletes the value for a key.
 14781  func (s *StringMap) Delete(key string) bool {
 14782  	var (
 14783  		nodeToDelete *stringNode
 14784  		isMarked     bool // reports whether this operation marked the node
 14785  		topLayer     = -1
 14786  		preds, succs [maxLevel]*stringNode
 14787  	)
 14788  	for {
 14789  		lFound := s.findNodeDelete(key, &preds, &succs)
 14790  		if isMarked || // this operation marked the node, or the node was found in the skip list
 14791  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
 14792  			if !isMarked { // the node has not been marked yet
 14793  				nodeToDelete = succs[lFound]
 14794  				topLayer = lFound
 14795  				nodeToDelete.mu.Lock()
 14796  				if nodeToDelete.flags.Get(marked) {
 14797  					// The node is marked by another process,
 14798  					// the physical deletion will be accomplished by another process.
 14799  					nodeToDelete.mu.Unlock()
 14800  					return false
 14801  				}
 14802  				nodeToDelete.flags.SetTrue(marked)
 14803  				isMarked = true
 14804  			}
 14805  			// Accomplish the physical deletion.
 14806  			var (
 14807  				highestLocked        = -1 // the highest level being locked by this process
 14808  				valid                = true
 14809  				pred, succ, prevPred *stringNode
 14810  			)
 14811  			for layer := 0; valid && (layer <= topLayer); layer++ {
 14812  				pred, succ = preds[layer], succs[layer]
 14813  				if pred != prevPred { // the node in this layer could be locked by previous loop
 14814  					pred.mu.Lock()
 14815  					highestLocked = layer
 14816  					prevPred = pred
 14817  				}
 14818  				// valid checks whether another node has been inserted into the skip list in this
 14819  				// layer during this operation, or whether pred has been deleted by another goroutine.
 14820  				// It is valid if:
 14821  				// 1. The previous node is not marked (not being deleted).
 14822  				// 2. No other node has been inserted into the skip list in this layer.
 14823  				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
 14824  			}
 14825  			if !valid {
 14826  				unlockString(preds, highestLocked)
 14827  				continue
 14828  			}
 14829  			for i := topLayer; i >= 0; i-- {
 14830  				// Now we own `nodeToDelete`; no other goroutine will modify it,
 14831  				// so the non-atomic `nodeToDelete.loadNext` is sufficient here.
 14832  				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
 14833  			}
 14834  			nodeToDelete.mu.Unlock()
 14835  			unlockString(preds, highestLocked)
 14836  			atomic.AddInt64(&s.length, -1)
 14837  			return true
 14838  		}
 14839  		return false
 14840  	}
 14841  }
 14842  
 14843  // Range calls f sequentially for each key and value present in the skipmap.
 14844  // If f returns false, range stops the iteration.
 14845  //
 14846  // Range does not necessarily correspond to any consistent snapshot of the Map's
 14847  // contents: no key will be visited more than once, but if the value for any key
 14848  // is stored or deleted concurrently, Range may reflect any mapping for that key
 14849  // from any point during the Range call.
 14850  func (s *StringMap) Range(f func(key string, value interface{}) bool) {
 14851  	x := s.header.atomicLoadNext(0)
 14852  	for x != nil {
 14853  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
 14854  			x = x.atomicLoadNext(0)
 14855  			continue
 14856  		}
 14857  		if !f(x.key, x.loadVal()) {
 14858  			break
 14859  		}
 14860  		x = x.atomicLoadNext(0)
 14861  	}
 14862  }
 14863  
 14864  // Len returns the number of elements in this skipmap.
 14865  func (s *StringMap) Len() int {
 14866  	return int(atomic.LoadInt64(&s.length))
 14867  }
 14868  
 14869  // cmp returns 1 if n is greater than the given (score, key) pair, 0 if equal, and -1 otherwise.
 14870  func (n *stringNode) cmp(score uint64, key string) int {
 14871  	if n.score > score {
 14872  		return 1
 14873  	} else if n.score == score {
 14874  		return cmpstring(n.key, key)
 14875  	}
 14876  	return -1
 14877  }
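
        // Worked example (editorial note): comparison is by hash score first and
        // by key second, so distinct keys with colliding hashes stay distinct.
        // Assuming a hypothetical collision hash("a") == hash("b") == 5:
        //
        //	(&stringNode{key: "b", score: 5}).cmp(5, "a") // == 1, since "b" > "a"
        //	(&stringNode{key: "a", score: 5}).cmp(5, "a") // == 0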