github.com/songzhibin97/gkit@v1.2.13/structure/skipset/types.go (about)

     1  // Copyright 2021 ByteDance Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Code generated by go run types_gen.go; DO NOT EDIT.
    16  package skipset
    17  
    18  import (
    19  	"sync"
    20  	"sync/atomic"
    21  	"unsafe"
    22  )
    23  
    24  // ByteSet represents a set based on skip list in ascending order.
    25  type ByteSet struct {
    26  	header       *byteNode
    27  	length       int64
    28  	highestLevel int64 // highest level for now
    29  }
    30  
    31  type byteNode struct {
    32  	value byte
    33  	next  optionalArray // [level]*byteNode
    34  	mu    sync.Mutex
    35  	flags bitflag
    36  	level uint32
    37  }
    38  
    39  func newByteNode(value byte, level int) *byteNode {
    40  	node := &byteNode{
    41  		value: value,
    42  		level: uint32(level),
    43  	}
    44  	if level > op1 {
    45  		node.next.extra = new([op2]unsafe.Pointer)
    46  	}
    47  	return node
    48  }
    49  
    50  func (n *byteNode) loadNext(i int) *byteNode {
    51  	return (*byteNode)(n.next.load(i))
    52  }
    53  
    54  func (n *byteNode) storeNext(i int, node *byteNode) {
    55  	n.next.store(i, unsafe.Pointer(node))
    56  }
    57  
    58  func (n *byteNode) atomicLoadNext(i int) *byteNode {
    59  	return (*byteNode)(n.next.atomicLoad(i))
    60  }
    61  
    62  func (n *byteNode) atomicStoreNext(i int, node *byteNode) {
    63  	n.next.atomicStore(i, unsafe.Pointer(node))
    64  }
    65  
    66  func (n *byteNode) lessthan(value byte) bool {
    67  	return n.value < value
    68  }
    69  
    70  func (n *byteNode) equal(value byte) bool {
    71  	return n.value == value
    72  }
    73  
    74  // NewByte return an empty byte skip set in ascending order.
    75  func NewByte() *ByteSet {
    76  	h := newByteNode(0, maxLevel)
    77  	h.flags.SetTrue(fullyLinked)
    78  	return &ByteSet{
    79  		header:       h,
    80  		highestLevel: defaultHighestLevel,
    81  	}
    82  }
    83  
    84  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
    85  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
    86  func (s *ByteSet) findNodeRemove(value byte, preds *[maxLevel]*byteNode, succs *[maxLevel]*byteNode) int {
    87  	// lFound represents the index of the first layer at which it found a node.
    88  	lFound, x := -1, s.header
    89  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
    90  		succ := x.atomicLoadNext(i)
    91  		for succ != nil && succ.lessthan(value) {
    92  			x = succ
    93  			succ = x.atomicLoadNext(i)
    94  		}
    95  		preds[i] = x
    96  		succs[i] = succ
    97  
    98  		// Check if the value already in the skip list.
    99  		if lFound == -1 && succ != nil && succ.equal(value) {
   100  			lFound = i
   101  		}
   102  	}
   103  	return lFound
   104  }
   105  
   106  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
   107  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   108  func (s *ByteSet) findNodeAdd(value byte, preds *[maxLevel]*byteNode, succs *[maxLevel]*byteNode) int {
   109  	x := s.header
   110  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   111  		succ := x.atomicLoadNext(i)
   112  		for succ != nil && succ.lessthan(value) {
   113  			x = succ
   114  			succ = x.atomicLoadNext(i)
   115  		}
   116  		preds[i] = x
   117  		succs[i] = succ
   118  
   119  		// Check if the value already in the skip list.
   120  		if succ != nil && succ.equal(value) {
   121  			return i
   122  		}
   123  	}
   124  	return -1
   125  }
   126  
   127  func unlockByte(preds [maxLevel]*byteNode, highestLevel int) {
   128  	var prevPred *byteNode
   129  	for i := highestLevel; i >= 0; i-- {
   130  		if preds[i] != prevPred { // the node could be unlocked by previous loop
   131  			preds[i].mu.Unlock()
   132  			prevPred = preds[i]
   133  		}
   134  	}
   135  }
   136  
   137  // Add add the value into skip set, return true if this process insert the value into skip set,
   138  // return false if this process can't insert this value, because another process has insert the same value.
   139  //
   140  // If the value is in the skip set but not fully linked, this process will wait until it is.
   141  func (s *ByteSet) Add(value byte) bool {
   142  	level := s.randomLevel()
   143  	var preds, succs [maxLevel]*byteNode
   144  	for {
   145  		lFound := s.findNodeAdd(value, &preds, &succs)
   146  		if lFound != -1 { // indicating the value is already in the skip-list
   147  			nodeFound := succs[lFound]
   148  			if !nodeFound.flags.Get(marked) {
   149  				for !nodeFound.flags.Get(fullyLinked) {
   150  					// The node is not yet fully linked, just waits until it is.
   151  				}
   152  				return false
   153  			}
   154  			// If the node is marked, represents some other thread is in the process of deleting this node,
   155  			// we need to add this node in next loop.
   156  			continue
   157  		}
   158  		// Add this node into skip list.
   159  		var (
   160  			highestLocked        = -1 // the highest level being locked by this process
   161  			valid                = true
   162  			pred, succ, prevPred *byteNode
   163  		)
   164  		for layer := 0; valid && layer < level; layer++ {
   165  			pred = preds[layer]   // target node's previous node
   166  			succ = succs[layer]   // target node's next node
   167  			if pred != prevPred { // the node in this layer could be locked by previous loop
   168  				pred.mu.Lock()
   169  				highestLocked = layer
   170  				prevPred = pred
   171  			}
   172  			// valid check if there is another node has inserted into the skip list in this layer during this process.
   173  			// It is valid if:
   174  			// 1. The previous node and next node both are not marked.
   175  			// 2. The previous node's next node is succ in this layer.
   176  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
   177  		}
   178  		if !valid {
   179  			unlockByte(preds, highestLocked)
   180  			continue
   181  		}
   182  
   183  		nn := newByteNode(value, level)
   184  		for layer := 0; layer < level; layer++ {
   185  			nn.storeNext(layer, succs[layer])
   186  			preds[layer].atomicStoreNext(layer, nn)
   187  		}
   188  		nn.flags.SetTrue(fullyLinked)
   189  		unlockByte(preds, highestLocked)
   190  		atomic.AddInt64(&s.length, 1)
   191  		return true
   192  	}
   193  }
   194  
   195  func (s *ByteSet) randomLevel() int {
   196  	// Generate random level.
   197  	level := randomLevel()
   198  	// Update highest level if possible.
   199  	for {
   200  		hl := atomic.LoadInt64(&s.highestLevel)
   201  		if int64(level) <= hl {
   202  			break
   203  		}
   204  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
   205  			break
   206  		}
   207  	}
   208  	return level
   209  }
   210  
   211  // Contains check if the value is in the skip set.
   212  func (s *ByteSet) Contains(value byte) bool {
   213  	x := s.header
   214  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   215  		nex := x.atomicLoadNext(i)
   216  		for nex != nil && nex.lessthan(value) {
   217  			x = nex
   218  			nex = x.atomicLoadNext(i)
   219  		}
   220  
   221  		// Check if the value already in the skip list.
   222  		if nex != nil && nex.equal(value) {
   223  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
   224  		}
   225  	}
   226  	return false
   227  }
   228  
   229  // Remove a node from the skip set.
   230  func (s *ByteSet) Remove(value byte) bool {
   231  	var (
   232  		nodeToRemove *byteNode
   233  		isMarked     bool // represents if this operation mark the node
   234  		topLayer     = -1
   235  		preds, succs [maxLevel]*byteNode
   236  	)
   237  	for {
   238  		lFound := s.findNodeRemove(value, &preds, &succs)
   239  		if isMarked || // this process mark this node or we can find this node in the skip list
   240  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
   241  			if !isMarked { // we don't mark this node for now
   242  				nodeToRemove = succs[lFound]
   243  				topLayer = lFound
   244  				nodeToRemove.mu.Lock()
   245  				if nodeToRemove.flags.Get(marked) {
   246  					// The node is marked by another process,
   247  					// the physical deletion will be accomplished by another process.
   248  					nodeToRemove.mu.Unlock()
   249  					return false
   250  				}
   251  				nodeToRemove.flags.SetTrue(marked)
   252  				isMarked = true
   253  			}
   254  			// Accomplish the physical deletion.
   255  			var (
   256  				highestLocked        = -1 // the highest level being locked by this process
   257  				valid                = true
   258  				pred, succ, prevPred *byteNode
   259  			)
   260  			for layer := 0; valid && (layer <= topLayer); layer++ {
   261  				pred, succ = preds[layer], succs[layer]
   262  				if pred != prevPred { // the node in this layer could be locked by previous loop
   263  					pred.mu.Lock()
   264  					highestLocked = layer
   265  					prevPred = pred
   266  				}
   267  				// valid check if there is another node has inserted into the skip list in this layer
   268  				// during this process, or the previous is removed by another process.
   269  				// It is valid if:
   270  				// 1. the previous node exists.
   271  				// 2. no another node has inserted into the skip list in this layer.
   272  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
   273  			}
   274  			if !valid {
   275  				unlockByte(preds, highestLocked)
   276  				continue
   277  			}
   278  			for i := topLayer; i >= 0; i-- {
   279  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
   280  				// So we don't need `nodeToRemove.loadNext`
   281  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
   282  			}
   283  			nodeToRemove.mu.Unlock()
   284  			unlockByte(preds, highestLocked)
   285  			atomic.AddInt64(&s.length, -1)
   286  			return true
   287  		}
   288  		return false
   289  	}
   290  }
   291  
   292  // Range calls f sequentially for each value present in the skip set.
   293  // If f returns false, range stops the iteration.
   294  func (s *ByteSet) Range(f func(value byte) bool) {
   295  	x := s.header.atomicLoadNext(0)
   296  	for x != nil {
   297  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
   298  			x = x.atomicLoadNext(0)
   299  			continue
   300  		}
   301  		if !f(x.value) {
   302  			break
   303  		}
   304  		x = x.atomicLoadNext(0)
   305  	}
   306  }
   307  
   308  // Len return the length of this skip set.
   309  func (s *ByteSet) Len() int {
   310  	return int(atomic.LoadInt64(&s.length))
   311  }
   312  
   313  // ByteSetDesc represents a set based on skip list in descending order.
   314  type ByteSetDesc struct {
   315  	header       *byteNodeDesc
   316  	length       int64
   317  	highestLevel int64 // highest level for now
   318  }
   319  
   320  type byteNodeDesc struct {
   321  	value byte
   322  	next  optionalArray // [level]*byteNodeDesc
   323  	mu    sync.Mutex
   324  	flags bitflag
   325  	level uint32
   326  }
   327  
   328  func newByteNodeDesc(value byte, level int) *byteNodeDesc {
   329  	node := &byteNodeDesc{
   330  		value: value,
   331  		level: uint32(level),
   332  	}
   333  	if level > op1 {
   334  		node.next.extra = new([op2]unsafe.Pointer)
   335  	}
   336  	return node
   337  }
   338  
   339  func (n *byteNodeDesc) loadNext(i int) *byteNodeDesc {
   340  	return (*byteNodeDesc)(n.next.load(i))
   341  }
   342  
   343  func (n *byteNodeDesc) storeNext(i int, node *byteNodeDesc) {
   344  	n.next.store(i, unsafe.Pointer(node))
   345  }
   346  
   347  func (n *byteNodeDesc) atomicLoadNext(i int) *byteNodeDesc {
   348  	return (*byteNodeDesc)(n.next.atomicLoad(i))
   349  }
   350  
   351  func (n *byteNodeDesc) atomicStoreNext(i int, node *byteNodeDesc) {
   352  	n.next.atomicStore(i, unsafe.Pointer(node))
   353  }
   354  
   355  func (n *byteNodeDesc) lessthan(value byte) bool {
   356  	return n.value > value
   357  }
   358  
   359  func (n *byteNodeDesc) equal(value byte) bool {
   360  	return n.value == value
   361  }
   362  
   363  // NewByteDesc return an empty byte skip set in descending order.
   364  func NewByteDesc() *ByteSetDesc {
   365  	h := newByteNodeDesc(0, maxLevel)
   366  	h.flags.SetTrue(fullyLinked)
   367  	return &ByteSetDesc{
   368  		header:       h,
   369  		highestLevel: defaultHighestLevel,
   370  	}
   371  }
   372  
   373  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
   374  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   375  func (s *ByteSetDesc) findNodeRemove(value byte, preds *[maxLevel]*byteNodeDesc, succs *[maxLevel]*byteNodeDesc) int {
   376  	// lFound represents the index of the first layer at which it found a node.
   377  	lFound, x := -1, s.header
   378  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   379  		succ := x.atomicLoadNext(i)
   380  		for succ != nil && succ.lessthan(value) {
   381  			x = succ
   382  			succ = x.atomicLoadNext(i)
   383  		}
   384  		preds[i] = x
   385  		succs[i] = succ
   386  
   387  		// Check if the value already in the skip list.
   388  		if lFound == -1 && succ != nil && succ.equal(value) {
   389  			lFound = i
   390  		}
   391  	}
   392  	return lFound
   393  }
   394  
   395  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
   396  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   397  func (s *ByteSetDesc) findNodeAdd(value byte, preds *[maxLevel]*byteNodeDesc, succs *[maxLevel]*byteNodeDesc) int {
   398  	x := s.header
   399  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   400  		succ := x.atomicLoadNext(i)
   401  		for succ != nil && succ.lessthan(value) {
   402  			x = succ
   403  			succ = x.atomicLoadNext(i)
   404  		}
   405  		preds[i] = x
   406  		succs[i] = succ
   407  
   408  		// Check if the value already in the skip list.
   409  		if succ != nil && succ.equal(value) {
   410  			return i
   411  		}
   412  	}
   413  	return -1
   414  }
   415  
   416  func unlockByteDesc(preds [maxLevel]*byteNodeDesc, highestLevel int) {
   417  	var prevPred *byteNodeDesc
   418  	for i := highestLevel; i >= 0; i-- {
   419  		if preds[i] != prevPred { // the node could be unlocked by previous loop
   420  			preds[i].mu.Unlock()
   421  			prevPred = preds[i]
   422  		}
   423  	}
   424  }
   425  
   426  // Add add the value into skip set, return true if this process insert the value into skip set,
   427  // return false if this process can't insert this value, because another process has insert the same value.
   428  //
   429  // If the value is in the skip set but not fully linked, this process will wait until it is.
   430  func (s *ByteSetDesc) Add(value byte) bool {
   431  	level := s.randomLevel()
   432  	var preds, succs [maxLevel]*byteNodeDesc
   433  	for {
   434  		lFound := s.findNodeAdd(value, &preds, &succs)
   435  		if lFound != -1 { // indicating the value is already in the skip-list
   436  			nodeFound := succs[lFound]
   437  			if !nodeFound.flags.Get(marked) {
   438  				for !nodeFound.flags.Get(fullyLinked) {
   439  					// The node is not yet fully linked, just waits until it is.
   440  				}
   441  				return false
   442  			}
   443  			// If the node is marked, represents some other thread is in the process of deleting this node,
   444  			// we need to add this node in next loop.
   445  			continue
   446  		}
   447  		// Add this node into skip list.
   448  		var (
   449  			highestLocked        = -1 // the highest level being locked by this process
   450  			valid                = true
   451  			pred, succ, prevPred *byteNodeDesc
   452  		)
   453  		for layer := 0; valid && layer < level; layer++ {
   454  			pred = preds[layer]   // target node's previous node
   455  			succ = succs[layer]   // target node's next node
   456  			if pred != prevPred { // the node in this layer could be locked by previous loop
   457  				pred.mu.Lock()
   458  				highestLocked = layer
   459  				prevPred = pred
   460  			}
   461  			// valid check if there is another node has inserted into the skip list in this layer during this process.
   462  			// It is valid if:
   463  			// 1. The previous node and next node both are not marked.
   464  			// 2. The previous node's next node is succ in this layer.
   465  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
   466  		}
   467  		if !valid {
   468  			unlockByteDesc(preds, highestLocked)
   469  			continue
   470  		}
   471  
   472  		nn := newByteNodeDesc(value, level)
   473  		for layer := 0; layer < level; layer++ {
   474  			nn.storeNext(layer, succs[layer])
   475  			preds[layer].atomicStoreNext(layer, nn)
   476  		}
   477  		nn.flags.SetTrue(fullyLinked)
   478  		unlockByteDesc(preds, highestLocked)
   479  		atomic.AddInt64(&s.length, 1)
   480  		return true
   481  	}
   482  }
   483  
   484  func (s *ByteSetDesc) randomLevel() int {
   485  	// Generate random level.
   486  	level := randomLevel()
   487  	// Update highest level if possible.
   488  	for {
   489  		hl := atomic.LoadInt64(&s.highestLevel)
   490  		if int64(level) <= hl {
   491  			break
   492  		}
   493  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
   494  			break
   495  		}
   496  	}
   497  	return level
   498  }
   499  
   500  // Contains check if the value is in the skip set.
   501  func (s *ByteSetDesc) Contains(value byte) bool {
   502  	x := s.header
   503  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   504  		nex := x.atomicLoadNext(i)
   505  		for nex != nil && nex.lessthan(value) {
   506  			x = nex
   507  			nex = x.atomicLoadNext(i)
   508  		}
   509  
   510  		// Check if the value already in the skip list.
   511  		if nex != nil && nex.equal(value) {
   512  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
   513  		}
   514  	}
   515  	return false
   516  }
   517  
   518  // Remove a node from the skip set.
   519  func (s *ByteSetDesc) Remove(value byte) bool {
   520  	var (
   521  		nodeToRemove *byteNodeDesc
   522  		isMarked     bool // represents if this operation mark the node
   523  		topLayer     = -1
   524  		preds, succs [maxLevel]*byteNodeDesc
   525  	)
   526  	for {
   527  		lFound := s.findNodeRemove(value, &preds, &succs)
   528  		if isMarked || // this process mark this node or we can find this node in the skip list
   529  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
   530  			if !isMarked { // we don't mark this node for now
   531  				nodeToRemove = succs[lFound]
   532  				topLayer = lFound
   533  				nodeToRemove.mu.Lock()
   534  				if nodeToRemove.flags.Get(marked) {
   535  					// The node is marked by another process,
   536  					// the physical deletion will be accomplished by another process.
   537  					nodeToRemove.mu.Unlock()
   538  					return false
   539  				}
   540  				nodeToRemove.flags.SetTrue(marked)
   541  				isMarked = true
   542  			}
   543  			// Accomplish the physical deletion.
   544  			var (
   545  				highestLocked        = -1 // the highest level being locked by this process
   546  				valid                = true
   547  				pred, succ, prevPred *byteNodeDesc
   548  			)
   549  			for layer := 0; valid && (layer <= topLayer); layer++ {
   550  				pred, succ = preds[layer], succs[layer]
   551  				if pred != prevPred { // the node in this layer could be locked by previous loop
   552  					pred.mu.Lock()
   553  					highestLocked = layer
   554  					prevPred = pred
   555  				}
   556  				// valid check if there is another node has inserted into the skip list in this layer
   557  				// during this process, or the previous is removed by another process.
   558  				// It is valid if:
   559  				// 1. the previous node exists.
   560  				// 2. no another node has inserted into the skip list in this layer.
   561  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
   562  			}
   563  			if !valid {
   564  				unlockByteDesc(preds, highestLocked)
   565  				continue
   566  			}
   567  			for i := topLayer; i >= 0; i-- {
   568  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
   569  				// So we don't need `nodeToRemove.loadNext`
   570  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
   571  			}
   572  			nodeToRemove.mu.Unlock()
   573  			unlockByteDesc(preds, highestLocked)
   574  			atomic.AddInt64(&s.length, -1)
   575  			return true
   576  		}
   577  		return false
   578  	}
   579  }
   580  
   581  // Range calls f sequentially for each value present in the skip set.
   582  // If f returns false, range stops the iteration.
   583  func (s *ByteSetDesc) Range(f func(value byte) bool) {
   584  	x := s.header.atomicLoadNext(0)
   585  	for x != nil {
   586  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
   587  			x = x.atomicLoadNext(0)
   588  			continue
   589  		}
   590  		if !f(x.value) {
   591  			break
   592  		}
   593  		x = x.atomicLoadNext(0)
   594  	}
   595  }
   596  
   597  // Len return the length of this skip set.
   598  func (s *ByteSetDesc) Len() int {
   599  	return int(atomic.LoadInt64(&s.length))
   600  }
   601  
   602  // Float32Set represents a set based on skip list in ascending order.
   603  type Float32Set struct {
   604  	header       *float32Node
   605  	length       int64
   606  	highestLevel int64 // highest level for now
   607  }
   608  
   609  type float32Node struct {
   610  	value float32
   611  	next  optionalArray // [level]*float32Node
   612  	mu    sync.Mutex
   613  	flags bitflag
   614  	level uint32
   615  }
   616  
   617  func newFloat32Node(value float32, level int) *float32Node {
   618  	node := &float32Node{
   619  		value: value,
   620  		level: uint32(level),
   621  	}
   622  	if level > op1 {
   623  		node.next.extra = new([op2]unsafe.Pointer)
   624  	}
   625  	return node
   626  }
   627  
   628  func (n *float32Node) loadNext(i int) *float32Node {
   629  	return (*float32Node)(n.next.load(i))
   630  }
   631  
   632  func (n *float32Node) storeNext(i int, node *float32Node) {
   633  	n.next.store(i, unsafe.Pointer(node))
   634  }
   635  
   636  func (n *float32Node) atomicLoadNext(i int) *float32Node {
   637  	return (*float32Node)(n.next.atomicLoad(i))
   638  }
   639  
   640  func (n *float32Node) atomicStoreNext(i int, node *float32Node) {
   641  	n.next.atomicStore(i, unsafe.Pointer(node))
   642  }
   643  
   644  func (n *float32Node) lessthan(value float32) bool {
   645  	return n.value < value
   646  }
   647  
   648  func (n *float32Node) equal(value float32) bool {
   649  	return n.value == value
   650  }
   651  
   652  // NewFloat32 return an empty float32 skip set in ascending order.
   653  func NewFloat32() *Float32Set {
   654  	h := newFloat32Node(0, maxLevel)
   655  	h.flags.SetTrue(fullyLinked)
   656  	return &Float32Set{
   657  		header:       h,
   658  		highestLevel: defaultHighestLevel,
   659  	}
   660  }
   661  
   662  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
   663  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   664  func (s *Float32Set) findNodeRemove(value float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) int {
   665  	// lFound represents the index of the first layer at which it found a node.
   666  	lFound, x := -1, s.header
   667  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   668  		succ := x.atomicLoadNext(i)
   669  		for succ != nil && succ.lessthan(value) {
   670  			x = succ
   671  			succ = x.atomicLoadNext(i)
   672  		}
   673  		preds[i] = x
   674  		succs[i] = succ
   675  
   676  		// Check if the value already in the skip list.
   677  		if lFound == -1 && succ != nil && succ.equal(value) {
   678  			lFound = i
   679  		}
   680  	}
   681  	return lFound
   682  }
   683  
   684  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
   685  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   686  func (s *Float32Set) findNodeAdd(value float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) int {
   687  	x := s.header
   688  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   689  		succ := x.atomicLoadNext(i)
   690  		for succ != nil && succ.lessthan(value) {
   691  			x = succ
   692  			succ = x.atomicLoadNext(i)
   693  		}
   694  		preds[i] = x
   695  		succs[i] = succ
   696  
   697  		// Check if the value already in the skip list.
   698  		if succ != nil && succ.equal(value) {
   699  			return i
   700  		}
   701  	}
   702  	return -1
   703  }
   704  
   705  func unlockFloat32(preds [maxLevel]*float32Node, highestLevel int) {
   706  	var prevPred *float32Node
   707  	for i := highestLevel; i >= 0; i-- {
   708  		if preds[i] != prevPred { // the node could be unlocked by previous loop
   709  			preds[i].mu.Unlock()
   710  			prevPred = preds[i]
   711  		}
   712  	}
   713  }
   714  
   715  // Add add the value into skip set, return true if this process insert the value into skip set,
   716  // return false if this process can't insert this value, because another process has insert the same value.
   717  //
   718  // If the value is in the skip set but not fully linked, this process will wait until it is.
   719  func (s *Float32Set) Add(value float32) bool {
   720  	level := s.randomLevel()
   721  	var preds, succs [maxLevel]*float32Node
   722  	for {
   723  		lFound := s.findNodeAdd(value, &preds, &succs)
   724  		if lFound != -1 { // indicating the value is already in the skip-list
   725  			nodeFound := succs[lFound]
   726  			if !nodeFound.flags.Get(marked) {
   727  				for !nodeFound.flags.Get(fullyLinked) {
   728  					// The node is not yet fully linked, just waits until it is.
   729  				}
   730  				return false
   731  			}
   732  			// If the node is marked, represents some other thread is in the process of deleting this node,
   733  			// we need to add this node in next loop.
   734  			continue
   735  		}
   736  		// Add this node into skip list.
   737  		var (
   738  			highestLocked        = -1 // the highest level being locked by this process
   739  			valid                = true
   740  			pred, succ, prevPred *float32Node
   741  		)
   742  		for layer := 0; valid && layer < level; layer++ {
   743  			pred = preds[layer]   // target node's previous node
   744  			succ = succs[layer]   // target node's next node
   745  			if pred != prevPred { // the node in this layer could be locked by previous loop
   746  				pred.mu.Lock()
   747  				highestLocked = layer
   748  				prevPred = pred
   749  			}
   750  			// valid check if there is another node has inserted into the skip list in this layer during this process.
   751  			// It is valid if:
   752  			// 1. The previous node and next node both are not marked.
   753  			// 2. The previous node's next node is succ in this layer.
   754  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
   755  		}
   756  		if !valid {
   757  			unlockFloat32(preds, highestLocked)
   758  			continue
   759  		}
   760  
   761  		nn := newFloat32Node(value, level)
   762  		for layer := 0; layer < level; layer++ {
   763  			nn.storeNext(layer, succs[layer])
   764  			preds[layer].atomicStoreNext(layer, nn)
   765  		}
   766  		nn.flags.SetTrue(fullyLinked)
   767  		unlockFloat32(preds, highestLocked)
   768  		atomic.AddInt64(&s.length, 1)
   769  		return true
   770  	}
   771  }
   772  
   773  func (s *Float32Set) randomLevel() int {
   774  	// Generate random level.
   775  	level := randomLevel()
   776  	// Update highest level if possible.
   777  	for {
   778  		hl := atomic.LoadInt64(&s.highestLevel)
   779  		if int64(level) <= hl {
   780  			break
   781  		}
   782  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
   783  			break
   784  		}
   785  	}
   786  	return level
   787  }
   788  
   789  // Contains check if the value is in the skip set.
   790  func (s *Float32Set) Contains(value float32) bool {
   791  	x := s.header
   792  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   793  		nex := x.atomicLoadNext(i)
   794  		for nex != nil && nex.lessthan(value) {
   795  			x = nex
   796  			nex = x.atomicLoadNext(i)
   797  		}
   798  
   799  		// Check if the value already in the skip list.
   800  		if nex != nil && nex.equal(value) {
   801  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
   802  		}
   803  	}
   804  	return false
   805  }
   806  
   807  // Remove a node from the skip set.
   808  func (s *Float32Set) Remove(value float32) bool {
   809  	var (
   810  		nodeToRemove *float32Node
   811  		isMarked     bool // represents if this operation mark the node
   812  		topLayer     = -1
   813  		preds, succs [maxLevel]*float32Node
   814  	)
   815  	for {
   816  		lFound := s.findNodeRemove(value, &preds, &succs)
   817  		if isMarked || // this process mark this node or we can find this node in the skip list
   818  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
   819  			if !isMarked { // we don't mark this node for now
   820  				nodeToRemove = succs[lFound]
   821  				topLayer = lFound
   822  				nodeToRemove.mu.Lock()
   823  				if nodeToRemove.flags.Get(marked) {
   824  					// The node is marked by another process,
   825  					// the physical deletion will be accomplished by another process.
   826  					nodeToRemove.mu.Unlock()
   827  					return false
   828  				}
   829  				nodeToRemove.flags.SetTrue(marked)
   830  				isMarked = true
   831  			}
   832  			// Accomplish the physical deletion.
   833  			var (
   834  				highestLocked        = -1 // the highest level being locked by this process
   835  				valid                = true
   836  				pred, succ, prevPred *float32Node
   837  			)
   838  			for layer := 0; valid && (layer <= topLayer); layer++ {
   839  				pred, succ = preds[layer], succs[layer]
   840  				if pred != prevPred { // the node in this layer could be locked by previous loop
   841  					pred.mu.Lock()
   842  					highestLocked = layer
   843  					prevPred = pred
   844  				}
   845  				// valid check if there is another node has inserted into the skip list in this layer
   846  				// during this process, or the previous is removed by another process.
   847  				// It is valid if:
   848  				// 1. the previous node exists.
   849  				// 2. no another node has inserted into the skip list in this layer.
   850  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
   851  			}
   852  			if !valid {
   853  				unlockFloat32(preds, highestLocked)
   854  				continue
   855  			}
   856  			for i := topLayer; i >= 0; i-- {
   857  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
   858  				// So we don't need `nodeToRemove.loadNext`
   859  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
   860  			}
   861  			nodeToRemove.mu.Unlock()
   862  			unlockFloat32(preds, highestLocked)
   863  			atomic.AddInt64(&s.length, -1)
   864  			return true
   865  		}
   866  		return false
   867  	}
   868  }
   869  
   870  // Range calls f sequentially for each value present in the skip set.
   871  // If f returns false, range stops the iteration.
   872  func (s *Float32Set) Range(f func(value float32) bool) {
   873  	x := s.header.atomicLoadNext(0)
   874  	for x != nil {
   875  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
   876  			x = x.atomicLoadNext(0)
   877  			continue
   878  		}
   879  		if !f(x.value) {
   880  			break
   881  		}
   882  		x = x.atomicLoadNext(0)
   883  	}
   884  }
   885  
   886  // Len return the length of this skip set.
   887  func (s *Float32Set) Len() int {
   888  	return int(atomic.LoadInt64(&s.length))
   889  }
   890  
   891  // Float32SetDesc represents a set based on skip list in descending order.
   892  type Float32SetDesc struct {
   893  	header       *float32NodeDesc
   894  	length       int64
   895  	highestLevel int64 // highest level for now
   896  }
   897  
   898  type float32NodeDesc struct {
   899  	value float32
   900  	next  optionalArray // [level]*float32NodeDesc
   901  	mu    sync.Mutex
   902  	flags bitflag
   903  	level uint32
   904  }
   905  
   906  func newFloat32NodeDesc(value float32, level int) *float32NodeDesc {
   907  	node := &float32NodeDesc{
   908  		value: value,
   909  		level: uint32(level),
   910  	}
   911  	if level > op1 {
   912  		node.next.extra = new([op2]unsafe.Pointer)
   913  	}
   914  	return node
   915  }
   916  
   917  func (n *float32NodeDesc) loadNext(i int) *float32NodeDesc {
   918  	return (*float32NodeDesc)(n.next.load(i))
   919  }
   920  
   921  func (n *float32NodeDesc) storeNext(i int, node *float32NodeDesc) {
   922  	n.next.store(i, unsafe.Pointer(node))
   923  }
   924  
   925  func (n *float32NodeDesc) atomicLoadNext(i int) *float32NodeDesc {
   926  	return (*float32NodeDesc)(n.next.atomicLoad(i))
   927  }
   928  
   929  func (n *float32NodeDesc) atomicStoreNext(i int, node *float32NodeDesc) {
   930  	n.next.atomicStore(i, unsafe.Pointer(node))
   931  }
   932  
   933  func (n *float32NodeDesc) lessthan(value float32) bool {
   934  	return n.value > value
   935  }
   936  
   937  func (n *float32NodeDesc) equal(value float32) bool {
   938  	return n.value == value
   939  }
   940  
   941  // NewFloat32Desc return an empty float32 skip set in descending order.
   942  func NewFloat32Desc() *Float32SetDesc {
   943  	h := newFloat32NodeDesc(0, maxLevel)
   944  	h.flags.SetTrue(fullyLinked)
   945  	return &Float32SetDesc{
   946  		header:       h,
   947  		highestLevel: defaultHighestLevel,
   948  	}
   949  }
   950  
   951  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
   952  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   953  func (s *Float32SetDesc) findNodeRemove(value float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) int {
   954  	// lFound represents the index of the first layer at which it found a node.
   955  	lFound, x := -1, s.header
   956  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   957  		succ := x.atomicLoadNext(i)
   958  		for succ != nil && succ.lessthan(value) {
   959  			x = succ
   960  			succ = x.atomicLoadNext(i)
   961  		}
   962  		preds[i] = x
   963  		succs[i] = succ
   964  
   965  		// Check if the value already in the skip list.
   966  		if lFound == -1 && succ != nil && succ.equal(value) {
   967  			lFound = i
   968  		}
   969  	}
   970  	return lFound
   971  }
   972  
   973  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
   974  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   975  func (s *Float32SetDesc) findNodeAdd(value float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) int {
   976  	x := s.header
   977  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   978  		succ := x.atomicLoadNext(i)
   979  		for succ != nil && succ.lessthan(value) {
   980  			x = succ
   981  			succ = x.atomicLoadNext(i)
   982  		}
   983  		preds[i] = x
   984  		succs[i] = succ
   985  
   986  		// Check if the value already in the skip list.
   987  		if succ != nil && succ.equal(value) {
   988  			return i
   989  		}
   990  	}
   991  	return -1
   992  }
   993  
   994  func unlockFloat32Desc(preds [maxLevel]*float32NodeDesc, highestLevel int) {
   995  	var prevPred *float32NodeDesc
   996  	for i := highestLevel; i >= 0; i-- {
   997  		if preds[i] != prevPred { // the node could be unlocked by previous loop
   998  			preds[i].mu.Unlock()
   999  			prevPred = preds[i]
  1000  		}
  1001  	}
  1002  }
  1003  
  1004  // Add add the value into skip set, return true if this process insert the value into skip set,
  1005  // return false if this process can't insert this value, because another process has insert the same value.
  1006  //
  1007  // If the value is in the skip set but not fully linked, this process will wait until it is.
  1008  func (s *Float32SetDesc) Add(value float32) bool {
  1009  	level := s.randomLevel()
  1010  	var preds, succs [maxLevel]*float32NodeDesc
  1011  	for {
  1012  		lFound := s.findNodeAdd(value, &preds, &succs)
  1013  		if lFound != -1 { // indicating the value is already in the skip-list
  1014  			nodeFound := succs[lFound]
  1015  			if !nodeFound.flags.Get(marked) {
  1016  				for !nodeFound.flags.Get(fullyLinked) {
  1017  					// The node is not yet fully linked, just waits until it is.
  1018  				}
  1019  				return false
  1020  			}
  1021  			// If the node is marked, represents some other thread is in the process of deleting this node,
  1022  			// we need to add this node in next loop.
  1023  			continue
  1024  		}
  1025  		// Add this node into skip list.
  1026  		var (
  1027  			highestLocked        = -1 // the highest level being locked by this process
  1028  			valid                = true
  1029  			pred, succ, prevPred *float32NodeDesc
  1030  		)
  1031  		for layer := 0; valid && layer < level; layer++ {
  1032  			pred = preds[layer]   // target node's previous node
  1033  			succ = succs[layer]   // target node's next node
  1034  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1035  				pred.mu.Lock()
  1036  				highestLocked = layer
  1037  				prevPred = pred
  1038  			}
  1039  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  1040  			// It is valid if:
  1041  			// 1. The previous node and next node both are not marked.
  1042  			// 2. The previous node's next node is succ in this layer.
  1043  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1044  		}
  1045  		if !valid {
  1046  			unlockFloat32Desc(preds, highestLocked)
  1047  			continue
  1048  		}
  1049  
  1050  		nn := newFloat32NodeDesc(value, level)
  1051  		for layer := 0; layer < level; layer++ {
  1052  			nn.storeNext(layer, succs[layer])
  1053  			preds[layer].atomicStoreNext(layer, nn)
  1054  		}
  1055  		nn.flags.SetTrue(fullyLinked)
  1056  		unlockFloat32Desc(preds, highestLocked)
  1057  		atomic.AddInt64(&s.length, 1)
  1058  		return true
  1059  	}
  1060  }
  1061  
  1062  func (s *Float32SetDesc) randomLevel() int {
  1063  	// Generate random level.
  1064  	level := randomLevel()
  1065  	// Update highest level if possible.
  1066  	for {
  1067  		hl := atomic.LoadInt64(&s.highestLevel)
  1068  		if int64(level) <= hl {
  1069  			break
  1070  		}
  1071  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1072  			break
  1073  		}
  1074  	}
  1075  	return level
  1076  }
  1077  
  1078  // Contains check if the value is in the skip set.
  1079  func (s *Float32SetDesc) Contains(value float32) bool {
  1080  	x := s.header
  1081  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1082  		nex := x.atomicLoadNext(i)
  1083  		for nex != nil && nex.lessthan(value) {
  1084  			x = nex
  1085  			nex = x.atomicLoadNext(i)
  1086  		}
  1087  
  1088  		// Check if the value already in the skip list.
  1089  		if nex != nil && nex.equal(value) {
  1090  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  1091  		}
  1092  	}
  1093  	return false
  1094  }
  1095  
  1096  // Remove a node from the skip set.
  1097  func (s *Float32SetDesc) Remove(value float32) bool {
  1098  	var (
  1099  		nodeToRemove *float32NodeDesc
  1100  		isMarked     bool // represents if this operation mark the node
  1101  		topLayer     = -1
  1102  		preds, succs [maxLevel]*float32NodeDesc
  1103  	)
  1104  	for {
  1105  		lFound := s.findNodeRemove(value, &preds, &succs)
  1106  		if isMarked || // this process mark this node or we can find this node in the skip list
  1107  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1108  			if !isMarked { // we don't mark this node for now
  1109  				nodeToRemove = succs[lFound]
  1110  				topLayer = lFound
  1111  				nodeToRemove.mu.Lock()
  1112  				if nodeToRemove.flags.Get(marked) {
  1113  					// The node is marked by another process,
  1114  					// the physical deletion will be accomplished by another process.
  1115  					nodeToRemove.mu.Unlock()
  1116  					return false
  1117  				}
  1118  				nodeToRemove.flags.SetTrue(marked)
  1119  				isMarked = true
  1120  			}
  1121  			// Accomplish the physical deletion.
  1122  			var (
  1123  				highestLocked        = -1 // the highest level being locked by this process
  1124  				valid                = true
  1125  				pred, succ, prevPred *float32NodeDesc
  1126  			)
  1127  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1128  				pred, succ = preds[layer], succs[layer]
  1129  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1130  					pred.mu.Lock()
  1131  					highestLocked = layer
  1132  					prevPred = pred
  1133  				}
  1134  				// valid check if there is another node has inserted into the skip list in this layer
  1135  				// during this process, or the previous is removed by another process.
  1136  				// It is valid if:
  1137  				// 1. the previous node exists.
  1138  				// 2. no another node has inserted into the skip list in this layer.
  1139  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  1140  			}
  1141  			if !valid {
  1142  				unlockFloat32Desc(preds, highestLocked)
  1143  				continue
  1144  			}
  1145  			for i := topLayer; i >= 0; i-- {
  1146  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  1147  				// So we don't need `nodeToRemove.loadNext`
  1148  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  1149  			}
  1150  			nodeToRemove.mu.Unlock()
  1151  			unlockFloat32Desc(preds, highestLocked)
  1152  			atomic.AddInt64(&s.length, -1)
  1153  			return true
  1154  		}
  1155  		return false
  1156  	}
  1157  }
  1158  
  1159  // Range calls f sequentially for each value present in the skip set.
  1160  // If f returns false, range stops the iteration.
  1161  func (s *Float32SetDesc) Range(f func(value float32) bool) {
  1162  	x := s.header.atomicLoadNext(0)
  1163  	for x != nil {
  1164  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  1165  			x = x.atomicLoadNext(0)
  1166  			continue
  1167  		}
  1168  		if !f(x.value) {
  1169  			break
  1170  		}
  1171  		x = x.atomicLoadNext(0)
  1172  	}
  1173  }
  1174  
  1175  // Len return the length of this skip set.
  1176  func (s *Float32SetDesc) Len() int {
  1177  	return int(atomic.LoadInt64(&s.length))
  1178  }
  1179  
  1180  // Float64Set represents a set based on skip list in ascending order.
  1181  type Float64Set struct {
  1182  	header       *float64Node
  1183  	length       int64
  1184  	highestLevel int64 // highest level for now
  1185  }
  1186  
  1187  type float64Node struct {
  1188  	value float64
  1189  	next  optionalArray // [level]*float64Node
  1190  	mu    sync.Mutex
  1191  	flags bitflag
  1192  	level uint32
  1193  }
  1194  
  1195  func newFloat64Node(value float64, level int) *float64Node {
  1196  	node := &float64Node{
  1197  		value: value,
  1198  		level: uint32(level),
  1199  	}
  1200  	if level > op1 {
  1201  		node.next.extra = new([op2]unsafe.Pointer)
  1202  	}
  1203  	return node
  1204  }
  1205  
  1206  func (n *float64Node) loadNext(i int) *float64Node {
  1207  	return (*float64Node)(n.next.load(i))
  1208  }
  1209  
  1210  func (n *float64Node) storeNext(i int, node *float64Node) {
  1211  	n.next.store(i, unsafe.Pointer(node))
  1212  }
  1213  
  1214  func (n *float64Node) atomicLoadNext(i int) *float64Node {
  1215  	return (*float64Node)(n.next.atomicLoad(i))
  1216  }
  1217  
  1218  func (n *float64Node) atomicStoreNext(i int, node *float64Node) {
  1219  	n.next.atomicStore(i, unsafe.Pointer(node))
  1220  }
  1221  
  1222  func (n *float64Node) lessthan(value float64) bool {
  1223  	return n.value < value
  1224  }
  1225  
  1226  func (n *float64Node) equal(value float64) bool {
  1227  	return n.value == value
  1228  }
  1229  
  1230  // NewFloat64 return an empty float64 skip set in ascending order.
  1231  func NewFloat64() *Float64Set {
  1232  	h := newFloat64Node(0, maxLevel)
  1233  	h.flags.SetTrue(fullyLinked)
  1234  	return &Float64Set{
  1235  		header:       h,
  1236  		highestLevel: defaultHighestLevel,
  1237  	}
  1238  }
  1239  
  1240  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  1241  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1242  func (s *Float64Set) findNodeRemove(value float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) int {
  1243  	// lFound represents the index of the first layer at which it found a node.
  1244  	lFound, x := -1, s.header
  1245  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1246  		succ := x.atomicLoadNext(i)
  1247  		for succ != nil && succ.lessthan(value) {
  1248  			x = succ
  1249  			succ = x.atomicLoadNext(i)
  1250  		}
  1251  		preds[i] = x
  1252  		succs[i] = succ
  1253  
  1254  		// Check if the value already in the skip list.
  1255  		if lFound == -1 && succ != nil && succ.equal(value) {
  1256  			lFound = i
  1257  		}
  1258  	}
  1259  	return lFound
  1260  }
  1261  
  1262  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  1263  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1264  func (s *Float64Set) findNodeAdd(value float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) int {
  1265  	x := s.header
  1266  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1267  		succ := x.atomicLoadNext(i)
  1268  		for succ != nil && succ.lessthan(value) {
  1269  			x = succ
  1270  			succ = x.atomicLoadNext(i)
  1271  		}
  1272  		preds[i] = x
  1273  		succs[i] = succ
  1274  
  1275  		// Check if the value already in the skip list.
  1276  		if succ != nil && succ.equal(value) {
  1277  			return i
  1278  		}
  1279  	}
  1280  	return -1
  1281  }
  1282  
  1283  func unlockFloat64(preds [maxLevel]*float64Node, highestLevel int) {
  1284  	var prevPred *float64Node
  1285  	for i := highestLevel; i >= 0; i-- {
  1286  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  1287  			preds[i].mu.Unlock()
  1288  			prevPred = preds[i]
  1289  		}
  1290  	}
  1291  }
  1292  
  1293  // Add add the value into skip set, return true if this process insert the value into skip set,
  1294  // return false if this process can't insert this value, because another process has insert the same value.
  1295  //
  1296  // If the value is in the skip set but not fully linked, this process will wait until it is.
  1297  func (s *Float64Set) Add(value float64) bool {
  1298  	level := s.randomLevel()
  1299  	var preds, succs [maxLevel]*float64Node
  1300  	for {
  1301  		lFound := s.findNodeAdd(value, &preds, &succs)
  1302  		if lFound != -1 { // indicating the value is already in the skip-list
  1303  			nodeFound := succs[lFound]
  1304  			if !nodeFound.flags.Get(marked) {
  1305  				for !nodeFound.flags.Get(fullyLinked) {
  1306  					// The node is not yet fully linked, just waits until it is.
  1307  				}
  1308  				return false
  1309  			}
  1310  			// If the node is marked, represents some other thread is in the process of deleting this node,
  1311  			// we need to add this node in next loop.
  1312  			continue
  1313  		}
  1314  		// Add this node into skip list.
  1315  		var (
  1316  			highestLocked        = -1 // the highest level being locked by this process
  1317  			valid                = true
  1318  			pred, succ, prevPred *float64Node
  1319  		)
  1320  		for layer := 0; valid && layer < level; layer++ {
  1321  			pred = preds[layer]   // target node's previous node
  1322  			succ = succs[layer]   // target node's next node
  1323  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1324  				pred.mu.Lock()
  1325  				highestLocked = layer
  1326  				prevPred = pred
  1327  			}
  1328  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  1329  			// It is valid if:
  1330  			// 1. The previous node and next node both are not marked.
  1331  			// 2. The previous node's next node is succ in this layer.
  1332  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1333  		}
  1334  		if !valid {
  1335  			unlockFloat64(preds, highestLocked)
  1336  			continue
  1337  		}
  1338  
  1339  		nn := newFloat64Node(value, level)
  1340  		for layer := 0; layer < level; layer++ {
  1341  			nn.storeNext(layer, succs[layer])
  1342  			preds[layer].atomicStoreNext(layer, nn)
  1343  		}
  1344  		nn.flags.SetTrue(fullyLinked)
  1345  		unlockFloat64(preds, highestLocked)
  1346  		atomic.AddInt64(&s.length, 1)
  1347  		return true
  1348  	}
  1349  }
  1350  
  1351  func (s *Float64Set) randomLevel() int {
  1352  	// Generate random level.
  1353  	level := randomLevel()
  1354  	// Update highest level if possible.
  1355  	for {
  1356  		hl := atomic.LoadInt64(&s.highestLevel)
  1357  		if int64(level) <= hl {
  1358  			break
  1359  		}
  1360  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1361  			break
  1362  		}
  1363  	}
  1364  	return level
  1365  }
  1366  
  1367  // Contains check if the value is in the skip set.
  1368  func (s *Float64Set) Contains(value float64) bool {
  1369  	x := s.header
  1370  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1371  		nex := x.atomicLoadNext(i)
  1372  		for nex != nil && nex.lessthan(value) {
  1373  			x = nex
  1374  			nex = x.atomicLoadNext(i)
  1375  		}
  1376  
  1377  		// Check if the value already in the skip list.
  1378  		if nex != nil && nex.equal(value) {
  1379  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  1380  		}
  1381  	}
  1382  	return false
  1383  }
  1384  
  1385  // Remove a node from the skip set.
  1386  func (s *Float64Set) Remove(value float64) bool {
  1387  	var (
  1388  		nodeToRemove *float64Node
  1389  		isMarked     bool // represents if this operation mark the node
  1390  		topLayer     = -1
  1391  		preds, succs [maxLevel]*float64Node
  1392  	)
  1393  	for {
  1394  		lFound := s.findNodeRemove(value, &preds, &succs)
  1395  		if isMarked || // this process mark this node or we can find this node in the skip list
  1396  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1397  			if !isMarked { // we don't mark this node for now
  1398  				nodeToRemove = succs[lFound]
  1399  				topLayer = lFound
  1400  				nodeToRemove.mu.Lock()
  1401  				if nodeToRemove.flags.Get(marked) {
  1402  					// The node is marked by another process,
  1403  					// the physical deletion will be accomplished by another process.
  1404  					nodeToRemove.mu.Unlock()
  1405  					return false
  1406  				}
  1407  				nodeToRemove.flags.SetTrue(marked)
  1408  				isMarked = true
  1409  			}
  1410  			// Accomplish the physical deletion.
  1411  			var (
  1412  				highestLocked        = -1 // the highest level being locked by this process
  1413  				valid                = true
  1414  				pred, succ, prevPred *float64Node
  1415  			)
  1416  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1417  				pred, succ = preds[layer], succs[layer]
  1418  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1419  					pred.mu.Lock()
  1420  					highestLocked = layer
  1421  					prevPred = pred
  1422  				}
  1423  				// valid check if there is another node has inserted into the skip list in this layer
  1424  				// during this process, or the previous is removed by another process.
  1425  				// It is valid if:
  1426  				// 1. the previous node exists.
  1427  				// 2. no another node has inserted into the skip list in this layer.
  1428  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  1429  			}
  1430  			if !valid {
  1431  				unlockFloat64(preds, highestLocked)
  1432  				continue
  1433  			}
  1434  			for i := topLayer; i >= 0; i-- {
  1435  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  1436  				// So we don't need `nodeToRemove.loadNext`
  1437  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  1438  			}
  1439  			nodeToRemove.mu.Unlock()
  1440  			unlockFloat64(preds, highestLocked)
  1441  			atomic.AddInt64(&s.length, -1)
  1442  			return true
  1443  		}
  1444  		return false
  1445  	}
  1446  }
  1447  
  1448  // Range calls f sequentially for each value present in the skip set.
  1449  // If f returns false, range stops the iteration.
  1450  func (s *Float64Set) Range(f func(value float64) bool) {
  1451  	x := s.header.atomicLoadNext(0)
  1452  	for x != nil {
  1453  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  1454  			x = x.atomicLoadNext(0)
  1455  			continue
  1456  		}
  1457  		if !f(x.value) {
  1458  			break
  1459  		}
  1460  		x = x.atomicLoadNext(0)
  1461  	}
  1462  }
  1463  
  1464  // Len return the length of this skip set.
  1465  func (s *Float64Set) Len() int {
  1466  	return int(atomic.LoadInt64(&s.length))
  1467  }
  1468  
  1469  // Float64SetDesc represents a set based on skip list in descending order.
  1470  type Float64SetDesc struct {
  1471  	header       *float64NodeDesc
  1472  	length       int64
  1473  	highestLevel int64 // highest level for now
  1474  }
  1475  
  1476  type float64NodeDesc struct {
  1477  	value float64
  1478  	next  optionalArray // [level]*float64NodeDesc
  1479  	mu    sync.Mutex
  1480  	flags bitflag
  1481  	level uint32
  1482  }
  1483  
  1484  func newFloat64NodeDesc(value float64, level int) *float64NodeDesc {
  1485  	node := &float64NodeDesc{
  1486  		value: value,
  1487  		level: uint32(level),
  1488  	}
  1489  	if level > op1 {
  1490  		node.next.extra = new([op2]unsafe.Pointer)
  1491  	}
  1492  	return node
  1493  }
  1494  
  1495  func (n *float64NodeDesc) loadNext(i int) *float64NodeDesc {
  1496  	return (*float64NodeDesc)(n.next.load(i))
  1497  }
  1498  
  1499  func (n *float64NodeDesc) storeNext(i int, node *float64NodeDesc) {
  1500  	n.next.store(i, unsafe.Pointer(node))
  1501  }
  1502  
  1503  func (n *float64NodeDesc) atomicLoadNext(i int) *float64NodeDesc {
  1504  	return (*float64NodeDesc)(n.next.atomicLoad(i))
  1505  }
  1506  
  1507  func (n *float64NodeDesc) atomicStoreNext(i int, node *float64NodeDesc) {
  1508  	n.next.atomicStore(i, unsafe.Pointer(node))
  1509  }
  1510  
  1511  func (n *float64NodeDesc) lessthan(value float64) bool {
  1512  	return n.value > value
  1513  }
  1514  
  1515  func (n *float64NodeDesc) equal(value float64) bool {
  1516  	return n.value == value
  1517  }
  1518  
  1519  // NewFloat64Desc return an empty float64 skip set in descending order.
  1520  func NewFloat64Desc() *Float64SetDesc {
  1521  	h := newFloat64NodeDesc(0, maxLevel)
  1522  	h.flags.SetTrue(fullyLinked)
  1523  	return &Float64SetDesc{
  1524  		header:       h,
  1525  		highestLevel: defaultHighestLevel,
  1526  	}
  1527  }
  1528  
  1529  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  1530  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1531  func (s *Float64SetDesc) findNodeRemove(value float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) int {
  1532  	// lFound represents the index of the first layer at which it found a node.
  1533  	lFound, x := -1, s.header
  1534  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1535  		succ := x.atomicLoadNext(i)
  1536  		for succ != nil && succ.lessthan(value) {
  1537  			x = succ
  1538  			succ = x.atomicLoadNext(i)
  1539  		}
  1540  		preds[i] = x
  1541  		succs[i] = succ
  1542  
  1543  		// Check if the value already in the skip list.
  1544  		if lFound == -1 && succ != nil && succ.equal(value) {
  1545  			lFound = i
  1546  		}
  1547  	}
  1548  	return lFound
  1549  }
  1550  
  1551  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  1552  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1553  func (s *Float64SetDesc) findNodeAdd(value float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) int {
  1554  	x := s.header
  1555  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1556  		succ := x.atomicLoadNext(i)
  1557  		for succ != nil && succ.lessthan(value) {
  1558  			x = succ
  1559  			succ = x.atomicLoadNext(i)
  1560  		}
  1561  		preds[i] = x
  1562  		succs[i] = succ
  1563  
  1564  		// Check if the value already in the skip list.
  1565  		if succ != nil && succ.equal(value) {
  1566  			return i
  1567  		}
  1568  	}
  1569  	return -1
  1570  }
  1571  
  1572  func unlockFloat64Desc(preds [maxLevel]*float64NodeDesc, highestLevel int) {
  1573  	var prevPred *float64NodeDesc
  1574  	for i := highestLevel; i >= 0; i-- {
  1575  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  1576  			preds[i].mu.Unlock()
  1577  			prevPred = preds[i]
  1578  		}
  1579  	}
  1580  }
  1581  
  1582  // Add add the value into skip set, return true if this process insert the value into skip set,
  1583  // return false if this process can't insert this value, because another process has insert the same value.
  1584  //
  1585  // If the value is in the skip set but not fully linked, this process will wait until it is.
  1586  func (s *Float64SetDesc) Add(value float64) bool {
  1587  	level := s.randomLevel()
  1588  	var preds, succs [maxLevel]*float64NodeDesc
  1589  	for {
  1590  		lFound := s.findNodeAdd(value, &preds, &succs)
  1591  		if lFound != -1 { // indicating the value is already in the skip-list
  1592  			nodeFound := succs[lFound]
  1593  			if !nodeFound.flags.Get(marked) {
  1594  				for !nodeFound.flags.Get(fullyLinked) {
  1595  					// The node is not yet fully linked, just waits until it is.
  1596  				}
  1597  				return false
  1598  			}
  1599  			// If the node is marked, represents some other thread is in the process of deleting this node,
  1600  			// we need to add this node in next loop.
  1601  			continue
  1602  		}
  1603  		// Add this node into skip list.
  1604  		var (
  1605  			highestLocked        = -1 // the highest level being locked by this process
  1606  			valid                = true
  1607  			pred, succ, prevPred *float64NodeDesc
  1608  		)
  1609  		for layer := 0; valid && layer < level; layer++ {
  1610  			pred = preds[layer]   // target node's previous node
  1611  			succ = succs[layer]   // target node's next node
  1612  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1613  				pred.mu.Lock()
  1614  				highestLocked = layer
  1615  				prevPred = pred
  1616  			}
  1617  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  1618  			// It is valid if:
  1619  			// 1. The previous node and next node both are not marked.
  1620  			// 2. The previous node's next node is succ in this layer.
  1621  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1622  		}
  1623  		if !valid {
  1624  			unlockFloat64Desc(preds, highestLocked)
  1625  			continue
  1626  		}
  1627  
  1628  		nn := newFloat64NodeDesc(value, level)
  1629  		for layer := 0; layer < level; layer++ {
  1630  			nn.storeNext(layer, succs[layer])
  1631  			preds[layer].atomicStoreNext(layer, nn)
  1632  		}
  1633  		nn.flags.SetTrue(fullyLinked)
  1634  		unlockFloat64Desc(preds, highestLocked)
  1635  		atomic.AddInt64(&s.length, 1)
  1636  		return true
  1637  	}
  1638  }
  1639  
  1640  func (s *Float64SetDesc) randomLevel() int {
  1641  	// Generate random level.
  1642  	level := randomLevel()
  1643  	// Update highest level if possible.
  1644  	for {
  1645  		hl := atomic.LoadInt64(&s.highestLevel)
  1646  		if int64(level) <= hl {
  1647  			break
  1648  		}
  1649  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1650  			break
  1651  		}
  1652  	}
  1653  	return level
  1654  }
  1655  
  1656  // Contains check if the value is in the skip set.
  1657  func (s *Float64SetDesc) Contains(value float64) bool {
  1658  	x := s.header
  1659  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1660  		nex := x.atomicLoadNext(i)
  1661  		for nex != nil && nex.lessthan(value) {
  1662  			x = nex
  1663  			nex = x.atomicLoadNext(i)
  1664  		}
  1665  
  1666  		// Check if the value already in the skip list.
  1667  		if nex != nil && nex.equal(value) {
  1668  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  1669  		}
  1670  	}
  1671  	return false
  1672  }
  1673  
  1674  // Remove a node from the skip set.
  1675  func (s *Float64SetDesc) Remove(value float64) bool {
  1676  	var (
  1677  		nodeToRemove *float64NodeDesc
  1678  		isMarked     bool // represents if this operation mark the node
  1679  		topLayer     = -1
  1680  		preds, succs [maxLevel]*float64NodeDesc
  1681  	)
  1682  	for {
  1683  		lFound := s.findNodeRemove(value, &preds, &succs)
  1684  		if isMarked || // this process mark this node or we can find this node in the skip list
  1685  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1686  			if !isMarked { // we don't mark this node for now
  1687  				nodeToRemove = succs[lFound]
  1688  				topLayer = lFound
  1689  				nodeToRemove.mu.Lock()
  1690  				if nodeToRemove.flags.Get(marked) {
  1691  					// The node is marked by another process,
  1692  					// the physical deletion will be accomplished by another process.
  1693  					nodeToRemove.mu.Unlock()
  1694  					return false
  1695  				}
  1696  				nodeToRemove.flags.SetTrue(marked)
  1697  				isMarked = true
  1698  			}
  1699  			// Accomplish the physical deletion.
  1700  			var (
  1701  				highestLocked        = -1 // the highest level being locked by this process
  1702  				valid                = true
  1703  				pred, succ, prevPred *float64NodeDesc
  1704  			)
  1705  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1706  				pred, succ = preds[layer], succs[layer]
  1707  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1708  					pred.mu.Lock()
  1709  					highestLocked = layer
  1710  					prevPred = pred
  1711  				}
  1712  				// valid check if there is another node has inserted into the skip list in this layer
  1713  				// during this process, or the previous is removed by another process.
  1714  				// It is valid if:
  1715  				// 1. the previous node exists.
  1716  				// 2. no another node has inserted into the skip list in this layer.
  1717  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  1718  			}
  1719  			if !valid {
  1720  				unlockFloat64Desc(preds, highestLocked)
  1721  				continue
  1722  			}
  1723  			for i := topLayer; i >= 0; i-- {
  1724  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  1725  				// So we don't need `nodeToRemove.loadNext`
  1726  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  1727  			}
  1728  			nodeToRemove.mu.Unlock()
  1729  			unlockFloat64Desc(preds, highestLocked)
  1730  			atomic.AddInt64(&s.length, -1)
  1731  			return true
  1732  		}
  1733  		return false
  1734  	}
  1735  }
  1736  
  1737  // Range calls f sequentially for each value present in the skip set.
  1738  // If f returns false, range stops the iteration.
  1739  func (s *Float64SetDesc) Range(f func(value float64) bool) {
  1740  	x := s.header.atomicLoadNext(0)
  1741  	for x != nil {
  1742  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  1743  			x = x.atomicLoadNext(0)
  1744  			continue
  1745  		}
  1746  		if !f(x.value) {
  1747  			break
  1748  		}
  1749  		x = x.atomicLoadNext(0)
  1750  	}
  1751  }
  1752  
  1753  // Len return the length of this skip set.
  1754  func (s *Float64SetDesc) Len() int {
  1755  	return int(atomic.LoadInt64(&s.length))
  1756  }
  1757  
  1758  // IntSet represents a set based on skip list in ascending order.
  1759  type IntSet struct {
  1760  	header       *intNode
  1761  	length       int64
  1762  	highestLevel int64 // highest level for now
  1763  }
  1764  
  1765  type intNode struct {
  1766  	value int
  1767  	next  optionalArray // [level]*intNode
  1768  	mu    sync.Mutex
  1769  	flags bitflag
  1770  	level uint32
  1771  }
  1772  
  1773  func newIntNode(value int, level int) *intNode {
  1774  	node := &intNode{
  1775  		value: value,
  1776  		level: uint32(level),
  1777  	}
  1778  	if level > op1 {
  1779  		node.next.extra = new([op2]unsafe.Pointer)
  1780  	}
  1781  	return node
  1782  }
  1783  
  1784  func (n *intNode) loadNext(i int) *intNode {
  1785  	return (*intNode)(n.next.load(i))
  1786  }
  1787  
  1788  func (n *intNode) storeNext(i int, node *intNode) {
  1789  	n.next.store(i, unsafe.Pointer(node))
  1790  }
  1791  
  1792  func (n *intNode) atomicLoadNext(i int) *intNode {
  1793  	return (*intNode)(n.next.atomicLoad(i))
  1794  }
  1795  
  1796  func (n *intNode) atomicStoreNext(i int, node *intNode) {
  1797  	n.next.atomicStore(i, unsafe.Pointer(node))
  1798  }
  1799  
  1800  func (n *intNode) lessthan(value int) bool {
  1801  	return n.value < value
  1802  }
  1803  
  1804  func (n *intNode) equal(value int) bool {
  1805  	return n.value == value
  1806  }
  1807  
  1808  // NewInt return an empty int skip set in ascending order.
  1809  func NewInt() *IntSet {
  1810  	h := newIntNode(0, maxLevel)
  1811  	h.flags.SetTrue(fullyLinked)
  1812  	return &IntSet{
  1813  		header:       h,
  1814  		highestLevel: defaultHighestLevel,
  1815  	}
  1816  }
  1817  
  1818  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  1819  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1820  func (s *IntSet) findNodeRemove(value int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) int {
  1821  	// lFound represents the index of the first layer at which it found a node.
  1822  	lFound, x := -1, s.header
  1823  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1824  		succ := x.atomicLoadNext(i)
  1825  		for succ != nil && succ.lessthan(value) {
  1826  			x = succ
  1827  			succ = x.atomicLoadNext(i)
  1828  		}
  1829  		preds[i] = x
  1830  		succs[i] = succ
  1831  
  1832  		// Check if the value already in the skip list.
  1833  		if lFound == -1 && succ != nil && succ.equal(value) {
  1834  			lFound = i
  1835  		}
  1836  	}
  1837  	return lFound
  1838  }
  1839  
  1840  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  1841  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1842  func (s *IntSet) findNodeAdd(value int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) int {
  1843  	x := s.header
  1844  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1845  		succ := x.atomicLoadNext(i)
  1846  		for succ != nil && succ.lessthan(value) {
  1847  			x = succ
  1848  			succ = x.atomicLoadNext(i)
  1849  		}
  1850  		preds[i] = x
  1851  		succs[i] = succ
  1852  
  1853  		// Check if the value already in the skip list.
  1854  		if succ != nil && succ.equal(value) {
  1855  			return i
  1856  		}
  1857  	}
  1858  	return -1
  1859  }
  1860  
  1861  func unlockInt(preds [maxLevel]*intNode, highestLevel int) {
  1862  	var prevPred *intNode
  1863  	for i := highestLevel; i >= 0; i-- {
  1864  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  1865  			preds[i].mu.Unlock()
  1866  			prevPred = preds[i]
  1867  		}
  1868  	}
  1869  }
  1870  
  1871  // Add add the value into skip set, return true if this process insert the value into skip set,
  1872  // return false if this process can't insert this value, because another process has insert the same value.
  1873  //
  1874  // If the value is in the skip set but not fully linked, this process will wait until it is.
  1875  func (s *IntSet) Add(value int) bool {
  1876  	level := s.randomLevel()
  1877  	var preds, succs [maxLevel]*intNode
  1878  	for {
  1879  		lFound := s.findNodeAdd(value, &preds, &succs)
  1880  		if lFound != -1 { // indicating the value is already in the skip-list
  1881  			nodeFound := succs[lFound]
  1882  			if !nodeFound.flags.Get(marked) {
  1883  				for !nodeFound.flags.Get(fullyLinked) {
  1884  					// The node is not yet fully linked, just waits until it is.
  1885  				}
  1886  				return false
  1887  			}
  1888  			// If the node is marked, represents some other thread is in the process of deleting this node,
  1889  			// we need to add this node in next loop.
  1890  			continue
  1891  		}
  1892  		// Add this node into skip list.
  1893  		var (
  1894  			highestLocked        = -1 // the highest level being locked by this process
  1895  			valid                = true
  1896  			pred, succ, prevPred *intNode
  1897  		)
  1898  		for layer := 0; valid && layer < level; layer++ {
  1899  			pred = preds[layer]   // target node's previous node
  1900  			succ = succs[layer]   // target node's next node
  1901  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1902  				pred.mu.Lock()
  1903  				highestLocked = layer
  1904  				prevPred = pred
  1905  			}
  1906  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  1907  			// It is valid if:
  1908  			// 1. The previous node and next node both are not marked.
  1909  			// 2. The previous node's next node is succ in this layer.
  1910  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1911  		}
  1912  		if !valid {
  1913  			unlockInt(preds, highestLocked)
  1914  			continue
  1915  		}
  1916  
  1917  		nn := newIntNode(value, level)
  1918  		for layer := 0; layer < level; layer++ {
  1919  			nn.storeNext(layer, succs[layer])
  1920  			preds[layer].atomicStoreNext(layer, nn)
  1921  		}
  1922  		nn.flags.SetTrue(fullyLinked)
  1923  		unlockInt(preds, highestLocked)
  1924  		atomic.AddInt64(&s.length, 1)
  1925  		return true
  1926  	}
  1927  }
  1928  
  1929  func (s *IntSet) randomLevel() int {
  1930  	// Generate random level.
  1931  	level := randomLevel()
  1932  	// Update highest level if possible.
  1933  	for {
  1934  		hl := atomic.LoadInt64(&s.highestLevel)
  1935  		if int64(level) <= hl {
  1936  			break
  1937  		}
  1938  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1939  			break
  1940  		}
  1941  	}
  1942  	return level
  1943  }
  1944  
  1945  // Contains check if the value is in the skip set.
  1946  func (s *IntSet) Contains(value int) bool {
  1947  	x := s.header
  1948  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1949  		nex := x.atomicLoadNext(i)
  1950  		for nex != nil && nex.lessthan(value) {
  1951  			x = nex
  1952  			nex = x.atomicLoadNext(i)
  1953  		}
  1954  
  1955  		// Check if the value already in the skip list.
  1956  		if nex != nil && nex.equal(value) {
  1957  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  1958  		}
  1959  	}
  1960  	return false
  1961  }
  1962  
  1963  // Remove a node from the skip set.
  1964  func (s *IntSet) Remove(value int) bool {
  1965  	var (
  1966  		nodeToRemove *intNode
  1967  		isMarked     bool // represents if this operation mark the node
  1968  		topLayer     = -1
  1969  		preds, succs [maxLevel]*intNode
  1970  	)
  1971  	for {
  1972  		lFound := s.findNodeRemove(value, &preds, &succs)
  1973  		if isMarked || // this process mark this node or we can find this node in the skip list
  1974  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1975  			if !isMarked { // we don't mark this node for now
  1976  				nodeToRemove = succs[lFound]
  1977  				topLayer = lFound
  1978  				nodeToRemove.mu.Lock()
  1979  				if nodeToRemove.flags.Get(marked) {
  1980  					// The node is marked by another process,
  1981  					// the physical deletion will be accomplished by another process.
  1982  					nodeToRemove.mu.Unlock()
  1983  					return false
  1984  				}
  1985  				nodeToRemove.flags.SetTrue(marked)
  1986  				isMarked = true
  1987  			}
  1988  			// Accomplish the physical deletion.
  1989  			var (
  1990  				highestLocked        = -1 // the highest level being locked by this process
  1991  				valid                = true
  1992  				pred, succ, prevPred *intNode
  1993  			)
  1994  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1995  				pred, succ = preds[layer], succs[layer]
  1996  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1997  					pred.mu.Lock()
  1998  					highestLocked = layer
  1999  					prevPred = pred
  2000  				}
  2001  				// valid check if there is another node has inserted into the skip list in this layer
  2002  				// during this process, or the previous is removed by another process.
  2003  				// It is valid if:
  2004  				// 1. the previous node exists.
  2005  				// 2. no another node has inserted into the skip list in this layer.
  2006  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2007  			}
  2008  			if !valid {
  2009  				unlockInt(preds, highestLocked)
  2010  				continue
  2011  			}
  2012  			for i := topLayer; i >= 0; i-- {
  2013  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  2014  				// So we don't need `nodeToRemove.loadNext`
  2015  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  2016  			}
  2017  			nodeToRemove.mu.Unlock()
  2018  			unlockInt(preds, highestLocked)
  2019  			atomic.AddInt64(&s.length, -1)
  2020  			return true
  2021  		}
  2022  		return false
  2023  	}
  2024  }
  2025  
  2026  // Range calls f sequentially for each value present in the skip set.
  2027  // If f returns false, range stops the iteration.
  2028  func (s *IntSet) Range(f func(value int) bool) {
  2029  	x := s.header.atomicLoadNext(0)
  2030  	for x != nil {
  2031  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2032  			x = x.atomicLoadNext(0)
  2033  			continue
  2034  		}
  2035  		if !f(x.value) {
  2036  			break
  2037  		}
  2038  		x = x.atomicLoadNext(0)
  2039  	}
  2040  }
  2041  
  2042  // Len return the length of this skip set.
  2043  func (s *IntSet) Len() int {
  2044  	return int(atomic.LoadInt64(&s.length))
  2045  }
  2046  
  2047  // IntSetDesc represents a set based on skip list in descending order.
  2048  type IntSetDesc struct {
  2049  	header       *intNodeDesc
  2050  	length       int64
  2051  	highestLevel int64 // highest level for now
  2052  }
  2053  
  2054  type intNodeDesc struct {
  2055  	value int
  2056  	next  optionalArray // [level]*intNodeDesc
  2057  	mu    sync.Mutex
  2058  	flags bitflag
  2059  	level uint32
  2060  }
  2061  
  2062  func newIntNodeDesc(value int, level int) *intNodeDesc {
  2063  	node := &intNodeDesc{
  2064  		value: value,
  2065  		level: uint32(level),
  2066  	}
  2067  	if level > op1 {
  2068  		node.next.extra = new([op2]unsafe.Pointer)
  2069  	}
  2070  	return node
  2071  }
  2072  
  2073  func (n *intNodeDesc) loadNext(i int) *intNodeDesc {
  2074  	return (*intNodeDesc)(n.next.load(i))
  2075  }
  2076  
  2077  func (n *intNodeDesc) storeNext(i int, node *intNodeDesc) {
  2078  	n.next.store(i, unsafe.Pointer(node))
  2079  }
  2080  
  2081  func (n *intNodeDesc) atomicLoadNext(i int) *intNodeDesc {
  2082  	return (*intNodeDesc)(n.next.atomicLoad(i))
  2083  }
  2084  
  2085  func (n *intNodeDesc) atomicStoreNext(i int, node *intNodeDesc) {
  2086  	n.next.atomicStore(i, unsafe.Pointer(node))
  2087  }
  2088  
  2089  func (n *intNodeDesc) lessthan(value int) bool {
  2090  	return n.value > value
  2091  }
  2092  
  2093  func (n *intNodeDesc) equal(value int) bool {
  2094  	return n.value == value
  2095  }
  2096  
  2097  // NewIntDesc return an empty int skip set in descending order.
  2098  func NewIntDesc() *IntSetDesc {
  2099  	h := newIntNodeDesc(0, maxLevel)
  2100  	h.flags.SetTrue(fullyLinked)
  2101  	return &IntSetDesc{
  2102  		header:       h,
  2103  		highestLevel: defaultHighestLevel,
  2104  	}
  2105  }
  2106  
  2107  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2108  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2109  func (s *IntSetDesc) findNodeRemove(value int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) int {
  2110  	// lFound represents the index of the first layer at which it found a node.
  2111  	lFound, x := -1, s.header
  2112  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2113  		succ := x.atomicLoadNext(i)
  2114  		for succ != nil && succ.lessthan(value) {
  2115  			x = succ
  2116  			succ = x.atomicLoadNext(i)
  2117  		}
  2118  		preds[i] = x
  2119  		succs[i] = succ
  2120  
  2121  		// Check if the value already in the skip list.
  2122  		if lFound == -1 && succ != nil && succ.equal(value) {
  2123  			lFound = i
  2124  		}
  2125  	}
  2126  	return lFound
  2127  }
  2128  
  2129  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  2130  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2131  func (s *IntSetDesc) findNodeAdd(value int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) int {
  2132  	x := s.header
  2133  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2134  		succ := x.atomicLoadNext(i)
  2135  		for succ != nil && succ.lessthan(value) {
  2136  			x = succ
  2137  			succ = x.atomicLoadNext(i)
  2138  		}
  2139  		preds[i] = x
  2140  		succs[i] = succ
  2141  
  2142  		// Check if the value already in the skip list.
  2143  		if succ != nil && succ.equal(value) {
  2144  			return i
  2145  		}
  2146  	}
  2147  	return -1
  2148  }
  2149  
  2150  func unlockIntDesc(preds [maxLevel]*intNodeDesc, highestLevel int) {
  2151  	var prevPred *intNodeDesc
  2152  	for i := highestLevel; i >= 0; i-- {
  2153  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  2154  			preds[i].mu.Unlock()
  2155  			prevPred = preds[i]
  2156  		}
  2157  	}
  2158  }
  2159  
  2160  // Add add the value into skip set, return true if this process insert the value into skip set,
  2161  // return false if this process can't insert this value, because another process has insert the same value.
  2162  //
  2163  // If the value is in the skip set but not fully linked, this process will wait until it is.
  2164  func (s *IntSetDesc) Add(value int) bool {
  2165  	level := s.randomLevel()
  2166  	var preds, succs [maxLevel]*intNodeDesc
  2167  	for {
  2168  		lFound := s.findNodeAdd(value, &preds, &succs)
  2169  		if lFound != -1 { // indicating the value is already in the skip-list
  2170  			nodeFound := succs[lFound]
  2171  			if !nodeFound.flags.Get(marked) {
  2172  				for !nodeFound.flags.Get(fullyLinked) {
  2173  					// The node is not yet fully linked, just waits until it is.
  2174  				}
  2175  				return false
  2176  			}
  2177  			// If the node is marked, represents some other thread is in the process of deleting this node,
  2178  			// we need to add this node in next loop.
  2179  			continue
  2180  		}
  2181  		// Add this node into skip list.
  2182  		var (
  2183  			highestLocked        = -1 // the highest level being locked by this process
  2184  			valid                = true
  2185  			pred, succ, prevPred *intNodeDesc
  2186  		)
  2187  		for layer := 0; valid && layer < level; layer++ {
  2188  			pred = preds[layer]   // target node's previous node
  2189  			succ = succs[layer]   // target node's next node
  2190  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2191  				pred.mu.Lock()
  2192  				highestLocked = layer
  2193  				prevPred = pred
  2194  			}
  2195  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  2196  			// It is valid if:
  2197  			// 1. The previous node and next node both are not marked.
  2198  			// 2. The previous node's next node is succ in this layer.
  2199  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2200  		}
  2201  		if !valid {
  2202  			unlockIntDesc(preds, highestLocked)
  2203  			continue
  2204  		}
  2205  
  2206  		nn := newIntNodeDesc(value, level)
  2207  		for layer := 0; layer < level; layer++ {
  2208  			nn.storeNext(layer, succs[layer])
  2209  			preds[layer].atomicStoreNext(layer, nn)
  2210  		}
  2211  		nn.flags.SetTrue(fullyLinked)
  2212  		unlockIntDesc(preds, highestLocked)
  2213  		atomic.AddInt64(&s.length, 1)
  2214  		return true
  2215  	}
  2216  }
  2217  
  2218  func (s *IntSetDesc) randomLevel() int {
  2219  	// Generate random level.
  2220  	level := randomLevel()
  2221  	// Update highest level if possible.
  2222  	for {
  2223  		hl := atomic.LoadInt64(&s.highestLevel)
  2224  		if int64(level) <= hl {
  2225  			break
  2226  		}
  2227  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  2228  			break
  2229  		}
  2230  	}
  2231  	return level
  2232  }
  2233  
  2234  // Contains check if the value is in the skip set.
  2235  func (s *IntSetDesc) Contains(value int) bool {
  2236  	x := s.header
  2237  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2238  		nex := x.atomicLoadNext(i)
  2239  		for nex != nil && nex.lessthan(value) {
  2240  			x = nex
  2241  			nex = x.atomicLoadNext(i)
  2242  		}
  2243  
  2244  		// Check if the value already in the skip list.
  2245  		if nex != nil && nex.equal(value) {
  2246  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  2247  		}
  2248  	}
  2249  	return false
  2250  }
  2251  
  2252  // Remove a node from the skip set.
  2253  func (s *IntSetDesc) Remove(value int) bool {
  2254  	var (
  2255  		nodeToRemove *intNodeDesc
  2256  		isMarked     bool // represents if this operation mark the node
  2257  		topLayer     = -1
  2258  		preds, succs [maxLevel]*intNodeDesc
  2259  	)
  2260  	for {
  2261  		lFound := s.findNodeRemove(value, &preds, &succs)
  2262  		if isMarked || // this process mark this node or we can find this node in the skip list
  2263  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2264  			if !isMarked { // we don't mark this node for now
  2265  				nodeToRemove = succs[lFound]
  2266  				topLayer = lFound
  2267  				nodeToRemove.mu.Lock()
  2268  				if nodeToRemove.flags.Get(marked) {
  2269  					// The node is marked by another process,
  2270  					// the physical deletion will be accomplished by another process.
  2271  					nodeToRemove.mu.Unlock()
  2272  					return false
  2273  				}
  2274  				nodeToRemove.flags.SetTrue(marked)
  2275  				isMarked = true
  2276  			}
  2277  			// Accomplish the physical deletion.
  2278  			var (
  2279  				highestLocked        = -1 // the highest level being locked by this process
  2280  				valid                = true
  2281  				pred, succ, prevPred *intNodeDesc
  2282  			)
  2283  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2284  				pred, succ = preds[layer], succs[layer]
  2285  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2286  					pred.mu.Lock()
  2287  					highestLocked = layer
  2288  					prevPred = pred
  2289  				}
  2290  				// valid check if there is another node has inserted into the skip list in this layer
  2291  				// during this process, or the previous is removed by another process.
  2292  				// It is valid if:
  2293  				// 1. the previous node exists.
  2294  				// 2. no another node has inserted into the skip list in this layer.
  2295  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2296  			}
  2297  			if !valid {
  2298  				unlockIntDesc(preds, highestLocked)
  2299  				continue
  2300  			}
  2301  			for i := topLayer; i >= 0; i-- {
  2302  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  2303  				// So we don't need `nodeToRemove.loadNext`
  2304  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  2305  			}
  2306  			nodeToRemove.mu.Unlock()
  2307  			unlockIntDesc(preds, highestLocked)
  2308  			atomic.AddInt64(&s.length, -1)
  2309  			return true
  2310  		}
  2311  		return false
  2312  	}
  2313  }
  2314  
  2315  // Range calls f sequentially for each value present in the skip set.
  2316  // If f returns false, range stops the iteration.
  2317  func (s *IntSetDesc) Range(f func(value int) bool) {
  2318  	x := s.header.atomicLoadNext(0)
  2319  	for x != nil {
  2320  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2321  			x = x.atomicLoadNext(0)
  2322  			continue
  2323  		}
  2324  		if !f(x.value) {
  2325  			break
  2326  		}
  2327  		x = x.atomicLoadNext(0)
  2328  	}
  2329  }
  2330  
  2331  // Len return the length of this skip set.
  2332  func (s *IntSetDesc) Len() int {
  2333  	return int(atomic.LoadInt64(&s.length))
  2334  }
  2335  
  2336  // Int8Set represents a set based on skip list in ascending order.
  2337  type Int8Set struct {
  2338  	header       *int8Node
  2339  	length       int64
  2340  	highestLevel int64 // highest level for now
  2341  }
  2342  
  2343  type int8Node struct {
  2344  	value int8
  2345  	next  optionalArray // [level]*int8Node
  2346  	mu    sync.Mutex
  2347  	flags bitflag
  2348  	level uint32
  2349  }
  2350  
  2351  func newInt8Node(value int8, level int) *int8Node {
  2352  	node := &int8Node{
  2353  		value: value,
  2354  		level: uint32(level),
  2355  	}
  2356  	if level > op1 {
  2357  		node.next.extra = new([op2]unsafe.Pointer)
  2358  	}
  2359  	return node
  2360  }
  2361  
  2362  func (n *int8Node) loadNext(i int) *int8Node {
  2363  	return (*int8Node)(n.next.load(i))
  2364  }
  2365  
  2366  func (n *int8Node) storeNext(i int, node *int8Node) {
  2367  	n.next.store(i, unsafe.Pointer(node))
  2368  }
  2369  
  2370  func (n *int8Node) atomicLoadNext(i int) *int8Node {
  2371  	return (*int8Node)(n.next.atomicLoad(i))
  2372  }
  2373  
  2374  func (n *int8Node) atomicStoreNext(i int, node *int8Node) {
  2375  	n.next.atomicStore(i, unsafe.Pointer(node))
  2376  }
  2377  
  2378  func (n *int8Node) lessthan(value int8) bool {
  2379  	return n.value < value
  2380  }
  2381  
  2382  func (n *int8Node) equal(value int8) bool {
  2383  	return n.value == value
  2384  }
  2385  
  2386  // NewInt8 return an empty int8 skip set in ascending order.
  2387  func NewInt8() *Int8Set {
  2388  	h := newInt8Node(0, maxLevel)
  2389  	h.flags.SetTrue(fullyLinked)
  2390  	return &Int8Set{
  2391  		header:       h,
  2392  		highestLevel: defaultHighestLevel,
  2393  	}
  2394  }
  2395  
  2396  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2397  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2398  func (s *Int8Set) findNodeRemove(value int8, preds *[maxLevel]*int8Node, succs *[maxLevel]*int8Node) int {
  2399  	// lFound represents the index of the first layer at which it found a node.
  2400  	lFound, x := -1, s.header
  2401  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2402  		succ := x.atomicLoadNext(i)
  2403  		for succ != nil && succ.lessthan(value) {
  2404  			x = succ
  2405  			succ = x.atomicLoadNext(i)
  2406  		}
  2407  		preds[i] = x
  2408  		succs[i] = succ
  2409  
  2410  		// Check if the value already in the skip list.
  2411  		if lFound == -1 && succ != nil && succ.equal(value) {
  2412  			lFound = i
  2413  		}
  2414  	}
  2415  	return lFound
  2416  }
  2417  
  2418  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  2419  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2420  func (s *Int8Set) findNodeAdd(value int8, preds *[maxLevel]*int8Node, succs *[maxLevel]*int8Node) int {
  2421  	x := s.header
  2422  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2423  		succ := x.atomicLoadNext(i)
  2424  		for succ != nil && succ.lessthan(value) {
  2425  			x = succ
  2426  			succ = x.atomicLoadNext(i)
  2427  		}
  2428  		preds[i] = x
  2429  		succs[i] = succ
  2430  
  2431  		// Check if the value already in the skip list.
  2432  		if succ != nil && succ.equal(value) {
  2433  			return i
  2434  		}
  2435  	}
  2436  	return -1
  2437  }
  2438  
  2439  func unlockInt8(preds [maxLevel]*int8Node, highestLevel int) {
  2440  	var prevPred *int8Node
  2441  	for i := highestLevel; i >= 0; i-- {
  2442  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  2443  			preds[i].mu.Unlock()
  2444  			prevPred = preds[i]
  2445  		}
  2446  	}
  2447  }
  2448  
  2449  // Add add the value into skip set, return true if this process insert the value into skip set,
  2450  // return false if this process can't insert this value, because another process has insert the same value.
  2451  //
  2452  // If the value is in the skip set but not fully linked, this process will wait until it is.
  2453  func (s *Int8Set) Add(value int8) bool {
  2454  	level := s.randomLevel()
  2455  	var preds, succs [maxLevel]*int8Node
  2456  	for {
  2457  		lFound := s.findNodeAdd(value, &preds, &succs)
  2458  		if lFound != -1 { // indicating the value is already in the skip-list
  2459  			nodeFound := succs[lFound]
  2460  			if !nodeFound.flags.Get(marked) {
  2461  				for !nodeFound.flags.Get(fullyLinked) {
  2462  					// The node is not yet fully linked, just waits until it is.
  2463  				}
  2464  				return false
  2465  			}
  2466  			// If the node is marked, represents some other thread is in the process of deleting this node,
  2467  			// we need to add this node in next loop.
  2468  			continue
  2469  		}
  2470  		// Add this node into skip list.
  2471  		var (
  2472  			highestLocked        = -1 // the highest level being locked by this process
  2473  			valid                = true
  2474  			pred, succ, prevPred *int8Node
  2475  		)
  2476  		for layer := 0; valid && layer < level; layer++ {
  2477  			pred = preds[layer]   // target node's previous node
  2478  			succ = succs[layer]   // target node's next node
  2479  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2480  				pred.mu.Lock()
  2481  				highestLocked = layer
  2482  				prevPred = pred
  2483  			}
  2484  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  2485  			// It is valid if:
  2486  			// 1. The previous node and next node both are not marked.
  2487  			// 2. The previous node's next node is succ in this layer.
  2488  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2489  		}
  2490  		if !valid {
  2491  			unlockInt8(preds, highestLocked)
  2492  			continue
  2493  		}
  2494  
  2495  		nn := newInt8Node(value, level)
  2496  		for layer := 0; layer < level; layer++ {
  2497  			nn.storeNext(layer, succs[layer])
  2498  			preds[layer].atomicStoreNext(layer, nn)
  2499  		}
  2500  		nn.flags.SetTrue(fullyLinked)
  2501  		unlockInt8(preds, highestLocked)
  2502  		atomic.AddInt64(&s.length, 1)
  2503  		return true
  2504  	}
  2505  }
  2506  
  2507  func (s *Int8Set) randomLevel() int {
  2508  	// Generate random level.
  2509  	level := randomLevel()
  2510  	// Update highest level if possible.
  2511  	for {
  2512  		hl := atomic.LoadInt64(&s.highestLevel)
  2513  		if int64(level) <= hl {
  2514  			break
  2515  		}
  2516  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  2517  			break
  2518  		}
  2519  	}
  2520  	return level
  2521  }
  2522  
  2523  // Contains check if the value is in the skip set.
  2524  func (s *Int8Set) Contains(value int8) bool {
  2525  	x := s.header
  2526  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2527  		nex := x.atomicLoadNext(i)
  2528  		for nex != nil && nex.lessthan(value) {
  2529  			x = nex
  2530  			nex = x.atomicLoadNext(i)
  2531  		}
  2532  
  2533  		// Check if the value already in the skip list.
  2534  		if nex != nil && nex.equal(value) {
  2535  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  2536  		}
  2537  	}
  2538  	return false
  2539  }
  2540  
  2541  // Remove a node from the skip set.
  2542  func (s *Int8Set) Remove(value int8) bool {
  2543  	var (
  2544  		nodeToRemove *int8Node
  2545  		isMarked     bool // represents if this operation mark the node
  2546  		topLayer     = -1
  2547  		preds, succs [maxLevel]*int8Node
  2548  	)
  2549  	for {
  2550  		lFound := s.findNodeRemove(value, &preds, &succs)
  2551  		if isMarked || // this process mark this node or we can find this node in the skip list
  2552  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2553  			if !isMarked { // we don't mark this node for now
  2554  				nodeToRemove = succs[lFound]
  2555  				topLayer = lFound
  2556  				nodeToRemove.mu.Lock()
  2557  				if nodeToRemove.flags.Get(marked) {
  2558  					// The node is marked by another process,
  2559  					// the physical deletion will be accomplished by another process.
  2560  					nodeToRemove.mu.Unlock()
  2561  					return false
  2562  				}
  2563  				nodeToRemove.flags.SetTrue(marked)
  2564  				isMarked = true
  2565  			}
  2566  			// Accomplish the physical deletion.
  2567  			var (
  2568  				highestLocked        = -1 // the highest level being locked by this process
  2569  				valid                = true
  2570  				pred, succ, prevPred *int8Node
  2571  			)
  2572  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2573  				pred, succ = preds[layer], succs[layer]
  2574  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2575  					pred.mu.Lock()
  2576  					highestLocked = layer
  2577  					prevPred = pred
  2578  				}
  2579  				// valid check if there is another node has inserted into the skip list in this layer
  2580  				// during this process, or the previous is removed by another process.
  2581  				// It is valid if:
  2582  				// 1. the previous node exists.
  2583  				// 2. no another node has inserted into the skip list in this layer.
  2584  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2585  			}
  2586  			if !valid {
  2587  				unlockInt8(preds, highestLocked)
  2588  				continue
  2589  			}
  2590  			for i := topLayer; i >= 0; i-- {
  2591  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  2592  				// So we don't need `nodeToRemove.loadNext`
  2593  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  2594  			}
  2595  			nodeToRemove.mu.Unlock()
  2596  			unlockInt8(preds, highestLocked)
  2597  			atomic.AddInt64(&s.length, -1)
  2598  			return true
  2599  		}
  2600  		return false
  2601  	}
  2602  }
  2603  
  2604  // Range calls f sequentially for each value present in the skip set.
  2605  // If f returns false, range stops the iteration.
  2606  func (s *Int8Set) Range(f func(value int8) bool) {
  2607  	x := s.header.atomicLoadNext(0)
  2608  	for x != nil {
  2609  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2610  			x = x.atomicLoadNext(0)
  2611  			continue
  2612  		}
  2613  		if !f(x.value) {
  2614  			break
  2615  		}
  2616  		x = x.atomicLoadNext(0)
  2617  	}
  2618  }
  2619  
  2620  // Len return the length of this skip set.
  2621  func (s *Int8Set) Len() int {
  2622  	return int(atomic.LoadInt64(&s.length))
  2623  }
  2624  
  2625  // Int8SetDesc represents a set based on skip list in descending order.
  2626  type Int8SetDesc struct {
  2627  	header       *int8NodeDesc
  2628  	length       int64
  2629  	highestLevel int64 // highest level for now
  2630  }
  2631  
  2632  type int8NodeDesc struct {
  2633  	value int8
  2634  	next  optionalArray // [level]*int8NodeDesc
  2635  	mu    sync.Mutex
  2636  	flags bitflag
  2637  	level uint32
  2638  }
  2639  
  2640  func newInt8NodeDesc(value int8, level int) *int8NodeDesc {
  2641  	node := &int8NodeDesc{
  2642  		value: value,
  2643  		level: uint32(level),
  2644  	}
  2645  	if level > op1 {
  2646  		node.next.extra = new([op2]unsafe.Pointer)
  2647  	}
  2648  	return node
  2649  }
  2650  
  2651  func (n *int8NodeDesc) loadNext(i int) *int8NodeDesc {
  2652  	return (*int8NodeDesc)(n.next.load(i))
  2653  }
  2654  
  2655  func (n *int8NodeDesc) storeNext(i int, node *int8NodeDesc) {
  2656  	n.next.store(i, unsafe.Pointer(node))
  2657  }
  2658  
  2659  func (n *int8NodeDesc) atomicLoadNext(i int) *int8NodeDesc {
  2660  	return (*int8NodeDesc)(n.next.atomicLoad(i))
  2661  }
  2662  
  2663  func (n *int8NodeDesc) atomicStoreNext(i int, node *int8NodeDesc) {
  2664  	n.next.atomicStore(i, unsafe.Pointer(node))
  2665  }
  2666  
  2667  func (n *int8NodeDesc) lessthan(value int8) bool {
  2668  	return n.value > value
  2669  }
  2670  
  2671  func (n *int8NodeDesc) equal(value int8) bool {
  2672  	return n.value == value
  2673  }
  2674  
  2675  // NewInt8Desc return an empty int8 skip set in descending order.
  2676  func NewInt8Desc() *Int8SetDesc {
  2677  	h := newInt8NodeDesc(0, maxLevel)
  2678  	h.flags.SetTrue(fullyLinked)
  2679  	return &Int8SetDesc{
  2680  		header:       h,
  2681  		highestLevel: defaultHighestLevel,
  2682  	}
  2683  }
  2684  
  2685  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2686  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2687  func (s *Int8SetDesc) findNodeRemove(value int8, preds *[maxLevel]*int8NodeDesc, succs *[maxLevel]*int8NodeDesc) int {
  2688  	// lFound represents the index of the first layer at which it found a node.
  2689  	lFound, x := -1, s.header
  2690  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2691  		succ := x.atomicLoadNext(i)
  2692  		for succ != nil && succ.lessthan(value) {
  2693  			x = succ
  2694  			succ = x.atomicLoadNext(i)
  2695  		}
  2696  		preds[i] = x
  2697  		succs[i] = succ
  2698  
  2699  		// Check if the value already in the skip list.
  2700  		if lFound == -1 && succ != nil && succ.equal(value) {
  2701  			lFound = i
  2702  		}
  2703  	}
  2704  	return lFound
  2705  }
  2706  
  2707  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  2708  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2709  func (s *Int8SetDesc) findNodeAdd(value int8, preds *[maxLevel]*int8NodeDesc, succs *[maxLevel]*int8NodeDesc) int {
  2710  	x := s.header
  2711  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2712  		succ := x.atomicLoadNext(i)
  2713  		for succ != nil && succ.lessthan(value) {
  2714  			x = succ
  2715  			succ = x.atomicLoadNext(i)
  2716  		}
  2717  		preds[i] = x
  2718  		succs[i] = succ
  2719  
  2720  		// Check if the value already in the skip list.
  2721  		if succ != nil && succ.equal(value) {
  2722  			return i
  2723  		}
  2724  	}
  2725  	return -1
  2726  }
  2727  
  2728  func unlockInt8Desc(preds [maxLevel]*int8NodeDesc, highestLevel int) {
  2729  	var prevPred *int8NodeDesc
  2730  	for i := highestLevel; i >= 0; i-- {
  2731  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  2732  			preds[i].mu.Unlock()
  2733  			prevPred = preds[i]
  2734  		}
  2735  	}
  2736  }
  2737  
  2738  // Add add the value into skip set, return true if this process insert the value into skip set,
  2739  // return false if this process can't insert this value, because another process has insert the same value.
  2740  //
  2741  // If the value is in the skip set but not fully linked, this process will wait until it is.
  2742  func (s *Int8SetDesc) Add(value int8) bool {
  2743  	level := s.randomLevel()
  2744  	var preds, succs [maxLevel]*int8NodeDesc
  2745  	for {
  2746  		lFound := s.findNodeAdd(value, &preds, &succs)
  2747  		if lFound != -1 { // indicating the value is already in the skip-list
  2748  			nodeFound := succs[lFound]
  2749  			if !nodeFound.flags.Get(marked) {
  2750  				for !nodeFound.flags.Get(fullyLinked) {
  2751  					// The node is not yet fully linked, just waits until it is.
  2752  				}
  2753  				return false
  2754  			}
  2755  			// If the node is marked, represents some other thread is in the process of deleting this node,
  2756  			// we need to add this node in next loop.
  2757  			continue
  2758  		}
  2759  		// Add this node into skip list.
  2760  		var (
  2761  			highestLocked        = -1 // the highest level being locked by this process
  2762  			valid                = true
  2763  			pred, succ, prevPred *int8NodeDesc
  2764  		)
  2765  		for layer := 0; valid && layer < level; layer++ {
  2766  			pred = preds[layer]   // target node's previous node
  2767  			succ = succs[layer]   // target node's next node
  2768  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2769  				pred.mu.Lock()
  2770  				highestLocked = layer
  2771  				prevPred = pred
  2772  			}
  2773  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  2774  			// It is valid if:
  2775  			// 1. The previous node and next node both are not marked.
  2776  			// 2. The previous node's next node is succ in this layer.
  2777  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2778  		}
  2779  		if !valid {
  2780  			unlockInt8Desc(preds, highestLocked)
  2781  			continue
  2782  		}
  2783  
  2784  		nn := newInt8NodeDesc(value, level)
  2785  		for layer := 0; layer < level; layer++ {
  2786  			nn.storeNext(layer, succs[layer])
  2787  			preds[layer].atomicStoreNext(layer, nn)
  2788  		}
  2789  		nn.flags.SetTrue(fullyLinked)
  2790  		unlockInt8Desc(preds, highestLocked)
  2791  		atomic.AddInt64(&s.length, 1)
  2792  		return true
  2793  	}
  2794  }
  2795  
  2796  func (s *Int8SetDesc) randomLevel() int {
  2797  	// Generate random level.
  2798  	level := randomLevel()
  2799  	// Update highest level if possible.
  2800  	for {
  2801  		hl := atomic.LoadInt64(&s.highestLevel)
  2802  		if int64(level) <= hl {
  2803  			break
  2804  		}
  2805  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  2806  			break
  2807  		}
  2808  	}
  2809  	return level
  2810  }
  2811  
  2812  // Contains check if the value is in the skip set.
  2813  func (s *Int8SetDesc) Contains(value int8) bool {
  2814  	x := s.header
  2815  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2816  		nex := x.atomicLoadNext(i)
  2817  		for nex != nil && nex.lessthan(value) {
  2818  			x = nex
  2819  			nex = x.atomicLoadNext(i)
  2820  		}
  2821  
  2822  		// Check if the value already in the skip list.
  2823  		if nex != nil && nex.equal(value) {
  2824  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  2825  		}
  2826  	}
  2827  	return false
  2828  }
  2829  
  2830  // Remove a node from the skip set.
  2831  func (s *Int8SetDesc) Remove(value int8) bool {
  2832  	var (
  2833  		nodeToRemove *int8NodeDesc
  2834  		isMarked     bool // represents if this operation mark the node
  2835  		topLayer     = -1
  2836  		preds, succs [maxLevel]*int8NodeDesc
  2837  	)
  2838  	for {
  2839  		lFound := s.findNodeRemove(value, &preds, &succs)
  2840  		if isMarked || // this process mark this node or we can find this node in the skip list
  2841  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2842  			if !isMarked { // we don't mark this node for now
  2843  				nodeToRemove = succs[lFound]
  2844  				topLayer = lFound
  2845  				nodeToRemove.mu.Lock()
  2846  				if nodeToRemove.flags.Get(marked) {
  2847  					// The node is marked by another process,
  2848  					// the physical deletion will be accomplished by another process.
  2849  					nodeToRemove.mu.Unlock()
  2850  					return false
  2851  				}
  2852  				nodeToRemove.flags.SetTrue(marked)
  2853  				isMarked = true
  2854  			}
  2855  			// Accomplish the physical deletion.
  2856  			var (
  2857  				highestLocked        = -1 // the highest level being locked by this process
  2858  				valid                = true
  2859  				pred, succ, prevPred *int8NodeDesc
  2860  			)
  2861  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2862  				pred, succ = preds[layer], succs[layer]
  2863  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2864  					pred.mu.Lock()
  2865  					highestLocked = layer
  2866  					prevPred = pred
  2867  				}
  2868  				// valid check if there is another node has inserted into the skip list in this layer
  2869  				// during this process, or the previous is removed by another process.
  2870  				// It is valid if:
  2871  				// 1. the previous node exists.
  2872  				// 2. no another node has inserted into the skip list in this layer.
  2873  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2874  			}
  2875  			if !valid {
  2876  				unlockInt8Desc(preds, highestLocked)
  2877  				continue
  2878  			}
  2879  			for i := topLayer; i >= 0; i-- {
  2880  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  2881  				// So we don't need `nodeToRemove.loadNext`
  2882  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  2883  			}
  2884  			nodeToRemove.mu.Unlock()
  2885  			unlockInt8Desc(preds, highestLocked)
  2886  			atomic.AddInt64(&s.length, -1)
  2887  			return true
  2888  		}
  2889  		return false
  2890  	}
  2891  }
  2892  
  2893  // Range calls f sequentially for each value present in the skip set.
  2894  // If f returns false, range stops the iteration.
  2895  func (s *Int8SetDesc) Range(f func(value int8) bool) {
  2896  	x := s.header.atomicLoadNext(0)
  2897  	for x != nil {
  2898  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2899  			x = x.atomicLoadNext(0)
  2900  			continue
  2901  		}
  2902  		if !f(x.value) {
  2903  			break
  2904  		}
  2905  		x = x.atomicLoadNext(0)
  2906  	}
  2907  }
  2908  
  2909  // Len return the length of this skip set.
  2910  func (s *Int8SetDesc) Len() int {
  2911  	return int(atomic.LoadInt64(&s.length))
  2912  }
  2913  
  2914  // Int16Set represents a set based on skip list in ascending order.
  2915  type Int16Set struct {
  2916  	header       *int16Node
  2917  	length       int64
  2918  	highestLevel int64 // highest level for now
  2919  }
  2920  
  2921  type int16Node struct {
  2922  	value int16
  2923  	next  optionalArray // [level]*int16Node
  2924  	mu    sync.Mutex
  2925  	flags bitflag
  2926  	level uint32
  2927  }
  2928  
  2929  func newInt16Node(value int16, level int) *int16Node {
  2930  	node := &int16Node{
  2931  		value: value,
  2932  		level: uint32(level),
  2933  	}
  2934  	if level > op1 {
  2935  		node.next.extra = new([op2]unsafe.Pointer)
  2936  	}
  2937  	return node
  2938  }
  2939  
  2940  func (n *int16Node) loadNext(i int) *int16Node {
  2941  	return (*int16Node)(n.next.load(i))
  2942  }
  2943  
  2944  func (n *int16Node) storeNext(i int, node *int16Node) {
  2945  	n.next.store(i, unsafe.Pointer(node))
  2946  }
  2947  
  2948  func (n *int16Node) atomicLoadNext(i int) *int16Node {
  2949  	return (*int16Node)(n.next.atomicLoad(i))
  2950  }
  2951  
  2952  func (n *int16Node) atomicStoreNext(i int, node *int16Node) {
  2953  	n.next.atomicStore(i, unsafe.Pointer(node))
  2954  }
  2955  
  2956  func (n *int16Node) lessthan(value int16) bool {
  2957  	return n.value < value
  2958  }
  2959  
  2960  func (n *int16Node) equal(value int16) bool {
  2961  	return n.value == value
  2962  }
  2963  
  2964  // NewInt16 return an empty int16 skip set in ascending order.
  2965  func NewInt16() *Int16Set {
  2966  	h := newInt16Node(0, maxLevel)
  2967  	h.flags.SetTrue(fullyLinked)
  2968  	return &Int16Set{
  2969  		header:       h,
  2970  		highestLevel: defaultHighestLevel,
  2971  	}
  2972  }
  2973  
  2974  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2975  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2976  func (s *Int16Set) findNodeRemove(value int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) int {
  2977  	// lFound represents the index of the first layer at which it found a node.
  2978  	lFound, x := -1, s.header
  2979  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2980  		succ := x.atomicLoadNext(i)
  2981  		for succ != nil && succ.lessthan(value) {
  2982  			x = succ
  2983  			succ = x.atomicLoadNext(i)
  2984  		}
  2985  		preds[i] = x
  2986  		succs[i] = succ
  2987  
  2988  		// Check if the value already in the skip list.
  2989  		if lFound == -1 && succ != nil && succ.equal(value) {
  2990  			lFound = i
  2991  		}
  2992  	}
  2993  	return lFound
  2994  }
  2995  
  2996  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  2997  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2998  func (s *Int16Set) findNodeAdd(value int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) int {
  2999  	x := s.header
  3000  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3001  		succ := x.atomicLoadNext(i)
  3002  		for succ != nil && succ.lessthan(value) {
  3003  			x = succ
  3004  			succ = x.atomicLoadNext(i)
  3005  		}
  3006  		preds[i] = x
  3007  		succs[i] = succ
  3008  
  3009  		// Check if the value already in the skip list.
  3010  		if succ != nil && succ.equal(value) {
  3011  			return i
  3012  		}
  3013  	}
  3014  	return -1
  3015  }
  3016  
  3017  func unlockInt16(preds [maxLevel]*int16Node, highestLevel int) {
  3018  	var prevPred *int16Node
  3019  	for i := highestLevel; i >= 0; i-- {
  3020  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  3021  			preds[i].mu.Unlock()
  3022  			prevPred = preds[i]
  3023  		}
  3024  	}
  3025  }
  3026  
  3027  // Add add the value into skip set, return true if this process insert the value into skip set,
  3028  // return false if this process can't insert this value, because another process has insert the same value.
  3029  //
  3030  // If the value is in the skip set but not fully linked, this process will wait until it is.
  3031  func (s *Int16Set) Add(value int16) bool {
  3032  	level := s.randomLevel()
  3033  	var preds, succs [maxLevel]*int16Node
  3034  	for {
  3035  		lFound := s.findNodeAdd(value, &preds, &succs)
  3036  		if lFound != -1 { // indicating the value is already in the skip-list
  3037  			nodeFound := succs[lFound]
  3038  			if !nodeFound.flags.Get(marked) {
  3039  				for !nodeFound.flags.Get(fullyLinked) {
  3040  					// The node is not yet fully linked, just waits until it is.
  3041  				}
  3042  				return false
  3043  			}
  3044  			// If the node is marked, represents some other thread is in the process of deleting this node,
  3045  			// we need to add this node in next loop.
  3046  			continue
  3047  		}
  3048  		// Add this node into skip list.
  3049  		var (
  3050  			highestLocked        = -1 // the highest level being locked by this process
  3051  			valid                = true
  3052  			pred, succ, prevPred *int16Node
  3053  		)
  3054  		for layer := 0; valid && layer < level; layer++ {
  3055  			pred = preds[layer]   // target node's previous node
  3056  			succ = succs[layer]   // target node's next node
  3057  			if pred != prevPred { // the node in this layer could be locked by previous loop
  3058  				pred.mu.Lock()
  3059  				highestLocked = layer
  3060  				prevPred = pred
  3061  			}
  3062  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  3063  			// It is valid if:
  3064  			// 1. The previous node and next node both are not marked.
  3065  			// 2. The previous node's next node is succ in this layer.
  3066  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3067  		}
  3068  		if !valid {
  3069  			unlockInt16(preds, highestLocked)
  3070  			continue
  3071  		}
  3072  
  3073  		nn := newInt16Node(value, level)
  3074  		for layer := 0; layer < level; layer++ {
  3075  			nn.storeNext(layer, succs[layer])
  3076  			preds[layer].atomicStoreNext(layer, nn)
  3077  		}
  3078  		nn.flags.SetTrue(fullyLinked)
  3079  		unlockInt16(preds, highestLocked)
  3080  		atomic.AddInt64(&s.length, 1)
  3081  		return true
  3082  	}
  3083  }
  3084  
  3085  func (s *Int16Set) randomLevel() int {
  3086  	// Generate random level.
  3087  	level := randomLevel()
  3088  	// Update highest level if possible.
  3089  	for {
  3090  		hl := atomic.LoadInt64(&s.highestLevel)
  3091  		if int64(level) <= hl {
  3092  			break
  3093  		}
  3094  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3095  			break
  3096  		}
  3097  	}
  3098  	return level
  3099  }
  3100  
  3101  // Contains check if the value is in the skip set.
  3102  func (s *Int16Set) Contains(value int16) bool {
  3103  	x := s.header
  3104  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3105  		nex := x.atomicLoadNext(i)
  3106  		for nex != nil && nex.lessthan(value) {
  3107  			x = nex
  3108  			nex = x.atomicLoadNext(i)
  3109  		}
  3110  
  3111  		// Check if the value already in the skip list.
  3112  		if nex != nil && nex.equal(value) {
  3113  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  3114  		}
  3115  	}
  3116  	return false
  3117  }
  3118  
  3119  // Remove a node from the skip set.
  3120  func (s *Int16Set) Remove(value int16) bool {
  3121  	var (
  3122  		nodeToRemove *int16Node
  3123  		isMarked     bool // represents if this operation mark the node
  3124  		topLayer     = -1
  3125  		preds, succs [maxLevel]*int16Node
  3126  	)
  3127  	for {
  3128  		lFound := s.findNodeRemove(value, &preds, &succs)
  3129  		if isMarked || // this process mark this node or we can find this node in the skip list
  3130  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3131  			if !isMarked { // we don't mark this node for now
  3132  				nodeToRemove = succs[lFound]
  3133  				topLayer = lFound
  3134  				nodeToRemove.mu.Lock()
  3135  				if nodeToRemove.flags.Get(marked) {
  3136  					// The node is marked by another process,
  3137  					// the physical deletion will be accomplished by another process.
  3138  					nodeToRemove.mu.Unlock()
  3139  					return false
  3140  				}
  3141  				nodeToRemove.flags.SetTrue(marked)
  3142  				isMarked = true
  3143  			}
  3144  			// Accomplish the physical deletion.
  3145  			var (
  3146  				highestLocked        = -1 // the highest level being locked by this process
  3147  				valid                = true
  3148  				pred, succ, prevPred *int16Node
  3149  			)
  3150  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3151  				pred, succ = preds[layer], succs[layer]
  3152  				if pred != prevPred { // the node in this layer could be locked by previous loop
  3153  					pred.mu.Lock()
  3154  					highestLocked = layer
  3155  					prevPred = pred
  3156  				}
  3157  				// valid check if there is another node has inserted into the skip list in this layer
  3158  				// during this process, or the previous is removed by another process.
  3159  				// It is valid if:
  3160  				// 1. the previous node exists.
  3161  				// 2. no another node has inserted into the skip list in this layer.
  3162  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  3163  			}
  3164  			if !valid {
  3165  				unlockInt16(preds, highestLocked)
  3166  				continue
  3167  			}
  3168  			for i := topLayer; i >= 0; i-- {
  3169  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  3170  				// So we don't need `nodeToRemove.loadNext`
  3171  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  3172  			}
  3173  			nodeToRemove.mu.Unlock()
  3174  			unlockInt16(preds, highestLocked)
  3175  			atomic.AddInt64(&s.length, -1)
  3176  			return true
  3177  		}
  3178  		return false
  3179  	}
  3180  }
  3181  
  3182  // Range calls f sequentially for each value present in the skip set.
  3183  // If f returns false, range stops the iteration.
  3184  func (s *Int16Set) Range(f func(value int16) bool) {
  3185  	x := s.header.atomicLoadNext(0)
  3186  	for x != nil {
  3187  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  3188  			x = x.atomicLoadNext(0)
  3189  			continue
  3190  		}
  3191  		if !f(x.value) {
  3192  			break
  3193  		}
  3194  		x = x.atomicLoadNext(0)
  3195  	}
  3196  }
  3197  
  3198  // Len return the length of this skip set.
  3199  func (s *Int16Set) Len() int {
  3200  	return int(atomic.LoadInt64(&s.length))
  3201  }
  3202  
  3203  // Int16SetDesc represents a set based on skip list in descending order.
  3204  type Int16SetDesc struct {
  3205  	header       *int16NodeDesc
  3206  	length       int64
  3207  	highestLevel int64 // highest level for now
  3208  }
  3209  
  3210  type int16NodeDesc struct {
  3211  	value int16
  3212  	next  optionalArray // [level]*int16NodeDesc
  3213  	mu    sync.Mutex
  3214  	flags bitflag
  3215  	level uint32
  3216  }
  3217  
  3218  func newInt16NodeDesc(value int16, level int) *int16NodeDesc {
  3219  	node := &int16NodeDesc{
  3220  		value: value,
  3221  		level: uint32(level),
  3222  	}
  3223  	if level > op1 {
  3224  		node.next.extra = new([op2]unsafe.Pointer)
  3225  	}
  3226  	return node
  3227  }
  3228  
  3229  func (n *int16NodeDesc) loadNext(i int) *int16NodeDesc {
  3230  	return (*int16NodeDesc)(n.next.load(i))
  3231  }
  3232  
  3233  func (n *int16NodeDesc) storeNext(i int, node *int16NodeDesc) {
  3234  	n.next.store(i, unsafe.Pointer(node))
  3235  }
  3236  
  3237  func (n *int16NodeDesc) atomicLoadNext(i int) *int16NodeDesc {
  3238  	return (*int16NodeDesc)(n.next.atomicLoad(i))
  3239  }
  3240  
  3241  func (n *int16NodeDesc) atomicStoreNext(i int, node *int16NodeDesc) {
  3242  	n.next.atomicStore(i, unsafe.Pointer(node))
  3243  }
  3244  
  3245  func (n *int16NodeDesc) lessthan(value int16) bool {
  3246  	return n.value > value
  3247  }
  3248  
  3249  func (n *int16NodeDesc) equal(value int16) bool {
  3250  	return n.value == value
  3251  }
  3252  
  3253  // NewInt16Desc return an empty int16 skip set in descending order.
  3254  func NewInt16Desc() *Int16SetDesc {
  3255  	h := newInt16NodeDesc(0, maxLevel)
  3256  	h.flags.SetTrue(fullyLinked)
  3257  	return &Int16SetDesc{
  3258  		header:       h,
  3259  		highestLevel: defaultHighestLevel,
  3260  	}
  3261  }
  3262  
  3263  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  3264  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3265  func (s *Int16SetDesc) findNodeRemove(value int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) int {
  3266  	// lFound represents the index of the first layer at which it found a node.
  3267  	lFound, x := -1, s.header
  3268  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3269  		succ := x.atomicLoadNext(i)
  3270  		for succ != nil && succ.lessthan(value) {
  3271  			x = succ
  3272  			succ = x.atomicLoadNext(i)
  3273  		}
  3274  		preds[i] = x
  3275  		succs[i] = succ
  3276  
  3277  		// Check if the value already in the skip list.
  3278  		if lFound == -1 && succ != nil && succ.equal(value) {
  3279  			lFound = i
  3280  		}
  3281  	}
  3282  	return lFound
  3283  }
  3284  
  3285  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  3286  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3287  func (s *Int16SetDesc) findNodeAdd(value int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) int {
  3288  	x := s.header
  3289  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3290  		succ := x.atomicLoadNext(i)
  3291  		for succ != nil && succ.lessthan(value) {
  3292  			x = succ
  3293  			succ = x.atomicLoadNext(i)
  3294  		}
  3295  		preds[i] = x
  3296  		succs[i] = succ
  3297  
  3298  		// Check if the value already in the skip list.
  3299  		if succ != nil && succ.equal(value) {
  3300  			return i
  3301  		}
  3302  	}
  3303  	return -1
  3304  }
  3305  
  3306  func unlockInt16Desc(preds [maxLevel]*int16NodeDesc, highestLevel int) {
  3307  	var prevPred *int16NodeDesc
  3308  	for i := highestLevel; i >= 0; i-- {
  3309  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  3310  			preds[i].mu.Unlock()
  3311  			prevPred = preds[i]
  3312  		}
  3313  	}
  3314  }
  3315  
  3316  // Add add the value into skip set, return true if this process insert the value into skip set,
  3317  // return false if this process can't insert this value, because another process has insert the same value.
  3318  //
  3319  // If the value is in the skip set but not fully linked, this process will wait until it is.
  3320  func (s *Int16SetDesc) Add(value int16) bool {
  3321  	level := s.randomLevel()
  3322  	var preds, succs [maxLevel]*int16NodeDesc
  3323  	for {
  3324  		lFound := s.findNodeAdd(value, &preds, &succs)
  3325  		if lFound != -1 { // indicating the value is already in the skip-list
  3326  			nodeFound := succs[lFound]
  3327  			if !nodeFound.flags.Get(marked) {
  3328  				for !nodeFound.flags.Get(fullyLinked) {
  3329  					// The node is not yet fully linked, just waits until it is.
  3330  				}
  3331  				return false
  3332  			}
  3333  			// If the node is marked, represents some other thread is in the process of deleting this node,
  3334  			// we need to add this node in next loop.
  3335  			continue
  3336  		}
  3337  		// Add this node into skip list.
  3338  		var (
  3339  			highestLocked        = -1 // the highest level being locked by this process
  3340  			valid                = true
  3341  			pred, succ, prevPred *int16NodeDesc
  3342  		)
  3343  		for layer := 0; valid && layer < level; layer++ {
  3344  			pred = preds[layer]   // target node's previous node
  3345  			succ = succs[layer]   // target node's next node
  3346  			if pred != prevPred { // the node in this layer could be locked by previous loop
  3347  				pred.mu.Lock()
  3348  				highestLocked = layer
  3349  				prevPred = pred
  3350  			}
  3351  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  3352  			// It is valid if:
  3353  			// 1. The previous node and next node both are not marked.
  3354  			// 2. The previous node's next node is succ in this layer.
  3355  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3356  		}
  3357  		if !valid {
  3358  			unlockInt16Desc(preds, highestLocked)
  3359  			continue
  3360  		}
  3361  
  3362  		nn := newInt16NodeDesc(value, level)
  3363  		for layer := 0; layer < level; layer++ {
  3364  			nn.storeNext(layer, succs[layer])
  3365  			preds[layer].atomicStoreNext(layer, nn)
  3366  		}
  3367  		nn.flags.SetTrue(fullyLinked)
  3368  		unlockInt16Desc(preds, highestLocked)
  3369  		atomic.AddInt64(&s.length, 1)
  3370  		return true
  3371  	}
  3372  }
  3373  
  3374  func (s *Int16SetDesc) randomLevel() int {
  3375  	// Generate random level.
  3376  	level := randomLevel()
  3377  	// Update highest level if possible.
  3378  	for {
  3379  		hl := atomic.LoadInt64(&s.highestLevel)
  3380  		if int64(level) <= hl {
  3381  			break
  3382  		}
  3383  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3384  			break
  3385  		}
  3386  	}
  3387  	return level
  3388  }
  3389  
  3390  // Contains check if the value is in the skip set.
  3391  func (s *Int16SetDesc) Contains(value int16) bool {
  3392  	x := s.header
  3393  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3394  		nex := x.atomicLoadNext(i)
  3395  		for nex != nil && nex.lessthan(value) {
  3396  			x = nex
  3397  			nex = x.atomicLoadNext(i)
  3398  		}
  3399  
  3400  		// Check if the value already in the skip list.
  3401  		if nex != nil && nex.equal(value) {
  3402  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  3403  		}
  3404  	}
  3405  	return false
  3406  }
  3407  
  3408  // Remove a node from the skip set.
  3409  func (s *Int16SetDesc) Remove(value int16) bool {
  3410  	var (
  3411  		nodeToRemove *int16NodeDesc
  3412  		isMarked     bool // represents if this operation mark the node
  3413  		topLayer     = -1
  3414  		preds, succs [maxLevel]*int16NodeDesc
  3415  	)
  3416  	for {
  3417  		lFound := s.findNodeRemove(value, &preds, &succs)
  3418  		if isMarked || // this process mark this node or we can find this node in the skip list
  3419  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3420  			if !isMarked { // we don't mark this node for now
  3421  				nodeToRemove = succs[lFound]
  3422  				topLayer = lFound
  3423  				nodeToRemove.mu.Lock()
  3424  				if nodeToRemove.flags.Get(marked) {
  3425  					// The node is marked by another process,
  3426  					// the physical deletion will be accomplished by another process.
  3427  					nodeToRemove.mu.Unlock()
  3428  					return false
  3429  				}
  3430  				nodeToRemove.flags.SetTrue(marked)
  3431  				isMarked = true
  3432  			}
  3433  			// Accomplish the physical deletion.
  3434  			var (
  3435  				highestLocked        = -1 // the highest level being locked by this process
  3436  				valid                = true
  3437  				pred, succ, prevPred *int16NodeDesc
  3438  			)
  3439  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3440  				pred, succ = preds[layer], succs[layer]
  3441  				if pred != prevPred { // the node in this layer could be locked by previous loop
  3442  					pred.mu.Lock()
  3443  					highestLocked = layer
  3444  					prevPred = pred
  3445  				}
  3446  				// valid check if there is another node has inserted into the skip list in this layer
  3447  				// during this process, or the previous is removed by another process.
  3448  				// It is valid if:
  3449  				// 1. the previous node exists.
  3450  				// 2. no another node has inserted into the skip list in this layer.
  3451  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  3452  			}
  3453  			if !valid {
  3454  				unlockInt16Desc(preds, highestLocked)
  3455  				continue
  3456  			}
  3457  			for i := topLayer; i >= 0; i-- {
  3458  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  3459  				// So we don't need `nodeToRemove.loadNext`
  3460  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  3461  			}
  3462  			nodeToRemove.mu.Unlock()
  3463  			unlockInt16Desc(preds, highestLocked)
  3464  			atomic.AddInt64(&s.length, -1)
  3465  			return true
  3466  		}
  3467  		return false
  3468  	}
  3469  }
  3470  
  3471  // Range calls f sequentially for each value present in the skip set.
  3472  // If f returns false, range stops the iteration.
  3473  func (s *Int16SetDesc) Range(f func(value int16) bool) {
  3474  	x := s.header.atomicLoadNext(0)
  3475  	for x != nil {
  3476  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  3477  			x = x.atomicLoadNext(0)
  3478  			continue
  3479  		}
  3480  		if !f(x.value) {
  3481  			break
  3482  		}
  3483  		x = x.atomicLoadNext(0)
  3484  	}
  3485  }
  3486  
  3487  // Len return the length of this skip set.
  3488  func (s *Int16SetDesc) Len() int {
  3489  	return int(atomic.LoadInt64(&s.length))
  3490  }
  3491  
  3492  // Int32Set represents a set based on skip list in ascending order.
  3493  type Int32Set struct {
  3494  	header       *int32Node
  3495  	length       int64
  3496  	highestLevel int64 // highest level for now
  3497  }
  3498  
  3499  type int32Node struct {
  3500  	value int32
  3501  	next  optionalArray // [level]*int32Node
  3502  	mu    sync.Mutex
  3503  	flags bitflag
  3504  	level uint32
  3505  }
  3506  
  3507  func newInt32Node(value int32, level int) *int32Node {
  3508  	node := &int32Node{
  3509  		value: value,
  3510  		level: uint32(level),
  3511  	}
  3512  	if level > op1 {
  3513  		node.next.extra = new([op2]unsafe.Pointer)
  3514  	}
  3515  	return node
  3516  }
  3517  
  3518  func (n *int32Node) loadNext(i int) *int32Node {
  3519  	return (*int32Node)(n.next.load(i))
  3520  }
  3521  
  3522  func (n *int32Node) storeNext(i int, node *int32Node) {
  3523  	n.next.store(i, unsafe.Pointer(node))
  3524  }
  3525  
  3526  func (n *int32Node) atomicLoadNext(i int) *int32Node {
  3527  	return (*int32Node)(n.next.atomicLoad(i))
  3528  }
  3529  
  3530  func (n *int32Node) atomicStoreNext(i int, node *int32Node) {
  3531  	n.next.atomicStore(i, unsafe.Pointer(node))
  3532  }
  3533  
  3534  func (n *int32Node) lessthan(value int32) bool {
  3535  	return n.value < value
  3536  }
  3537  
  3538  func (n *int32Node) equal(value int32) bool {
  3539  	return n.value == value
  3540  }
  3541  
  3542  // NewInt32 return an empty int32 skip set in ascending order.
  3543  func NewInt32() *Int32Set {
  3544  	h := newInt32Node(0, maxLevel)
  3545  	h.flags.SetTrue(fullyLinked)
  3546  	return &Int32Set{
  3547  		header:       h,
  3548  		highestLevel: defaultHighestLevel,
  3549  	}
  3550  }
  3551  
  3552  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  3553  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3554  func (s *Int32Set) findNodeRemove(value int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) int {
  3555  	// lFound represents the index of the first layer at which it found a node.
  3556  	lFound, x := -1, s.header
  3557  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3558  		succ := x.atomicLoadNext(i)
  3559  		for succ != nil && succ.lessthan(value) {
  3560  			x = succ
  3561  			succ = x.atomicLoadNext(i)
  3562  		}
  3563  		preds[i] = x
  3564  		succs[i] = succ
  3565  
  3566  		// Check if the value already in the skip list.
  3567  		if lFound == -1 && succ != nil && succ.equal(value) {
  3568  			lFound = i
  3569  		}
  3570  	}
  3571  	return lFound
  3572  }
  3573  
  3574  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  3575  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3576  func (s *Int32Set) findNodeAdd(value int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) int {
  3577  	x := s.header
  3578  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3579  		succ := x.atomicLoadNext(i)
  3580  		for succ != nil && succ.lessthan(value) {
  3581  			x = succ
  3582  			succ = x.atomicLoadNext(i)
  3583  		}
  3584  		preds[i] = x
  3585  		succs[i] = succ
  3586  
  3587  		// Check if the value already in the skip list.
  3588  		if succ != nil && succ.equal(value) {
  3589  			return i
  3590  		}
  3591  	}
  3592  	return -1
  3593  }
  3594  
  3595  func unlockInt32(preds [maxLevel]*int32Node, highestLevel int) {
  3596  	var prevPred *int32Node
  3597  	for i := highestLevel; i >= 0; i-- {
  3598  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  3599  			preds[i].mu.Unlock()
  3600  			prevPred = preds[i]
  3601  		}
  3602  	}
  3603  }
  3604  
  3605  // Add add the value into skip set, return true if this process insert the value into skip set,
  3606  // return false if this process can't insert this value, because another process has insert the same value.
  3607  //
  3608  // If the value is in the skip set but not fully linked, this process will wait until it is.
  3609  func (s *Int32Set) Add(value int32) bool {
  3610  	level := s.randomLevel()
  3611  	var preds, succs [maxLevel]*int32Node
  3612  	for {
  3613  		lFound := s.findNodeAdd(value, &preds, &succs)
  3614  		if lFound != -1 { // indicating the value is already in the skip-list
  3615  			nodeFound := succs[lFound]
  3616  			if !nodeFound.flags.Get(marked) {
  3617  				for !nodeFound.flags.Get(fullyLinked) {
  3618  					// The node is not yet fully linked, just waits until it is.
  3619  				}
  3620  				return false
  3621  			}
  3622  			// If the node is marked, represents some other thread is in the process of deleting this node,
  3623  			// we need to add this node in next loop.
  3624  			continue
  3625  		}
  3626  		// Add this node into skip list.
  3627  		var (
  3628  			highestLocked        = -1 // the highest level being locked by this process
  3629  			valid                = true
  3630  			pred, succ, prevPred *int32Node
  3631  		)
  3632  		for layer := 0; valid && layer < level; layer++ {
  3633  			pred = preds[layer]   // target node's previous node
  3634  			succ = succs[layer]   // target node's next node
  3635  			if pred != prevPred { // the node in this layer could be locked by previous loop
  3636  				pred.mu.Lock()
  3637  				highestLocked = layer
  3638  				prevPred = pred
  3639  			}
  3640  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  3641  			// It is valid if:
  3642  			// 1. The previous node and next node both are not marked.
  3643  			// 2. The previous node's next node is succ in this layer.
  3644  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3645  		}
  3646  		if !valid {
  3647  			unlockInt32(preds, highestLocked)
  3648  			continue
  3649  		}
  3650  
  3651  		nn := newInt32Node(value, level)
  3652  		for layer := 0; layer < level; layer++ {
  3653  			nn.storeNext(layer, succs[layer])
  3654  			preds[layer].atomicStoreNext(layer, nn)
  3655  		}
  3656  		nn.flags.SetTrue(fullyLinked)
  3657  		unlockInt32(preds, highestLocked)
  3658  		atomic.AddInt64(&s.length, 1)
  3659  		return true
  3660  	}
  3661  }
  3662  
  3663  func (s *Int32Set) randomLevel() int {
  3664  	// Generate random level.
  3665  	level := randomLevel()
  3666  	// Update highest level if possible.
  3667  	for {
  3668  		hl := atomic.LoadInt64(&s.highestLevel)
  3669  		if int64(level) <= hl {
  3670  			break
  3671  		}
  3672  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3673  			break
  3674  		}
  3675  	}
  3676  	return level
  3677  }
  3678  
  3679  // Contains check if the value is in the skip set.
  3680  func (s *Int32Set) Contains(value int32) bool {
  3681  	x := s.header
  3682  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3683  		nex := x.atomicLoadNext(i)
  3684  		for nex != nil && nex.lessthan(value) {
  3685  			x = nex
  3686  			nex = x.atomicLoadNext(i)
  3687  		}
  3688  
  3689  		// Check if the value already in the skip list.
  3690  		if nex != nil && nex.equal(value) {
  3691  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  3692  		}
  3693  	}
  3694  	return false
  3695  }
  3696  
  3697  // Remove a node from the skip set.
  3698  func (s *Int32Set) Remove(value int32) bool {
  3699  	var (
  3700  		nodeToRemove *int32Node
  3701  		isMarked     bool // represents if this operation mark the node
  3702  		topLayer     = -1
  3703  		preds, succs [maxLevel]*int32Node
  3704  	)
  3705  	for {
  3706  		lFound := s.findNodeRemove(value, &preds, &succs)
  3707  		if isMarked || // this process mark this node or we can find this node in the skip list
  3708  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3709  			if !isMarked { // we don't mark this node for now
  3710  				nodeToRemove = succs[lFound]
  3711  				topLayer = lFound
  3712  				nodeToRemove.mu.Lock()
  3713  				if nodeToRemove.flags.Get(marked) {
  3714  					// The node is marked by another process,
  3715  					// the physical deletion will be accomplished by another process.
  3716  					nodeToRemove.mu.Unlock()
  3717  					return false
  3718  				}
  3719  				nodeToRemove.flags.SetTrue(marked)
  3720  				isMarked = true
  3721  			}
  3722  			// Accomplish the physical deletion.
  3723  			var (
  3724  				highestLocked        = -1 // the highest level being locked by this process
  3725  				valid                = true
  3726  				pred, succ, prevPred *int32Node
  3727  			)
  3728  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3729  				pred, succ = preds[layer], succs[layer]
  3730  				if pred != prevPred { // the node in this layer could be locked by previous loop
  3731  					pred.mu.Lock()
  3732  					highestLocked = layer
  3733  					prevPred = pred
  3734  				}
  3735  				// valid check if there is another node has inserted into the skip list in this layer
  3736  				// during this process, or the previous is removed by another process.
  3737  				// It is valid if:
  3738  				// 1. the previous node exists.
  3739  				// 2. no another node has inserted into the skip list in this layer.
  3740  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  3741  			}
  3742  			if !valid {
  3743  				unlockInt32(preds, highestLocked)
  3744  				continue
  3745  			}
  3746  			for i := topLayer; i >= 0; i-- {
  3747  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  3748  				// So we don't need `nodeToRemove.loadNext`
  3749  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  3750  			}
  3751  			nodeToRemove.mu.Unlock()
  3752  			unlockInt32(preds, highestLocked)
  3753  			atomic.AddInt64(&s.length, -1)
  3754  			return true
  3755  		}
  3756  		return false
  3757  	}
  3758  }
  3759  
  3760  // Range calls f sequentially for each value present in the skip set.
  3761  // If f returns false, range stops the iteration.
  3762  func (s *Int32Set) Range(f func(value int32) bool) {
  3763  	x := s.header.atomicLoadNext(0)
  3764  	for x != nil {
  3765  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  3766  			x = x.atomicLoadNext(0)
  3767  			continue
  3768  		}
  3769  		if !f(x.value) {
  3770  			break
  3771  		}
  3772  		x = x.atomicLoadNext(0)
  3773  	}
  3774  }
  3775  
  3776  // Len return the length of this skip set.
  3777  func (s *Int32Set) Len() int {
  3778  	return int(atomic.LoadInt64(&s.length))
  3779  }
  3780  
  3781  // Int32SetDesc represents a set based on skip list in descending order.
  3782  type Int32SetDesc struct {
  3783  	header       *int32NodeDesc
  3784  	length       int64
  3785  	highestLevel int64 // highest level for now
  3786  }
  3787  
  3788  type int32NodeDesc struct {
  3789  	value int32
  3790  	next  optionalArray // [level]*int32NodeDesc
  3791  	mu    sync.Mutex
  3792  	flags bitflag
  3793  	level uint32
  3794  }
  3795  
  3796  func newInt32NodeDesc(value int32, level int) *int32NodeDesc {
  3797  	node := &int32NodeDesc{
  3798  		value: value,
  3799  		level: uint32(level),
  3800  	}
  3801  	if level > op1 {
  3802  		node.next.extra = new([op2]unsafe.Pointer)
  3803  	}
  3804  	return node
  3805  }
  3806  
  3807  func (n *int32NodeDesc) loadNext(i int) *int32NodeDesc {
  3808  	return (*int32NodeDesc)(n.next.load(i))
  3809  }
  3810  
  3811  func (n *int32NodeDesc) storeNext(i int, node *int32NodeDesc) {
  3812  	n.next.store(i, unsafe.Pointer(node))
  3813  }
  3814  
  3815  func (n *int32NodeDesc) atomicLoadNext(i int) *int32NodeDesc {
  3816  	return (*int32NodeDesc)(n.next.atomicLoad(i))
  3817  }
  3818  
  3819  func (n *int32NodeDesc) atomicStoreNext(i int, node *int32NodeDesc) {
  3820  	n.next.atomicStore(i, unsafe.Pointer(node))
  3821  }
  3822  
  3823  func (n *int32NodeDesc) lessthan(value int32) bool {
  3824  	return n.value > value
  3825  }
  3826  
  3827  func (n *int32NodeDesc) equal(value int32) bool {
  3828  	return n.value == value
  3829  }
  3830  
  3831  // NewInt32Desc return an empty int32 skip set in descending order.
  3832  func NewInt32Desc() *Int32SetDesc {
  3833  	h := newInt32NodeDesc(0, maxLevel)
  3834  	h.flags.SetTrue(fullyLinked)
  3835  	return &Int32SetDesc{
  3836  		header:       h,
  3837  		highestLevel: defaultHighestLevel,
  3838  	}
  3839  }
  3840  
  3841  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  3842  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3843  func (s *Int32SetDesc) findNodeRemove(value int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) int {
  3844  	// lFound represents the index of the first layer at which it found a node.
  3845  	lFound, x := -1, s.header
  3846  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3847  		succ := x.atomicLoadNext(i)
  3848  		for succ != nil && succ.lessthan(value) {
  3849  			x = succ
  3850  			succ = x.atomicLoadNext(i)
  3851  		}
  3852  		preds[i] = x
  3853  		succs[i] = succ
  3854  
  3855  		// Check if the value already in the skip list.
  3856  		if lFound == -1 && succ != nil && succ.equal(value) {
  3857  			lFound = i
  3858  		}
  3859  	}
  3860  	return lFound
  3861  }
  3862  
  3863  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  3864  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3865  func (s *Int32SetDesc) findNodeAdd(value int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) int {
  3866  	x := s.header
  3867  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3868  		succ := x.atomicLoadNext(i)
  3869  		for succ != nil && succ.lessthan(value) {
  3870  			x = succ
  3871  			succ = x.atomicLoadNext(i)
  3872  		}
  3873  		preds[i] = x
  3874  		succs[i] = succ
  3875  
  3876  		// Check if the value already in the skip list.
  3877  		if succ != nil && succ.equal(value) {
  3878  			return i
  3879  		}
  3880  	}
  3881  	return -1
  3882  }
  3883  
  3884  func unlockInt32Desc(preds [maxLevel]*int32NodeDesc, highestLevel int) {
  3885  	var prevPred *int32NodeDesc
  3886  	for i := highestLevel; i >= 0; i-- {
  3887  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  3888  			preds[i].mu.Unlock()
  3889  			prevPred = preds[i]
  3890  		}
  3891  	}
  3892  }
  3893  
  3894  // Add add the value into skip set, return true if this process insert the value into skip set,
  3895  // return false if this process can't insert this value, because another process has insert the same value.
  3896  //
  3897  // If the value is in the skip set but not fully linked, this process will wait until it is.
  3898  func (s *Int32SetDesc) Add(value int32) bool {
  3899  	level := s.randomLevel()
  3900  	var preds, succs [maxLevel]*int32NodeDesc
  3901  	for {
  3902  		lFound := s.findNodeAdd(value, &preds, &succs)
  3903  		if lFound != -1 { // indicating the value is already in the skip-list
  3904  			nodeFound := succs[lFound]
  3905  			if !nodeFound.flags.Get(marked) {
  3906  				for !nodeFound.flags.Get(fullyLinked) {
  3907  					// The node is not yet fully linked, just waits until it is.
  3908  				}
  3909  				return false
  3910  			}
  3911  			// If the node is marked, represents some other thread is in the process of deleting this node,
  3912  			// we need to add this node in next loop.
  3913  			continue
  3914  		}
  3915  		// Add this node into skip list.
  3916  		var (
  3917  			highestLocked        = -1 // the highest level being locked by this process
  3918  			valid                = true
  3919  			pred, succ, prevPred *int32NodeDesc
  3920  		)
  3921  		for layer := 0; valid && layer < level; layer++ {
  3922  			pred = preds[layer]   // target node's previous node
  3923  			succ = succs[layer]   // target node's next node
  3924  			if pred != prevPred { // the node in this layer could be locked by previous loop
  3925  				pred.mu.Lock()
  3926  				highestLocked = layer
  3927  				prevPred = pred
  3928  			}
  3929  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  3930  			// It is valid if:
  3931  			// 1. The previous node and next node both are not marked.
  3932  			// 2. The previous node's next node is succ in this layer.
  3933  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3934  		}
  3935  		if !valid {
  3936  			unlockInt32Desc(preds, highestLocked)
  3937  			continue
  3938  		}
  3939  
  3940  		nn := newInt32NodeDesc(value, level)
  3941  		for layer := 0; layer < level; layer++ {
  3942  			nn.storeNext(layer, succs[layer])
  3943  			preds[layer].atomicStoreNext(layer, nn)
  3944  		}
  3945  		nn.flags.SetTrue(fullyLinked)
  3946  		unlockInt32Desc(preds, highestLocked)
  3947  		atomic.AddInt64(&s.length, 1)
  3948  		return true
  3949  	}
  3950  }
  3951  
  3952  func (s *Int32SetDesc) randomLevel() int {
  3953  	// Generate random level.
  3954  	level := randomLevel()
  3955  	// Update highest level if possible.
  3956  	for {
  3957  		hl := atomic.LoadInt64(&s.highestLevel)
  3958  		if int64(level) <= hl {
  3959  			break
  3960  		}
  3961  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3962  			break
  3963  		}
  3964  	}
  3965  	return level
  3966  }
  3967  
  3968  // Contains check if the value is in the skip set.
  3969  func (s *Int32SetDesc) Contains(value int32) bool {
  3970  	x := s.header
  3971  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3972  		nex := x.atomicLoadNext(i)
  3973  		for nex != nil && nex.lessthan(value) {
  3974  			x = nex
  3975  			nex = x.atomicLoadNext(i)
  3976  		}
  3977  
  3978  		// Check if the value already in the skip list.
  3979  		if nex != nil && nex.equal(value) {
  3980  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  3981  		}
  3982  	}
  3983  	return false
  3984  }
  3985  
  3986  // Remove a node from the skip set.
  3987  func (s *Int32SetDesc) Remove(value int32) bool {
  3988  	var (
  3989  		nodeToRemove *int32NodeDesc
  3990  		isMarked     bool // represents if this operation mark the node
  3991  		topLayer     = -1
  3992  		preds, succs [maxLevel]*int32NodeDesc
  3993  	)
  3994  	for {
  3995  		lFound := s.findNodeRemove(value, &preds, &succs)
  3996  		if isMarked || // this process mark this node or we can find this node in the skip list
  3997  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3998  			if !isMarked { // we don't mark this node for now
  3999  				nodeToRemove = succs[lFound]
  4000  				topLayer = lFound
  4001  				nodeToRemove.mu.Lock()
  4002  				if nodeToRemove.flags.Get(marked) {
  4003  					// The node is marked by another process,
  4004  					// the physical deletion will be accomplished by another process.
  4005  					nodeToRemove.mu.Unlock()
  4006  					return false
  4007  				}
  4008  				nodeToRemove.flags.SetTrue(marked)
  4009  				isMarked = true
  4010  			}
  4011  			// Accomplish the physical deletion.
  4012  			var (
  4013  				highestLocked        = -1 // the highest level being locked by this process
  4014  				valid                = true
  4015  				pred, succ, prevPred *int32NodeDesc
  4016  			)
  4017  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4018  				pred, succ = preds[layer], succs[layer]
  4019  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4020  					pred.mu.Lock()
  4021  					highestLocked = layer
  4022  					prevPred = pred
  4023  				}
  4024  				// valid check if there is another node has inserted into the skip list in this layer
  4025  				// during this process, or the previous is removed by another process.
  4026  				// It is valid if:
  4027  				// 1. the previous node exists.
  4028  				// 2. no another node has inserted into the skip list in this layer.
  4029  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4030  			}
  4031  			if !valid {
  4032  				unlockInt32Desc(preds, highestLocked)
  4033  				continue
  4034  			}
  4035  			for i := topLayer; i >= 0; i-- {
  4036  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  4037  				// So we don't need `nodeToRemove.loadNext`
  4038  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  4039  			}
  4040  			nodeToRemove.mu.Unlock()
  4041  			unlockInt32Desc(preds, highestLocked)
  4042  			atomic.AddInt64(&s.length, -1)
  4043  			return true
  4044  		}
  4045  		return false
  4046  	}
  4047  }
  4048  
  4049  // Range calls f sequentially for each value present in the skip set.
  4050  // If f returns false, range stops the iteration.
  4051  func (s *Int32SetDesc) Range(f func(value int32) bool) {
  4052  	x := s.header.atomicLoadNext(0)
  4053  	for x != nil {
  4054  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4055  			x = x.atomicLoadNext(0)
  4056  			continue
  4057  		}
  4058  		if !f(x.value) {
  4059  			break
  4060  		}
  4061  		x = x.atomicLoadNext(0)
  4062  	}
  4063  }
  4064  
  4065  // Len return the length of this skip set.
  4066  func (s *Int32SetDesc) Len() int {
  4067  	return int(atomic.LoadInt64(&s.length))
  4068  }
  4069  
  4070  // RuneSet represents a set based on skip list in ascending order.
  4071  type RuneSet struct {
  4072  	header       *runeNode
  4073  	length       int64
  4074  	highestLevel int64 // highest level for now
  4075  }
  4076  
  4077  type runeNode struct {
  4078  	value rune
  4079  	next  optionalArray // [level]*runeNode
  4080  	mu    sync.Mutex
  4081  	flags bitflag
  4082  	level uint32
  4083  }
  4084  
  4085  func newRuneNode(value rune, level int) *runeNode {
  4086  	node := &runeNode{
  4087  		value: value,
  4088  		level: uint32(level),
  4089  	}
  4090  	if level > op1 {
  4091  		node.next.extra = new([op2]unsafe.Pointer)
  4092  	}
  4093  	return node
  4094  }
  4095  
  4096  func (n *runeNode) loadNext(i int) *runeNode {
  4097  	return (*runeNode)(n.next.load(i))
  4098  }
  4099  
  4100  func (n *runeNode) storeNext(i int, node *runeNode) {
  4101  	n.next.store(i, unsafe.Pointer(node))
  4102  }
  4103  
  4104  func (n *runeNode) atomicLoadNext(i int) *runeNode {
  4105  	return (*runeNode)(n.next.atomicLoad(i))
  4106  }
  4107  
  4108  func (n *runeNode) atomicStoreNext(i int, node *runeNode) {
  4109  	n.next.atomicStore(i, unsafe.Pointer(node))
  4110  }
  4111  
  4112  func (n *runeNode) lessthan(value rune) bool {
  4113  	return n.value < value
  4114  }
  4115  
  4116  func (n *runeNode) equal(value rune) bool {
  4117  	return n.value == value
  4118  }
  4119  
  4120  // NewRune return an empty rune skip set in ascending order.
  4121  func NewRune() *RuneSet {
  4122  	h := newRuneNode(0, maxLevel)
  4123  	h.flags.SetTrue(fullyLinked)
  4124  	return &RuneSet{
  4125  		header:       h,
  4126  		highestLevel: defaultHighestLevel,
  4127  	}
  4128  }
  4129  
  4130  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  4131  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4132  func (s *RuneSet) findNodeRemove(value rune, preds *[maxLevel]*runeNode, succs *[maxLevel]*runeNode) int {
  4133  	// lFound represents the index of the first layer at which it found a node.
  4134  	lFound, x := -1, s.header
  4135  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4136  		succ := x.atomicLoadNext(i)
  4137  		for succ != nil && succ.lessthan(value) {
  4138  			x = succ
  4139  			succ = x.atomicLoadNext(i)
  4140  		}
  4141  		preds[i] = x
  4142  		succs[i] = succ
  4143  
  4144  		// Check if the value already in the skip list.
  4145  		if lFound == -1 && succ != nil && succ.equal(value) {
  4146  			lFound = i
  4147  		}
  4148  	}
  4149  	return lFound
  4150  }
  4151  
  4152  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  4153  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4154  func (s *RuneSet) findNodeAdd(value rune, preds *[maxLevel]*runeNode, succs *[maxLevel]*runeNode) int {
  4155  	x := s.header
  4156  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4157  		succ := x.atomicLoadNext(i)
  4158  		for succ != nil && succ.lessthan(value) {
  4159  			x = succ
  4160  			succ = x.atomicLoadNext(i)
  4161  		}
  4162  		preds[i] = x
  4163  		succs[i] = succ
  4164  
  4165  		// Check if the value already in the skip list.
  4166  		if succ != nil && succ.equal(value) {
  4167  			return i
  4168  		}
  4169  	}
  4170  	return -1
  4171  }
  4172  
  4173  func unlockRune(preds [maxLevel]*runeNode, highestLevel int) {
  4174  	var prevPred *runeNode
  4175  	for i := highestLevel; i >= 0; i-- {
  4176  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  4177  			preds[i].mu.Unlock()
  4178  			prevPred = preds[i]
  4179  		}
  4180  	}
  4181  }
  4182  
  4183  // Add add the value into skip set, return true if this process insert the value into skip set,
  4184  // return false if this process can't insert this value, because another process has insert the same value.
  4185  //
  4186  // If the value is in the skip set but not fully linked, this process will wait until it is.
  4187  func (s *RuneSet) Add(value rune) bool {
  4188  	level := s.randomLevel()
  4189  	var preds, succs [maxLevel]*runeNode
  4190  	for {
  4191  		lFound := s.findNodeAdd(value, &preds, &succs)
  4192  		if lFound != -1 { // indicating the value is already in the skip-list
  4193  			nodeFound := succs[lFound]
  4194  			if !nodeFound.flags.Get(marked) {
  4195  				for !nodeFound.flags.Get(fullyLinked) {
  4196  					// The node is not yet fully linked, just waits until it is.
  4197  				}
  4198  				return false
  4199  			}
  4200  			// If the node is marked, represents some other thread is in the process of deleting this node,
  4201  			// we need to add this node in next loop.
  4202  			continue
  4203  		}
  4204  		// Add this node into skip list.
  4205  		var (
  4206  			highestLocked        = -1 // the highest level being locked by this process
  4207  			valid                = true
  4208  			pred, succ, prevPred *runeNode
  4209  		)
  4210  		for layer := 0; valid && layer < level; layer++ {
  4211  			pred = preds[layer]   // target node's previous node
  4212  			succ = succs[layer]   // target node's next node
  4213  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4214  				pred.mu.Lock()
  4215  				highestLocked = layer
  4216  				prevPred = pred
  4217  			}
  4218  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  4219  			// It is valid if:
  4220  			// 1. The previous node and next node both are not marked.
  4221  			// 2. The previous node's next node is succ in this layer.
  4222  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4223  		}
  4224  		if !valid {
  4225  			unlockRune(preds, highestLocked)
  4226  			continue
  4227  		}
  4228  
  4229  		nn := newRuneNode(value, level)
  4230  		for layer := 0; layer < level; layer++ {
  4231  			nn.storeNext(layer, succs[layer])
  4232  			preds[layer].atomicStoreNext(layer, nn)
  4233  		}
  4234  		nn.flags.SetTrue(fullyLinked)
  4235  		unlockRune(preds, highestLocked)
  4236  		atomic.AddInt64(&s.length, 1)
  4237  		return true
  4238  	}
  4239  }
  4240  
  4241  func (s *RuneSet) randomLevel() int {
  4242  	// Generate random level.
  4243  	level := randomLevel()
  4244  	// Update highest level if possible.
  4245  	for {
  4246  		hl := atomic.LoadInt64(&s.highestLevel)
  4247  		if int64(level) <= hl {
  4248  			break
  4249  		}
  4250  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  4251  			break
  4252  		}
  4253  	}
  4254  	return level
  4255  }
  4256  
  4257  // Contains check if the value is in the skip set.
  4258  func (s *RuneSet) Contains(value rune) bool {
  4259  	x := s.header
  4260  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4261  		nex := x.atomicLoadNext(i)
  4262  		for nex != nil && nex.lessthan(value) {
  4263  			x = nex
  4264  			nex = x.atomicLoadNext(i)
  4265  		}
  4266  
  4267  		// Check if the value already in the skip list.
  4268  		if nex != nil && nex.equal(value) {
  4269  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  4270  		}
  4271  	}
  4272  	return false
  4273  }
  4274  
  4275  // Remove a node from the skip set.
  4276  func (s *RuneSet) Remove(value rune) bool {
  4277  	var (
  4278  		nodeToRemove *runeNode
  4279  		isMarked     bool // represents if this operation mark the node
  4280  		topLayer     = -1
  4281  		preds, succs [maxLevel]*runeNode
  4282  	)
  4283  	for {
  4284  		lFound := s.findNodeRemove(value, &preds, &succs)
  4285  		if isMarked || // this process mark this node or we can find this node in the skip list
  4286  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4287  			if !isMarked { // we don't mark this node for now
  4288  				nodeToRemove = succs[lFound]
  4289  				topLayer = lFound
  4290  				nodeToRemove.mu.Lock()
  4291  				if nodeToRemove.flags.Get(marked) {
  4292  					// The node is marked by another process,
  4293  					// the physical deletion will be accomplished by another process.
  4294  					nodeToRemove.mu.Unlock()
  4295  					return false
  4296  				}
  4297  				nodeToRemove.flags.SetTrue(marked)
  4298  				isMarked = true
  4299  			}
  4300  			// Accomplish the physical deletion.
  4301  			var (
  4302  				highestLocked        = -1 // the highest level being locked by this process
  4303  				valid                = true
  4304  				pred, succ, prevPred *runeNode
  4305  			)
  4306  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4307  				pred, succ = preds[layer], succs[layer]
  4308  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4309  					pred.mu.Lock()
  4310  					highestLocked = layer
  4311  					prevPred = pred
  4312  				}
  4313  				// valid check if there is another node has inserted into the skip list in this layer
  4314  				// during this process, or the previous is removed by another process.
  4315  				// It is valid if:
  4316  				// 1. the previous node exists.
  4317  				// 2. no another node has inserted into the skip list in this layer.
  4318  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4319  			}
  4320  			if !valid {
  4321  				unlockRune(preds, highestLocked)
  4322  				continue
  4323  			}
  4324  			for i := topLayer; i >= 0; i-- {
  4325  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  4326  				// So we don't need `nodeToRemove.loadNext`
  4327  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  4328  			}
  4329  			nodeToRemove.mu.Unlock()
  4330  			unlockRune(preds, highestLocked)
  4331  			atomic.AddInt64(&s.length, -1)
  4332  			return true
  4333  		}
  4334  		return false
  4335  	}
  4336  }
  4337  
  4338  // Range calls f sequentially for each value present in the skip set.
  4339  // If f returns false, range stops the iteration.
  4340  func (s *RuneSet) Range(f func(value rune) bool) {
  4341  	x := s.header.atomicLoadNext(0)
  4342  	for x != nil {
  4343  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4344  			x = x.atomicLoadNext(0)
  4345  			continue
  4346  		}
  4347  		if !f(x.value) {
  4348  			break
  4349  		}
  4350  		x = x.atomicLoadNext(0)
  4351  	}
  4352  }
  4353  
  4354  // Len return the length of this skip set.
  4355  func (s *RuneSet) Len() int {
  4356  	return int(atomic.LoadInt64(&s.length))
  4357  }
  4358  
  4359  // RuneSetDesc represents a set based on skip list in descending order.
  4360  type RuneSetDesc struct {
  4361  	header       *runeNodeDesc
  4362  	length       int64
  4363  	highestLevel int64 // highest level for now
  4364  }
  4365  
  4366  type runeNodeDesc struct {
  4367  	value rune
  4368  	next  optionalArray // [level]*runeNodeDesc
  4369  	mu    sync.Mutex
  4370  	flags bitflag
  4371  	level uint32
  4372  }
  4373  
  4374  func newRuneNodeDesc(value rune, level int) *runeNodeDesc {
  4375  	node := &runeNodeDesc{
  4376  		value: value,
  4377  		level: uint32(level),
  4378  	}
  4379  	if level > op1 {
  4380  		node.next.extra = new([op2]unsafe.Pointer)
  4381  	}
  4382  	return node
  4383  }
  4384  
  4385  func (n *runeNodeDesc) loadNext(i int) *runeNodeDesc {
  4386  	return (*runeNodeDesc)(n.next.load(i))
  4387  }
  4388  
  4389  func (n *runeNodeDesc) storeNext(i int, node *runeNodeDesc) {
  4390  	n.next.store(i, unsafe.Pointer(node))
  4391  }
  4392  
  4393  func (n *runeNodeDesc) atomicLoadNext(i int) *runeNodeDesc {
  4394  	return (*runeNodeDesc)(n.next.atomicLoad(i))
  4395  }
  4396  
  4397  func (n *runeNodeDesc) atomicStoreNext(i int, node *runeNodeDesc) {
  4398  	n.next.atomicStore(i, unsafe.Pointer(node))
  4399  }
  4400  
  4401  func (n *runeNodeDesc) lessthan(value rune) bool {
  4402  	return n.value > value
  4403  }
  4404  
  4405  func (n *runeNodeDesc) equal(value rune) bool {
  4406  	return n.value == value
  4407  }
  4408  
  4409  // NewRuneDesc return an empty rune skip set in descending order.
  4410  func NewRuneDesc() *RuneSetDesc {
  4411  	h := newRuneNodeDesc(0, maxLevel)
  4412  	h.flags.SetTrue(fullyLinked)
  4413  	return &RuneSetDesc{
  4414  		header:       h,
  4415  		highestLevel: defaultHighestLevel,
  4416  	}
  4417  }
  4418  
  4419  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  4420  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4421  func (s *RuneSetDesc) findNodeRemove(value rune, preds *[maxLevel]*runeNodeDesc, succs *[maxLevel]*runeNodeDesc) int {
  4422  	// lFound represents the index of the first layer at which it found a node.
  4423  	lFound, x := -1, s.header
  4424  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4425  		succ := x.atomicLoadNext(i)
  4426  		for succ != nil && succ.lessthan(value) {
  4427  			x = succ
  4428  			succ = x.atomicLoadNext(i)
  4429  		}
  4430  		preds[i] = x
  4431  		succs[i] = succ
  4432  
  4433  		// Check if the value already in the skip list.
  4434  		if lFound == -1 && succ != nil && succ.equal(value) {
  4435  			lFound = i
  4436  		}
  4437  	}
  4438  	return lFound
  4439  }
  4440  
  4441  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  4442  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4443  func (s *RuneSetDesc) findNodeAdd(value rune, preds *[maxLevel]*runeNodeDesc, succs *[maxLevel]*runeNodeDesc) int {
  4444  	x := s.header
  4445  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4446  		succ := x.atomicLoadNext(i)
  4447  		for succ != nil && succ.lessthan(value) {
  4448  			x = succ
  4449  			succ = x.atomicLoadNext(i)
  4450  		}
  4451  		preds[i] = x
  4452  		succs[i] = succ
  4453  
  4454  		// Check if the value already in the skip list.
  4455  		if succ != nil && succ.equal(value) {
  4456  			return i
  4457  		}
  4458  	}
  4459  	return -1
  4460  }
  4461  
  4462  func unlockRuneDesc(preds [maxLevel]*runeNodeDesc, highestLevel int) {
  4463  	var prevPred *runeNodeDesc
  4464  	for i := highestLevel; i >= 0; i-- {
  4465  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  4466  			preds[i].mu.Unlock()
  4467  			prevPred = preds[i]
  4468  		}
  4469  	}
  4470  }
  4471  
  4472  // Add add the value into skip set, return true if this process insert the value into skip set,
  4473  // return false if this process can't insert this value, because another process has insert the same value.
  4474  //
  4475  // If the value is in the skip set but not fully linked, this process will wait until it is.
  4476  func (s *RuneSetDesc) Add(value rune) bool {
  4477  	level := s.randomLevel()
  4478  	var preds, succs [maxLevel]*runeNodeDesc
  4479  	for {
  4480  		lFound := s.findNodeAdd(value, &preds, &succs)
  4481  		if lFound != -1 { // indicating the value is already in the skip-list
  4482  			nodeFound := succs[lFound]
  4483  			if !nodeFound.flags.Get(marked) {
  4484  				for !nodeFound.flags.Get(fullyLinked) {
  4485  					// The node is not yet fully linked, just waits until it is.
  4486  				}
  4487  				return false
  4488  			}
  4489  			// If the node is marked, represents some other thread is in the process of deleting this node,
  4490  			// we need to add this node in next loop.
  4491  			continue
  4492  		}
  4493  		// Add this node into skip list.
  4494  		var (
  4495  			highestLocked        = -1 // the highest level being locked by this process
  4496  			valid                = true
  4497  			pred, succ, prevPred *runeNodeDesc
  4498  		)
  4499  		for layer := 0; valid && layer < level; layer++ {
  4500  			pred = preds[layer]   // target node's previous node
  4501  			succ = succs[layer]   // target node's next node
  4502  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4503  				pred.mu.Lock()
  4504  				highestLocked = layer
  4505  				prevPred = pred
  4506  			}
  4507  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  4508  			// It is valid if:
  4509  			// 1. The previous node and next node both are not marked.
  4510  			// 2. The previous node's next node is succ in this layer.
  4511  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4512  		}
  4513  		if !valid {
  4514  			unlockRuneDesc(preds, highestLocked)
  4515  			continue
  4516  		}
  4517  
  4518  		nn := newRuneNodeDesc(value, level)
  4519  		for layer := 0; layer < level; layer++ {
  4520  			nn.storeNext(layer, succs[layer])
  4521  			preds[layer].atomicStoreNext(layer, nn)
  4522  		}
  4523  		nn.flags.SetTrue(fullyLinked)
  4524  		unlockRuneDesc(preds, highestLocked)
  4525  		atomic.AddInt64(&s.length, 1)
  4526  		return true
  4527  	}
  4528  }
  4529  
  4530  func (s *RuneSetDesc) randomLevel() int {
  4531  	// Generate random level.
  4532  	level := randomLevel()
  4533  	// Update highest level if possible.
  4534  	for {
  4535  		hl := atomic.LoadInt64(&s.highestLevel)
  4536  		if int64(level) <= hl {
  4537  			break
  4538  		}
  4539  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  4540  			break
  4541  		}
  4542  	}
  4543  	return level
  4544  }
  4545  
  4546  // Contains check if the value is in the skip set.
  4547  func (s *RuneSetDesc) Contains(value rune) bool {
  4548  	x := s.header
  4549  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4550  		nex := x.atomicLoadNext(i)
  4551  		for nex != nil && nex.lessthan(value) {
  4552  			x = nex
  4553  			nex = x.atomicLoadNext(i)
  4554  		}
  4555  
  4556  		// Check if the value already in the skip list.
  4557  		if nex != nil && nex.equal(value) {
  4558  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  4559  		}
  4560  	}
  4561  	return false
  4562  }
  4563  
  4564  // Remove a node from the skip set.
  4565  func (s *RuneSetDesc) Remove(value rune) bool {
  4566  	var (
  4567  		nodeToRemove *runeNodeDesc
  4568  		isMarked     bool // represents if this operation mark the node
  4569  		topLayer     = -1
  4570  		preds, succs [maxLevel]*runeNodeDesc
  4571  	)
  4572  	for {
  4573  		lFound := s.findNodeRemove(value, &preds, &succs)
  4574  		if isMarked || // this process mark this node or we can find this node in the skip list
  4575  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4576  			if !isMarked { // we don't mark this node for now
  4577  				nodeToRemove = succs[lFound]
  4578  				topLayer = lFound
  4579  				nodeToRemove.mu.Lock()
  4580  				if nodeToRemove.flags.Get(marked) {
  4581  					// The node is marked by another process,
  4582  					// the physical deletion will be accomplished by another process.
  4583  					nodeToRemove.mu.Unlock()
  4584  					return false
  4585  				}
  4586  				nodeToRemove.flags.SetTrue(marked)
  4587  				isMarked = true
  4588  			}
  4589  			// Accomplish the physical deletion.
  4590  			var (
  4591  				highestLocked        = -1 // the highest level being locked by this process
  4592  				valid                = true
  4593  				pred, succ, prevPred *runeNodeDesc
  4594  			)
  4595  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4596  				pred, succ = preds[layer], succs[layer]
  4597  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4598  					pred.mu.Lock()
  4599  					highestLocked = layer
  4600  					prevPred = pred
  4601  				}
  4602  				// valid check if there is another node has inserted into the skip list in this layer
  4603  				// during this process, or the previous is removed by another process.
  4604  				// It is valid if:
  4605  				// 1. the previous node exists.
  4606  				// 2. no another node has inserted into the skip list in this layer.
  4607  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4608  			}
  4609  			if !valid {
  4610  				unlockRuneDesc(preds, highestLocked)
  4611  				continue
  4612  			}
  4613  			for i := topLayer; i >= 0; i-- {
  4614  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  4615  				// So we don't need `nodeToRemove.loadNext`
  4616  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  4617  			}
  4618  			nodeToRemove.mu.Unlock()
  4619  			unlockRuneDesc(preds, highestLocked)
  4620  			atomic.AddInt64(&s.length, -1)
  4621  			return true
  4622  		}
  4623  		return false
  4624  	}
  4625  }
  4626  
  4627  // Range calls f sequentially for each value present in the skip set.
  4628  // If f returns false, range stops the iteration.
  4629  func (s *RuneSetDesc) Range(f func(value rune) bool) {
  4630  	x := s.header.atomicLoadNext(0)
  4631  	for x != nil {
  4632  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4633  			x = x.atomicLoadNext(0)
  4634  			continue
  4635  		}
  4636  		if !f(x.value) {
  4637  			break
  4638  		}
  4639  		x = x.atomicLoadNext(0)
  4640  	}
  4641  }
  4642  
  4643  // Len return the length of this skip set.
  4644  func (s *RuneSetDesc) Len() int {
  4645  	return int(atomic.LoadInt64(&s.length))
  4646  }
  4647  
  4648  // UintSet represents a set based on skip list in ascending order.
  4649  type UintSet struct {
  4650  	header       *uintNode
  4651  	length       int64
  4652  	highestLevel int64 // highest level for now
  4653  }
  4654  
  4655  type uintNode struct {
  4656  	value uint
  4657  	next  optionalArray // [level]*uintNode
  4658  	mu    sync.Mutex
  4659  	flags bitflag
  4660  	level uint32
  4661  }
  4662  
  4663  func newUintNode(value uint, level int) *uintNode {
  4664  	node := &uintNode{
  4665  		value: value,
  4666  		level: uint32(level),
  4667  	}
  4668  	if level > op1 {
  4669  		node.next.extra = new([op2]unsafe.Pointer)
  4670  	}
  4671  	return node
  4672  }
  4673  
  4674  func (n *uintNode) loadNext(i int) *uintNode {
  4675  	return (*uintNode)(n.next.load(i))
  4676  }
  4677  
  4678  func (n *uintNode) storeNext(i int, node *uintNode) {
  4679  	n.next.store(i, unsafe.Pointer(node))
  4680  }
  4681  
  4682  func (n *uintNode) atomicLoadNext(i int) *uintNode {
  4683  	return (*uintNode)(n.next.atomicLoad(i))
  4684  }
  4685  
  4686  func (n *uintNode) atomicStoreNext(i int, node *uintNode) {
  4687  	n.next.atomicStore(i, unsafe.Pointer(node))
  4688  }
  4689  
  4690  func (n *uintNode) lessthan(value uint) bool {
  4691  	return n.value < value
  4692  }
  4693  
  4694  func (n *uintNode) equal(value uint) bool {
  4695  	return n.value == value
  4696  }
  4697  
  4698  // NewUint return an empty uint skip set in ascending order.
  4699  func NewUint() *UintSet {
  4700  	h := newUintNode(0, maxLevel)
  4701  	h.flags.SetTrue(fullyLinked)
  4702  	return &UintSet{
  4703  		header:       h,
  4704  		highestLevel: defaultHighestLevel,
  4705  	}
  4706  }
  4707  
  4708  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  4709  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4710  func (s *UintSet) findNodeRemove(value uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) int {
  4711  	// lFound represents the index of the first layer at which it found a node.
  4712  	lFound, x := -1, s.header
  4713  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4714  		succ := x.atomicLoadNext(i)
  4715  		for succ != nil && succ.lessthan(value) {
  4716  			x = succ
  4717  			succ = x.atomicLoadNext(i)
  4718  		}
  4719  		preds[i] = x
  4720  		succs[i] = succ
  4721  
  4722  		// Check if the value already in the skip list.
  4723  		if lFound == -1 && succ != nil && succ.equal(value) {
  4724  			lFound = i
  4725  		}
  4726  	}
  4727  	return lFound
  4728  }
  4729  
  4730  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  4731  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4732  func (s *UintSet) findNodeAdd(value uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) int {
  4733  	x := s.header
  4734  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4735  		succ := x.atomicLoadNext(i)
  4736  		for succ != nil && succ.lessthan(value) {
  4737  			x = succ
  4738  			succ = x.atomicLoadNext(i)
  4739  		}
  4740  		preds[i] = x
  4741  		succs[i] = succ
  4742  
  4743  		// Check if the value already in the skip list.
  4744  		if succ != nil && succ.equal(value) {
  4745  			return i
  4746  		}
  4747  	}
  4748  	return -1
  4749  }
  4750  
  4751  func unlockUint(preds [maxLevel]*uintNode, highestLevel int) {
  4752  	var prevPred *uintNode
  4753  	for i := highestLevel; i >= 0; i-- {
  4754  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  4755  			preds[i].mu.Unlock()
  4756  			prevPred = preds[i]
  4757  		}
  4758  	}
  4759  }
  4760  
  4761  // Add add the value into skip set, return true if this process insert the value into skip set,
  4762  // return false if this process can't insert this value, because another process has insert the same value.
  4763  //
  4764  // If the value is in the skip set but not fully linked, this process will wait until it is.
  4765  func (s *UintSet) Add(value uint) bool {
  4766  	level := s.randomLevel()
  4767  	var preds, succs [maxLevel]*uintNode
  4768  	for {
  4769  		lFound := s.findNodeAdd(value, &preds, &succs)
  4770  		if lFound != -1 { // indicating the value is already in the skip-list
  4771  			nodeFound := succs[lFound]
  4772  			if !nodeFound.flags.Get(marked) {
  4773  				for !nodeFound.flags.Get(fullyLinked) {
  4774  					// The node is not yet fully linked, just waits until it is.
  4775  				}
  4776  				return false
  4777  			}
  4778  			// If the node is marked, represents some other thread is in the process of deleting this node,
  4779  			// we need to add this node in next loop.
  4780  			continue
  4781  		}
  4782  		// Add this node into skip list.
  4783  		var (
  4784  			highestLocked        = -1 // the highest level being locked by this process
  4785  			valid                = true
  4786  			pred, succ, prevPred *uintNode
  4787  		)
  4788  		for layer := 0; valid && layer < level; layer++ {
  4789  			pred = preds[layer]   // target node's previous node
  4790  			succ = succs[layer]   // target node's next node
  4791  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4792  				pred.mu.Lock()
  4793  				highestLocked = layer
  4794  				prevPred = pred
  4795  			}
  4796  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  4797  			// It is valid if:
  4798  			// 1. The previous node and next node both are not marked.
  4799  			// 2. The previous node's next node is succ in this layer.
  4800  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4801  		}
  4802  		if !valid {
  4803  			unlockUint(preds, highestLocked)
  4804  			continue
  4805  		}
  4806  
  4807  		nn := newUintNode(value, level)
  4808  		for layer := 0; layer < level; layer++ {
  4809  			nn.storeNext(layer, succs[layer])
  4810  			preds[layer].atomicStoreNext(layer, nn)
  4811  		}
  4812  		nn.flags.SetTrue(fullyLinked)
  4813  		unlockUint(preds, highestLocked)
  4814  		atomic.AddInt64(&s.length, 1)
  4815  		return true
  4816  	}
  4817  }
  4818  
  4819  func (s *UintSet) randomLevel() int {
  4820  	// Generate random level.
  4821  	level := randomLevel()
  4822  	// Update highest level if possible.
  4823  	for {
  4824  		hl := atomic.LoadInt64(&s.highestLevel)
  4825  		if int64(level) <= hl {
  4826  			break
  4827  		}
  4828  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  4829  			break
  4830  		}
  4831  	}
  4832  	return level
  4833  }
  4834  
  4835  // Contains check if the value is in the skip set.
  4836  func (s *UintSet) Contains(value uint) bool {
  4837  	x := s.header
  4838  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4839  		nex := x.atomicLoadNext(i)
  4840  		for nex != nil && nex.lessthan(value) {
  4841  			x = nex
  4842  			nex = x.atomicLoadNext(i)
  4843  		}
  4844  
  4845  		// Check if the value already in the skip list.
  4846  		if nex != nil && nex.equal(value) {
  4847  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  4848  		}
  4849  	}
  4850  	return false
  4851  }
  4852  
  4853  // Remove a node from the skip set.
  4854  func (s *UintSet) Remove(value uint) bool {
  4855  	var (
  4856  		nodeToRemove *uintNode
  4857  		isMarked     bool // represents if this operation mark the node
  4858  		topLayer     = -1
  4859  		preds, succs [maxLevel]*uintNode
  4860  	)
  4861  	for {
  4862  		lFound := s.findNodeRemove(value, &preds, &succs)
  4863  		if isMarked || // this process mark this node or we can find this node in the skip list
  4864  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4865  			if !isMarked { // we don't mark this node for now
  4866  				nodeToRemove = succs[lFound]
  4867  				topLayer = lFound
  4868  				nodeToRemove.mu.Lock()
  4869  				if nodeToRemove.flags.Get(marked) {
  4870  					// The node is marked by another process,
  4871  					// the physical deletion will be accomplished by another process.
  4872  					nodeToRemove.mu.Unlock()
  4873  					return false
  4874  				}
  4875  				nodeToRemove.flags.SetTrue(marked)
  4876  				isMarked = true
  4877  			}
  4878  			// Accomplish the physical deletion.
  4879  			var (
  4880  				highestLocked        = -1 // the highest level being locked by this process
  4881  				valid                = true
  4882  				pred, succ, prevPred *uintNode
  4883  			)
  4884  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4885  				pred, succ = preds[layer], succs[layer]
  4886  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4887  					pred.mu.Lock()
  4888  					highestLocked = layer
  4889  					prevPred = pred
  4890  				}
  4891  				// valid check if there is another node has inserted into the skip list in this layer
  4892  				// during this process, or the previous is removed by another process.
  4893  				// It is valid if:
  4894  				// 1. the previous node exists.
  4895  				// 2. no another node has inserted into the skip list in this layer.
  4896  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4897  			}
  4898  			if !valid {
  4899  				unlockUint(preds, highestLocked)
  4900  				continue
  4901  			}
  4902  			for i := topLayer; i >= 0; i-- {
  4903  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  4904  				// So we don't need `nodeToRemove.loadNext`
  4905  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  4906  			}
  4907  			nodeToRemove.mu.Unlock()
  4908  			unlockUint(preds, highestLocked)
  4909  			atomic.AddInt64(&s.length, -1)
  4910  			return true
  4911  		}
  4912  		return false
  4913  	}
  4914  }
  4915  
  4916  // Range calls f sequentially for each value present in the skip set.
  4917  // If f returns false, range stops the iteration.
  4918  func (s *UintSet) Range(f func(value uint) bool) {
  4919  	x := s.header.atomicLoadNext(0)
  4920  	for x != nil {
  4921  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4922  			x = x.atomicLoadNext(0)
  4923  			continue
  4924  		}
  4925  		if !f(x.value) {
  4926  			break
  4927  		}
  4928  		x = x.atomicLoadNext(0)
  4929  	}
  4930  }
  4931  
  4932  // Len return the length of this skip set.
  4933  func (s *UintSet) Len() int {
  4934  	return int(atomic.LoadInt64(&s.length))
  4935  }
  4936  
  4937  // UintSetDesc represents a set based on skip list in descending order.
  4938  type UintSetDesc struct {
  4939  	header       *uintNodeDesc
  4940  	length       int64
  4941  	highestLevel int64 // highest level for now
  4942  }
  4943  
  4944  type uintNodeDesc struct {
  4945  	value uint
  4946  	next  optionalArray // [level]*uintNodeDesc
  4947  	mu    sync.Mutex
  4948  	flags bitflag
  4949  	level uint32
  4950  }
  4951  
  4952  func newUintNodeDesc(value uint, level int) *uintNodeDesc {
  4953  	node := &uintNodeDesc{
  4954  		value: value,
  4955  		level: uint32(level),
  4956  	}
  4957  	if level > op1 {
  4958  		node.next.extra = new([op2]unsafe.Pointer)
  4959  	}
  4960  	return node
  4961  }
  4962  
  4963  func (n *uintNodeDesc) loadNext(i int) *uintNodeDesc {
  4964  	return (*uintNodeDesc)(n.next.load(i))
  4965  }
  4966  
  4967  func (n *uintNodeDesc) storeNext(i int, node *uintNodeDesc) {
  4968  	n.next.store(i, unsafe.Pointer(node))
  4969  }
  4970  
  4971  func (n *uintNodeDesc) atomicLoadNext(i int) *uintNodeDesc {
  4972  	return (*uintNodeDesc)(n.next.atomicLoad(i))
  4973  }
  4974  
  4975  func (n *uintNodeDesc) atomicStoreNext(i int, node *uintNodeDesc) {
  4976  	n.next.atomicStore(i, unsafe.Pointer(node))
  4977  }
  4978  
  4979  func (n *uintNodeDesc) lessthan(value uint) bool {
  4980  	return n.value > value
  4981  }
  4982  
  4983  func (n *uintNodeDesc) equal(value uint) bool {
  4984  	return n.value == value
  4985  }
  4986  
  4987  // NewUintDesc return an empty uint skip set in descending order.
  4988  func NewUintDesc() *UintSetDesc {
  4989  	h := newUintNodeDesc(0, maxLevel)
  4990  	h.flags.SetTrue(fullyLinked)
  4991  	return &UintSetDesc{
  4992  		header:       h,
  4993  		highestLevel: defaultHighestLevel,
  4994  	}
  4995  }
  4996  
  4997  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  4998  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4999  func (s *UintSetDesc) findNodeRemove(value uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) int {
  5000  	// lFound represents the index of the first layer at which it found a node.
  5001  	lFound, x := -1, s.header
  5002  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5003  		succ := x.atomicLoadNext(i)
  5004  		for succ != nil && succ.lessthan(value) {
  5005  			x = succ
  5006  			succ = x.atomicLoadNext(i)
  5007  		}
  5008  		preds[i] = x
  5009  		succs[i] = succ
  5010  
  5011  		// Check if the value already in the skip list.
  5012  		if lFound == -1 && succ != nil && succ.equal(value) {
  5013  			lFound = i
  5014  		}
  5015  	}
  5016  	return lFound
  5017  }
  5018  
  5019  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  5020  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5021  func (s *UintSetDesc) findNodeAdd(value uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) int {
  5022  	x := s.header
  5023  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5024  		succ := x.atomicLoadNext(i)
  5025  		for succ != nil && succ.lessthan(value) {
  5026  			x = succ
  5027  			succ = x.atomicLoadNext(i)
  5028  		}
  5029  		preds[i] = x
  5030  		succs[i] = succ
  5031  
  5032  		// Check if the value already in the skip list.
  5033  		if succ != nil && succ.equal(value) {
  5034  			return i
  5035  		}
  5036  	}
  5037  	return -1
  5038  }
  5039  
  5040  func unlockUintDesc(preds [maxLevel]*uintNodeDesc, highestLevel int) {
  5041  	var prevPred *uintNodeDesc
  5042  	for i := highestLevel; i >= 0; i-- {
  5043  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  5044  			preds[i].mu.Unlock()
  5045  			prevPred = preds[i]
  5046  		}
  5047  	}
  5048  }
  5049  
  5050  // Add add the value into skip set, return true if this process insert the value into skip set,
  5051  // return false if this process can't insert this value, because another process has insert the same value.
  5052  //
  5053  // If the value is in the skip set but not fully linked, this process will wait until it is.
  5054  func (s *UintSetDesc) Add(value uint) bool {
  5055  	level := s.randomLevel()
  5056  	var preds, succs [maxLevel]*uintNodeDesc
  5057  	for {
  5058  		lFound := s.findNodeAdd(value, &preds, &succs)
  5059  		if lFound != -1 { // indicating the value is already in the skip-list
  5060  			nodeFound := succs[lFound]
  5061  			if !nodeFound.flags.Get(marked) {
  5062  				for !nodeFound.flags.Get(fullyLinked) {
  5063  					// The node is not yet fully linked, just waits until it is.
  5064  				}
  5065  				return false
  5066  			}
  5067  			// If the node is marked, represents some other thread is in the process of deleting this node,
  5068  			// we need to add this node in next loop.
  5069  			continue
  5070  		}
  5071  		// Add this node into skip list.
  5072  		var (
  5073  			highestLocked        = -1 // the highest level being locked by this process
  5074  			valid                = true
  5075  			pred, succ, prevPred *uintNodeDesc
  5076  		)
  5077  		for layer := 0; valid && layer < level; layer++ {
  5078  			pred = preds[layer]   // target node's previous node
  5079  			succ = succs[layer]   // target node's next node
  5080  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5081  				pred.mu.Lock()
  5082  				highestLocked = layer
  5083  				prevPred = pred
  5084  			}
  5085  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  5086  			// It is valid if:
  5087  			// 1. The previous node and next node both are not marked.
  5088  			// 2. The previous node's next node is succ in this layer.
  5089  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5090  		}
  5091  		if !valid {
  5092  			unlockUintDesc(preds, highestLocked)
  5093  			continue
  5094  		}
  5095  
  5096  		nn := newUintNodeDesc(value, level)
  5097  		for layer := 0; layer < level; layer++ {
  5098  			nn.storeNext(layer, succs[layer])
  5099  			preds[layer].atomicStoreNext(layer, nn)
  5100  		}
  5101  		nn.flags.SetTrue(fullyLinked)
  5102  		unlockUintDesc(preds, highestLocked)
  5103  		atomic.AddInt64(&s.length, 1)
  5104  		return true
  5105  	}
  5106  }
  5107  
  5108  func (s *UintSetDesc) randomLevel() int {
  5109  	// Generate random level.
  5110  	level := randomLevel()
  5111  	// Update highest level if possible.
  5112  	for {
  5113  		hl := atomic.LoadInt64(&s.highestLevel)
  5114  		if int64(level) <= hl {
  5115  			break
  5116  		}
  5117  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  5118  			break
  5119  		}
  5120  	}
  5121  	return level
  5122  }
  5123  
  5124  // Contains check if the value is in the skip set.
  5125  func (s *UintSetDesc) Contains(value uint) bool {
  5126  	x := s.header
  5127  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5128  		nex := x.atomicLoadNext(i)
  5129  		for nex != nil && nex.lessthan(value) {
  5130  			x = nex
  5131  			nex = x.atomicLoadNext(i)
  5132  		}
  5133  
  5134  		// Check if the value already in the skip list.
  5135  		if nex != nil && nex.equal(value) {
  5136  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  5137  		}
  5138  	}
  5139  	return false
  5140  }
  5141  
  5142  // Remove a node from the skip set.
  5143  func (s *UintSetDesc) Remove(value uint) bool {
  5144  	var (
  5145  		nodeToRemove *uintNodeDesc
  5146  		isMarked     bool // represents if this operation mark the node
  5147  		topLayer     = -1
  5148  		preds, succs [maxLevel]*uintNodeDesc
  5149  	)
  5150  	for {
  5151  		lFound := s.findNodeRemove(value, &preds, &succs)
  5152  		if isMarked || // this process mark this node or we can find this node in the skip list
  5153  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5154  			if !isMarked { // we don't mark this node for now
  5155  				nodeToRemove = succs[lFound]
  5156  				topLayer = lFound
  5157  				nodeToRemove.mu.Lock()
  5158  				if nodeToRemove.flags.Get(marked) {
  5159  					// The node is marked by another process,
  5160  					// the physical deletion will be accomplished by another process.
  5161  					nodeToRemove.mu.Unlock()
  5162  					return false
  5163  				}
  5164  				nodeToRemove.flags.SetTrue(marked)
  5165  				isMarked = true
  5166  			}
  5167  			// Accomplish the physical deletion.
  5168  			var (
  5169  				highestLocked        = -1 // the highest level being locked by this process
  5170  				valid                = true
  5171  				pred, succ, prevPred *uintNodeDesc
  5172  			)
  5173  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5174  				pred, succ = preds[layer], succs[layer]
  5175  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5176  					pred.mu.Lock()
  5177  					highestLocked = layer
  5178  					prevPred = pred
  5179  				}
  5180  				// valid check if there is another node has inserted into the skip list in this layer
  5181  				// during this process, or the previous is removed by another process.
  5182  				// It is valid if:
  5183  				// 1. the previous node exists.
  5184  				// 2. no another node has inserted into the skip list in this layer.
  5185  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  5186  			}
  5187  			if !valid {
  5188  				unlockUintDesc(preds, highestLocked)
  5189  				continue
  5190  			}
  5191  			for i := topLayer; i >= 0; i-- {
  5192  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  5193  				// So we don't need `nodeToRemove.loadNext`
  5194  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  5195  			}
  5196  			nodeToRemove.mu.Unlock()
  5197  			unlockUintDesc(preds, highestLocked)
  5198  			atomic.AddInt64(&s.length, -1)
  5199  			return true
  5200  		}
  5201  		return false
  5202  	}
  5203  }
  5204  
  5205  // Range calls f sequentially for each value present in the skip set.
  5206  // If f returns false, range stops the iteration.
  5207  func (s *UintSetDesc) Range(f func(value uint) bool) {
  5208  	x := s.header.atomicLoadNext(0)
  5209  	for x != nil {
  5210  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  5211  			x = x.atomicLoadNext(0)
  5212  			continue
  5213  		}
  5214  		if !f(x.value) {
  5215  			break
  5216  		}
  5217  		x = x.atomicLoadNext(0)
  5218  	}
  5219  }
  5220  
  5221  // Len return the length of this skip set.
  5222  func (s *UintSetDesc) Len() int {
  5223  	return int(atomic.LoadInt64(&s.length))
  5224  }
  5225  
  5226  // Uint8Set represents a set based on skip list in ascending order.
  5227  type Uint8Set struct {
  5228  	header       *uint8Node
  5229  	length       int64
  5230  	highestLevel int64 // highest level for now
  5231  }
  5232  
  5233  type uint8Node struct {
  5234  	value uint8
  5235  	next  optionalArray // [level]*uint8Node
  5236  	mu    sync.Mutex
  5237  	flags bitflag
  5238  	level uint32
  5239  }
  5240  
  5241  func newUint8Node(value uint8, level int) *uint8Node {
  5242  	node := &uint8Node{
  5243  		value: value,
  5244  		level: uint32(level),
  5245  	}
  5246  	if level > op1 {
  5247  		node.next.extra = new([op2]unsafe.Pointer)
  5248  	}
  5249  	return node
  5250  }
  5251  
  5252  func (n *uint8Node) loadNext(i int) *uint8Node {
  5253  	return (*uint8Node)(n.next.load(i))
  5254  }
  5255  
  5256  func (n *uint8Node) storeNext(i int, node *uint8Node) {
  5257  	n.next.store(i, unsafe.Pointer(node))
  5258  }
  5259  
  5260  func (n *uint8Node) atomicLoadNext(i int) *uint8Node {
  5261  	return (*uint8Node)(n.next.atomicLoad(i))
  5262  }
  5263  
  5264  func (n *uint8Node) atomicStoreNext(i int, node *uint8Node) {
  5265  	n.next.atomicStore(i, unsafe.Pointer(node))
  5266  }
  5267  
  5268  func (n *uint8Node) lessthan(value uint8) bool {
  5269  	return n.value < value
  5270  }
  5271  
  5272  func (n *uint8Node) equal(value uint8) bool {
  5273  	return n.value == value
  5274  }
  5275  
  5276  // NewUint8 return an empty uint8 skip set in ascending order.
  5277  func NewUint8() *Uint8Set {
  5278  	h := newUint8Node(0, maxLevel)
  5279  	h.flags.SetTrue(fullyLinked)
  5280  	return &Uint8Set{
  5281  		header:       h,
  5282  		highestLevel: defaultHighestLevel,
  5283  	}
  5284  }
  5285  
  5286  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  5287  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5288  func (s *Uint8Set) findNodeRemove(value uint8, preds *[maxLevel]*uint8Node, succs *[maxLevel]*uint8Node) int {
  5289  	// lFound represents the index of the first layer at which it found a node.
  5290  	lFound, x := -1, s.header
  5291  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5292  		succ := x.atomicLoadNext(i)
  5293  		for succ != nil && succ.lessthan(value) {
  5294  			x = succ
  5295  			succ = x.atomicLoadNext(i)
  5296  		}
  5297  		preds[i] = x
  5298  		succs[i] = succ
  5299  
  5300  		// Check if the value already in the skip list.
  5301  		if lFound == -1 && succ != nil && succ.equal(value) {
  5302  			lFound = i
  5303  		}
  5304  	}
  5305  	return lFound
  5306  }
  5307  
  5308  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  5309  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5310  func (s *Uint8Set) findNodeAdd(value uint8, preds *[maxLevel]*uint8Node, succs *[maxLevel]*uint8Node) int {
  5311  	x := s.header
  5312  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5313  		succ := x.atomicLoadNext(i)
  5314  		for succ != nil && succ.lessthan(value) {
  5315  			x = succ
  5316  			succ = x.atomicLoadNext(i)
  5317  		}
  5318  		preds[i] = x
  5319  		succs[i] = succ
  5320  
  5321  		// Check if the value already in the skip list.
  5322  		if succ != nil && succ.equal(value) {
  5323  			return i
  5324  		}
  5325  	}
  5326  	return -1
  5327  }
  5328  
  5329  func unlockUint8(preds [maxLevel]*uint8Node, highestLevel int) {
  5330  	var prevPred *uint8Node
  5331  	for i := highestLevel; i >= 0; i-- {
  5332  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  5333  			preds[i].mu.Unlock()
  5334  			prevPred = preds[i]
  5335  		}
  5336  	}
  5337  }
  5338  
  5339  // Add add the value into skip set, return true if this process insert the value into skip set,
  5340  // return false if this process can't insert this value, because another process has insert the same value.
  5341  //
  5342  // If the value is in the skip set but not fully linked, this process will wait until it is.
  5343  func (s *Uint8Set) Add(value uint8) bool {
  5344  	level := s.randomLevel()
  5345  	var preds, succs [maxLevel]*uint8Node
  5346  	for {
  5347  		lFound := s.findNodeAdd(value, &preds, &succs)
  5348  		if lFound != -1 { // indicating the value is already in the skip-list
  5349  			nodeFound := succs[lFound]
  5350  			if !nodeFound.flags.Get(marked) {
  5351  				for !nodeFound.flags.Get(fullyLinked) {
  5352  					// The node is not yet fully linked, just waits until it is.
  5353  				}
  5354  				return false
  5355  			}
  5356  			// If the node is marked, represents some other thread is in the process of deleting this node,
  5357  			// we need to add this node in next loop.
  5358  			continue
  5359  		}
  5360  		// Add this node into skip list.
  5361  		var (
  5362  			highestLocked        = -1 // the highest level being locked by this process
  5363  			valid                = true
  5364  			pred, succ, prevPred *uint8Node
  5365  		)
  5366  		for layer := 0; valid && layer < level; layer++ {
  5367  			pred = preds[layer]   // target node's previous node
  5368  			succ = succs[layer]   // target node's next node
  5369  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5370  				pred.mu.Lock()
  5371  				highestLocked = layer
  5372  				prevPred = pred
  5373  			}
  5374  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  5375  			// It is valid if:
  5376  			// 1. The previous node and next node both are not marked.
  5377  			// 2. The previous node's next node is succ in this layer.
  5378  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5379  		}
  5380  		if !valid {
  5381  			unlockUint8(preds, highestLocked)
  5382  			continue
  5383  		}
  5384  
  5385  		nn := newUint8Node(value, level)
  5386  		for layer := 0; layer < level; layer++ {
  5387  			nn.storeNext(layer, succs[layer])
  5388  			preds[layer].atomicStoreNext(layer, nn)
  5389  		}
  5390  		nn.flags.SetTrue(fullyLinked)
  5391  		unlockUint8(preds, highestLocked)
  5392  		atomic.AddInt64(&s.length, 1)
  5393  		return true
  5394  	}
  5395  }
  5396  
  5397  func (s *Uint8Set) randomLevel() int {
  5398  	// Generate random level.
  5399  	level := randomLevel()
  5400  	// Update highest level if possible.
  5401  	for {
  5402  		hl := atomic.LoadInt64(&s.highestLevel)
  5403  		if int64(level) <= hl {
  5404  			break
  5405  		}
  5406  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  5407  			break
  5408  		}
  5409  	}
  5410  	return level
  5411  }
  5412  
  5413  // Contains check if the value is in the skip set.
  5414  func (s *Uint8Set) Contains(value uint8) bool {
  5415  	x := s.header
  5416  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5417  		nex := x.atomicLoadNext(i)
  5418  		for nex != nil && nex.lessthan(value) {
  5419  			x = nex
  5420  			nex = x.atomicLoadNext(i)
  5421  		}
  5422  
  5423  		// Check if the value already in the skip list.
  5424  		if nex != nil && nex.equal(value) {
  5425  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  5426  		}
  5427  	}
  5428  	return false
  5429  }
  5430  
  5431  // Remove a node from the skip set.
  5432  func (s *Uint8Set) Remove(value uint8) bool {
  5433  	var (
  5434  		nodeToRemove *uint8Node
  5435  		isMarked     bool // represents if this operation mark the node
  5436  		topLayer     = -1
  5437  		preds, succs [maxLevel]*uint8Node
  5438  	)
  5439  	for {
  5440  		lFound := s.findNodeRemove(value, &preds, &succs)
  5441  		if isMarked || // this process mark this node or we can find this node in the skip list
  5442  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5443  			if !isMarked { // we don't mark this node for now
  5444  				nodeToRemove = succs[lFound]
  5445  				topLayer = lFound
  5446  				nodeToRemove.mu.Lock()
  5447  				if nodeToRemove.flags.Get(marked) {
  5448  					// The node is marked by another process,
  5449  					// the physical deletion will be accomplished by another process.
  5450  					nodeToRemove.mu.Unlock()
  5451  					return false
  5452  				}
  5453  				nodeToRemove.flags.SetTrue(marked)
  5454  				isMarked = true
  5455  			}
  5456  			// Accomplish the physical deletion.
  5457  			var (
  5458  				highestLocked        = -1 // the highest level being locked by this process
  5459  				valid                = true
  5460  				pred, succ, prevPred *uint8Node
  5461  			)
  5462  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5463  				pred, succ = preds[layer], succs[layer]
  5464  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5465  					pred.mu.Lock()
  5466  					highestLocked = layer
  5467  					prevPred = pred
  5468  				}
  5469  				// valid check if there is another node has inserted into the skip list in this layer
  5470  				// during this process, or the previous is removed by another process.
  5471  				// It is valid if:
  5472  				// 1. the previous node exists.
  5473  				// 2. no another node has inserted into the skip list in this layer.
  5474  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  5475  			}
  5476  			if !valid {
  5477  				unlockUint8(preds, highestLocked)
  5478  				continue
  5479  			}
  5480  			for i := topLayer; i >= 0; i-- {
  5481  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  5482  				// So we don't need `nodeToRemove.loadNext`
  5483  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  5484  			}
  5485  			nodeToRemove.mu.Unlock()
  5486  			unlockUint8(preds, highestLocked)
  5487  			atomic.AddInt64(&s.length, -1)
  5488  			return true
  5489  		}
  5490  		return false
  5491  	}
  5492  }
  5493  
  5494  // Range calls f sequentially for each value present in the skip set.
  5495  // If f returns false, range stops the iteration.
  5496  func (s *Uint8Set) Range(f func(value uint8) bool) {
  5497  	x := s.header.atomicLoadNext(0)
  5498  	for x != nil {
  5499  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  5500  			x = x.atomicLoadNext(0)
  5501  			continue
  5502  		}
  5503  		if !f(x.value) {
  5504  			break
  5505  		}
  5506  		x = x.atomicLoadNext(0)
  5507  	}
  5508  }
  5509  
  5510  // Len return the length of this skip set.
  5511  func (s *Uint8Set) Len() int {
  5512  	return int(atomic.LoadInt64(&s.length))
  5513  }
  5514  
  5515  // Uint8SetDesc represents a set based on skip list in descending order.
  5516  type Uint8SetDesc struct {
  5517  	header       *uint8NodeDesc
  5518  	length       int64
  5519  	highestLevel int64 // highest level for now
  5520  }
  5521  
  5522  type uint8NodeDesc struct {
  5523  	value uint8
  5524  	next  optionalArray // [level]*uint8NodeDesc
  5525  	mu    sync.Mutex
  5526  	flags bitflag
  5527  	level uint32
  5528  }
  5529  
  5530  func newUint8NodeDesc(value uint8, level int) *uint8NodeDesc {
  5531  	node := &uint8NodeDesc{
  5532  		value: value,
  5533  		level: uint32(level),
  5534  	}
  5535  	if level > op1 {
  5536  		node.next.extra = new([op2]unsafe.Pointer)
  5537  	}
  5538  	return node
  5539  }
  5540  
  5541  func (n *uint8NodeDesc) loadNext(i int) *uint8NodeDesc {
  5542  	return (*uint8NodeDesc)(n.next.load(i))
  5543  }
  5544  
  5545  func (n *uint8NodeDesc) storeNext(i int, node *uint8NodeDesc) {
  5546  	n.next.store(i, unsafe.Pointer(node))
  5547  }
  5548  
  5549  func (n *uint8NodeDesc) atomicLoadNext(i int) *uint8NodeDesc {
  5550  	return (*uint8NodeDesc)(n.next.atomicLoad(i))
  5551  }
  5552  
  5553  func (n *uint8NodeDesc) atomicStoreNext(i int, node *uint8NodeDesc) {
  5554  	n.next.atomicStore(i, unsafe.Pointer(node))
  5555  }
  5556  
  5557  func (n *uint8NodeDesc) lessthan(value uint8) bool {
  5558  	return n.value > value
  5559  }
  5560  
  5561  func (n *uint8NodeDesc) equal(value uint8) bool {
  5562  	return n.value == value
  5563  }
  5564  
  5565  // NewUint8Desc return an empty uint8 skip set in descending order.
  5566  func NewUint8Desc() *Uint8SetDesc {
  5567  	h := newUint8NodeDesc(0, maxLevel)
  5568  	h.flags.SetTrue(fullyLinked)
  5569  	return &Uint8SetDesc{
  5570  		header:       h,
  5571  		highestLevel: defaultHighestLevel,
  5572  	}
  5573  }
  5574  
  5575  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  5576  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5577  func (s *Uint8SetDesc) findNodeRemove(value uint8, preds *[maxLevel]*uint8NodeDesc, succs *[maxLevel]*uint8NodeDesc) int {
  5578  	// lFound represents the index of the first layer at which it found a node.
  5579  	lFound, x := -1, s.header
  5580  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5581  		succ := x.atomicLoadNext(i)
  5582  		for succ != nil && succ.lessthan(value) {
  5583  			x = succ
  5584  			succ = x.atomicLoadNext(i)
  5585  		}
  5586  		preds[i] = x
  5587  		succs[i] = succ
  5588  
  5589  		// Check if the value already in the skip list.
  5590  		if lFound == -1 && succ != nil && succ.equal(value) {
  5591  			lFound = i
  5592  		}
  5593  	}
  5594  	return lFound
  5595  }
  5596  
  5597  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  5598  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5599  func (s *Uint8SetDesc) findNodeAdd(value uint8, preds *[maxLevel]*uint8NodeDesc, succs *[maxLevel]*uint8NodeDesc) int {
  5600  	x := s.header
  5601  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5602  		succ := x.atomicLoadNext(i)
  5603  		for succ != nil && succ.lessthan(value) {
  5604  			x = succ
  5605  			succ = x.atomicLoadNext(i)
  5606  		}
  5607  		preds[i] = x
  5608  		succs[i] = succ
  5609  
  5610  		// Check if the value already in the skip list.
  5611  		if succ != nil && succ.equal(value) {
  5612  			return i
  5613  		}
  5614  	}
  5615  	return -1
  5616  }
  5617  
  5618  func unlockUint8Desc(preds [maxLevel]*uint8NodeDesc, highestLevel int) {
  5619  	var prevPred *uint8NodeDesc
  5620  	for i := highestLevel; i >= 0; i-- {
  5621  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  5622  			preds[i].mu.Unlock()
  5623  			prevPred = preds[i]
  5624  		}
  5625  	}
  5626  }
  5627  
  5628  // Add add the value into skip set, return true if this process insert the value into skip set,
  5629  // return false if this process can't insert this value, because another process has insert the same value.
  5630  //
  5631  // If the value is in the skip set but not fully linked, this process will wait until it is.
  5632  func (s *Uint8SetDesc) Add(value uint8) bool {
  5633  	level := s.randomLevel()
  5634  	var preds, succs [maxLevel]*uint8NodeDesc
  5635  	for {
  5636  		lFound := s.findNodeAdd(value, &preds, &succs)
  5637  		if lFound != -1 { // indicating the value is already in the skip-list
  5638  			nodeFound := succs[lFound]
  5639  			if !nodeFound.flags.Get(marked) {
  5640  				for !nodeFound.flags.Get(fullyLinked) {
  5641  					// The node is not yet fully linked, just waits until it is.
  5642  				}
  5643  				return false
  5644  			}
  5645  			// If the node is marked, represents some other thread is in the process of deleting this node,
  5646  			// we need to add this node in next loop.
  5647  			continue
  5648  		}
  5649  		// Add this node into skip list.
  5650  		var (
  5651  			highestLocked        = -1 // the highest level being locked by this process
  5652  			valid                = true
  5653  			pred, succ, prevPred *uint8NodeDesc
  5654  		)
  5655  		for layer := 0; valid && layer < level; layer++ {
  5656  			pred = preds[layer]   // target node's previous node
  5657  			succ = succs[layer]   // target node's next node
  5658  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5659  				pred.mu.Lock()
  5660  				highestLocked = layer
  5661  				prevPred = pred
  5662  			}
  5663  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  5664  			// It is valid if:
  5665  			// 1. The previous node and next node both are not marked.
  5666  			// 2. The previous node's next node is succ in this layer.
  5667  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5668  		}
  5669  		if !valid {
  5670  			unlockUint8Desc(preds, highestLocked)
  5671  			continue
  5672  		}
  5673  
  5674  		nn := newUint8NodeDesc(value, level)
  5675  		for layer := 0; layer < level; layer++ {
  5676  			nn.storeNext(layer, succs[layer])
  5677  			preds[layer].atomicStoreNext(layer, nn)
  5678  		}
  5679  		nn.flags.SetTrue(fullyLinked)
  5680  		unlockUint8Desc(preds, highestLocked)
  5681  		atomic.AddInt64(&s.length, 1)
  5682  		return true
  5683  	}
  5684  }
  5685  
  5686  func (s *Uint8SetDesc) randomLevel() int {
  5687  	// Generate random level.
  5688  	level := randomLevel()
  5689  	// Update highest level if possible.
  5690  	for {
  5691  		hl := atomic.LoadInt64(&s.highestLevel)
  5692  		if int64(level) <= hl {
  5693  			break
  5694  		}
  5695  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  5696  			break
  5697  		}
  5698  	}
  5699  	return level
  5700  }
  5701  
  5702  // Contains check if the value is in the skip set.
  5703  func (s *Uint8SetDesc) Contains(value uint8) bool {
  5704  	x := s.header
  5705  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5706  		nex := x.atomicLoadNext(i)
  5707  		for nex != nil && nex.lessthan(value) {
  5708  			x = nex
  5709  			nex = x.atomicLoadNext(i)
  5710  		}
  5711  
  5712  		// Check if the value already in the skip list.
  5713  		if nex != nil && nex.equal(value) {
  5714  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  5715  		}
  5716  	}
  5717  	return false
  5718  }
  5719  
  5720  // Remove a node from the skip set.
  5721  func (s *Uint8SetDesc) Remove(value uint8) bool {
  5722  	var (
  5723  		nodeToRemove *uint8NodeDesc
  5724  		isMarked     bool // represents if this operation mark the node
  5725  		topLayer     = -1
  5726  		preds, succs [maxLevel]*uint8NodeDesc
  5727  	)
  5728  	for {
  5729  		lFound := s.findNodeRemove(value, &preds, &succs)
  5730  		if isMarked || // this process mark this node or we can find this node in the skip list
  5731  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5732  			if !isMarked { // we don't mark this node for now
  5733  				nodeToRemove = succs[lFound]
  5734  				topLayer = lFound
  5735  				nodeToRemove.mu.Lock()
  5736  				if nodeToRemove.flags.Get(marked) {
  5737  					// The node is marked by another process,
  5738  					// the physical deletion will be accomplished by another process.
  5739  					nodeToRemove.mu.Unlock()
  5740  					return false
  5741  				}
  5742  				nodeToRemove.flags.SetTrue(marked)
  5743  				isMarked = true
  5744  			}
  5745  			// Accomplish the physical deletion.
  5746  			var (
  5747  				highestLocked        = -1 // the highest level being locked by this process
  5748  				valid                = true
  5749  				pred, succ, prevPred *uint8NodeDesc
  5750  			)
  5751  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5752  				pred, succ = preds[layer], succs[layer]
  5753  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5754  					pred.mu.Lock()
  5755  					highestLocked = layer
  5756  					prevPred = pred
  5757  				}
  5758  				// valid check if there is another node has inserted into the skip list in this layer
  5759  				// during this process, or the previous is removed by another process.
  5760  				// It is valid if:
  5761  				// 1. the previous node exists.
  5762  				// 2. no another node has inserted into the skip list in this layer.
  5763  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  5764  			}
  5765  			if !valid {
  5766  				unlockUint8Desc(preds, highestLocked)
  5767  				continue
  5768  			}
  5769  			for i := topLayer; i >= 0; i-- {
  5770  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  5771  				// So we don't need `nodeToRemove.loadNext`
  5772  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  5773  			}
  5774  			nodeToRemove.mu.Unlock()
  5775  			unlockUint8Desc(preds, highestLocked)
  5776  			atomic.AddInt64(&s.length, -1)
  5777  			return true
  5778  		}
  5779  		return false
  5780  	}
  5781  }
  5782  
  5783  // Range calls f sequentially for each value present in the skip set.
  5784  // If f returns false, range stops the iteration.
  5785  func (s *Uint8SetDesc) Range(f func(value uint8) bool) {
  5786  	x := s.header.atomicLoadNext(0)
  5787  	for x != nil {
  5788  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  5789  			x = x.atomicLoadNext(0)
  5790  			continue
  5791  		}
  5792  		if !f(x.value) {
  5793  			break
  5794  		}
  5795  		x = x.atomicLoadNext(0)
  5796  	}
  5797  }
  5798  
  5799  // Len return the length of this skip set.
  5800  func (s *Uint8SetDesc) Len() int {
  5801  	return int(atomic.LoadInt64(&s.length))
  5802  }
  5803  
  5804  // Uint16Set represents a set based on skip list in ascending order.
  5805  type Uint16Set struct {
  5806  	header       *uint16Node
  5807  	length       int64
  5808  	highestLevel int64 // highest level for now
  5809  }
  5810  
  5811  type uint16Node struct {
  5812  	value uint16
  5813  	next  optionalArray // [level]*uint16Node
  5814  	mu    sync.Mutex
  5815  	flags bitflag
  5816  	level uint32
  5817  }
  5818  
  5819  func newUint16Node(value uint16, level int) *uint16Node {
  5820  	node := &uint16Node{
  5821  		value: value,
  5822  		level: uint32(level),
  5823  	}
  5824  	if level > op1 {
  5825  		node.next.extra = new([op2]unsafe.Pointer)
  5826  	}
  5827  	return node
  5828  }
  5829  
  5830  func (n *uint16Node) loadNext(i int) *uint16Node {
  5831  	return (*uint16Node)(n.next.load(i))
  5832  }
  5833  
  5834  func (n *uint16Node) storeNext(i int, node *uint16Node) {
  5835  	n.next.store(i, unsafe.Pointer(node))
  5836  }
  5837  
  5838  func (n *uint16Node) atomicLoadNext(i int) *uint16Node {
  5839  	return (*uint16Node)(n.next.atomicLoad(i))
  5840  }
  5841  
  5842  func (n *uint16Node) atomicStoreNext(i int, node *uint16Node) {
  5843  	n.next.atomicStore(i, unsafe.Pointer(node))
  5844  }
  5845  
  5846  func (n *uint16Node) lessthan(value uint16) bool {
  5847  	return n.value < value
  5848  }
  5849  
  5850  func (n *uint16Node) equal(value uint16) bool {
  5851  	return n.value == value
  5852  }
  5853  
  5854  // NewUint16 return an empty uint16 skip set in ascending order.
  5855  func NewUint16() *Uint16Set {
  5856  	h := newUint16Node(0, maxLevel)
  5857  	h.flags.SetTrue(fullyLinked)
  5858  	return &Uint16Set{
  5859  		header:       h,
  5860  		highestLevel: defaultHighestLevel,
  5861  	}
  5862  }
  5863  
  5864  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  5865  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5866  func (s *Uint16Set) findNodeRemove(value uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) int {
  5867  	// lFound represents the index of the first layer at which it found a node.
  5868  	lFound, x := -1, s.header
  5869  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5870  		succ := x.atomicLoadNext(i)
  5871  		for succ != nil && succ.lessthan(value) {
  5872  			x = succ
  5873  			succ = x.atomicLoadNext(i)
  5874  		}
  5875  		preds[i] = x
  5876  		succs[i] = succ
  5877  
  5878  		// Check if the value already in the skip list.
  5879  		if lFound == -1 && succ != nil && succ.equal(value) {
  5880  			lFound = i
  5881  		}
  5882  	}
  5883  	return lFound
  5884  }
  5885  
  5886  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  5887  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5888  func (s *Uint16Set) findNodeAdd(value uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) int {
  5889  	x := s.header
  5890  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5891  		succ := x.atomicLoadNext(i)
  5892  		for succ != nil && succ.lessthan(value) {
  5893  			x = succ
  5894  			succ = x.atomicLoadNext(i)
  5895  		}
  5896  		preds[i] = x
  5897  		succs[i] = succ
  5898  
  5899  		// Check if the value already in the skip list.
  5900  		if succ != nil && succ.equal(value) {
  5901  			return i
  5902  		}
  5903  	}
  5904  	return -1
  5905  }
  5906  
  5907  func unlockUint16(preds [maxLevel]*uint16Node, highestLevel int) {
  5908  	var prevPred *uint16Node
  5909  	for i := highestLevel; i >= 0; i-- {
  5910  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  5911  			preds[i].mu.Unlock()
  5912  			prevPred = preds[i]
  5913  		}
  5914  	}
  5915  }
  5916  
  5917  // Add add the value into skip set, return true if this process insert the value into skip set,
  5918  // return false if this process can't insert this value, because another process has insert the same value.
  5919  //
  5920  // If the value is in the skip set but not fully linked, this process will wait until it is.
  5921  func (s *Uint16Set) Add(value uint16) bool {
  5922  	level := s.randomLevel()
  5923  	var preds, succs [maxLevel]*uint16Node
  5924  	for {
  5925  		lFound := s.findNodeAdd(value, &preds, &succs)
  5926  		if lFound != -1 { // indicating the value is already in the skip-list
  5927  			nodeFound := succs[lFound]
  5928  			if !nodeFound.flags.Get(marked) {
  5929  				for !nodeFound.flags.Get(fullyLinked) {
  5930  					// The node is not yet fully linked, just waits until it is.
  5931  				}
  5932  				return false
  5933  			}
  5934  			// If the node is marked, represents some other thread is in the process of deleting this node,
  5935  			// we need to add this node in next loop.
  5936  			continue
  5937  		}
  5938  		// Add this node into skip list.
  5939  		var (
  5940  			highestLocked        = -1 // the highest level being locked by this process
  5941  			valid                = true
  5942  			pred, succ, prevPred *uint16Node
  5943  		)
  5944  		for layer := 0; valid && layer < level; layer++ {
  5945  			pred = preds[layer]   // target node's previous node
  5946  			succ = succs[layer]   // target node's next node
  5947  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5948  				pred.mu.Lock()
  5949  				highestLocked = layer
  5950  				prevPred = pred
  5951  			}
  5952  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  5953  			// It is valid if:
  5954  			// 1. The previous node and next node both are not marked.
  5955  			// 2. The previous node's next node is succ in this layer.
  5956  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5957  		}
  5958  		if !valid {
  5959  			unlockUint16(preds, highestLocked)
  5960  			continue
  5961  		}
  5962  
  5963  		nn := newUint16Node(value, level)
  5964  		for layer := 0; layer < level; layer++ {
  5965  			nn.storeNext(layer, succs[layer])
  5966  			preds[layer].atomicStoreNext(layer, nn)
  5967  		}
  5968  		nn.flags.SetTrue(fullyLinked)
  5969  		unlockUint16(preds, highestLocked)
  5970  		atomic.AddInt64(&s.length, 1)
  5971  		return true
  5972  	}
  5973  }
  5974  
  5975  func (s *Uint16Set) randomLevel() int {
  5976  	// Generate random level.
  5977  	level := randomLevel()
  5978  	// Update highest level if possible.
  5979  	for {
  5980  		hl := atomic.LoadInt64(&s.highestLevel)
  5981  		if int64(level) <= hl {
  5982  			break
  5983  		}
  5984  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  5985  			break
  5986  		}
  5987  	}
  5988  	return level
  5989  }
  5990  
  5991  // Contains check if the value is in the skip set.
  5992  func (s *Uint16Set) Contains(value uint16) bool {
  5993  	x := s.header
  5994  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5995  		nex := x.atomicLoadNext(i)
  5996  		for nex != nil && nex.lessthan(value) {
  5997  			x = nex
  5998  			nex = x.atomicLoadNext(i)
  5999  		}
  6000  
  6001  		// Check if the value already in the skip list.
  6002  		if nex != nil && nex.equal(value) {
  6003  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  6004  		}
  6005  	}
  6006  	return false
  6007  }
  6008  
  6009  // Remove a node from the skip set.
  6010  func (s *Uint16Set) Remove(value uint16) bool {
  6011  	var (
  6012  		nodeToRemove *uint16Node
  6013  		isMarked     bool // represents if this operation mark the node
  6014  		topLayer     = -1
  6015  		preds, succs [maxLevel]*uint16Node
  6016  	)
  6017  	for {
  6018  		lFound := s.findNodeRemove(value, &preds, &succs)
  6019  		if isMarked || // this process mark this node or we can find this node in the skip list
  6020  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  6021  			if !isMarked { // we don't mark this node for now
  6022  				nodeToRemove = succs[lFound]
  6023  				topLayer = lFound
  6024  				nodeToRemove.mu.Lock()
  6025  				if nodeToRemove.flags.Get(marked) {
  6026  					// The node is marked by another process,
  6027  					// the physical deletion will be accomplished by another process.
  6028  					nodeToRemove.mu.Unlock()
  6029  					return false
  6030  				}
  6031  				nodeToRemove.flags.SetTrue(marked)
  6032  				isMarked = true
  6033  			}
  6034  			// Accomplish the physical deletion.
  6035  			var (
  6036  				highestLocked        = -1 // the highest level being locked by this process
  6037  				valid                = true
  6038  				pred, succ, prevPred *uint16Node
  6039  			)
  6040  			for layer := 0; valid && (layer <= topLayer); layer++ {
  6041  				pred, succ = preds[layer], succs[layer]
  6042  				if pred != prevPred { // the node in this layer could be locked by previous loop
  6043  					pred.mu.Lock()
  6044  					highestLocked = layer
  6045  					prevPred = pred
  6046  				}
  6047  				// valid check if there is another node has inserted into the skip list in this layer
  6048  				// during this process, or the previous is removed by another process.
  6049  				// It is valid if:
  6050  				// 1. the previous node exists.
  6051  				// 2. no another node has inserted into the skip list in this layer.
  6052  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  6053  			}
  6054  			if !valid {
  6055  				unlockUint16(preds, highestLocked)
  6056  				continue
  6057  			}
  6058  			for i := topLayer; i >= 0; i-- {
  6059  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  6060  				// So we don't need `nodeToRemove.loadNext`
  6061  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  6062  			}
  6063  			nodeToRemove.mu.Unlock()
  6064  			unlockUint16(preds, highestLocked)
  6065  			atomic.AddInt64(&s.length, -1)
  6066  			return true
  6067  		}
  6068  		return false
  6069  	}
  6070  }
  6071  
  6072  // Range calls f sequentially for each value present in the skip set.
  6073  // If f returns false, range stops the iteration.
  6074  func (s *Uint16Set) Range(f func(value uint16) bool) {
  6075  	x := s.header.atomicLoadNext(0)
  6076  	for x != nil {
  6077  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  6078  			x = x.atomicLoadNext(0)
  6079  			continue
  6080  		}
  6081  		if !f(x.value) {
  6082  			break
  6083  		}
  6084  		x = x.atomicLoadNext(0)
  6085  	}
  6086  }
  6087  
  6088  // Len return the length of this skip set.
  6089  func (s *Uint16Set) Len() int {
  6090  	return int(atomic.LoadInt64(&s.length))
  6091  }
  6092  
  6093  // Uint16SetDesc represents a set based on skip list in descending order.
  6094  type Uint16SetDesc struct {
  6095  	header       *uint16NodeDesc
  6096  	length       int64
  6097  	highestLevel int64 // highest level for now
  6098  }
  6099  
  6100  type uint16NodeDesc struct {
  6101  	value uint16
  6102  	next  optionalArray // [level]*uint16NodeDesc
  6103  	mu    sync.Mutex
  6104  	flags bitflag
  6105  	level uint32
  6106  }
  6107  
  6108  func newUint16NodeDesc(value uint16, level int) *uint16NodeDesc {
  6109  	node := &uint16NodeDesc{
  6110  		value: value,
  6111  		level: uint32(level),
  6112  	}
  6113  	if level > op1 {
  6114  		node.next.extra = new([op2]unsafe.Pointer)
  6115  	}
  6116  	return node
  6117  }
  6118  
  6119  func (n *uint16NodeDesc) loadNext(i int) *uint16NodeDesc {
  6120  	return (*uint16NodeDesc)(n.next.load(i))
  6121  }
  6122  
  6123  func (n *uint16NodeDesc) storeNext(i int, node *uint16NodeDesc) {
  6124  	n.next.store(i, unsafe.Pointer(node))
  6125  }
  6126  
  6127  func (n *uint16NodeDesc) atomicLoadNext(i int) *uint16NodeDesc {
  6128  	return (*uint16NodeDesc)(n.next.atomicLoad(i))
  6129  }
  6130  
  6131  func (n *uint16NodeDesc) atomicStoreNext(i int, node *uint16NodeDesc) {
  6132  	n.next.atomicStore(i, unsafe.Pointer(node))
  6133  }
  6134  
  6135  func (n *uint16NodeDesc) lessthan(value uint16) bool {
  6136  	return n.value > value
  6137  }
  6138  
  6139  func (n *uint16NodeDesc) equal(value uint16) bool {
  6140  	return n.value == value
  6141  }
  6142  
  6143  // NewUint16Desc return an empty uint16 skip set in descending order.
  6144  func NewUint16Desc() *Uint16SetDesc {
  6145  	h := newUint16NodeDesc(0, maxLevel)
  6146  	h.flags.SetTrue(fullyLinked)
  6147  	return &Uint16SetDesc{
  6148  		header:       h,
  6149  		highestLevel: defaultHighestLevel,
  6150  	}
  6151  }
  6152  
  6153  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  6154  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  6155  func (s *Uint16SetDesc) findNodeRemove(value uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) int {
  6156  	// lFound represents the index of the first layer at which it found a node.
  6157  	lFound, x := -1, s.header
  6158  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6159  		succ := x.atomicLoadNext(i)
  6160  		for succ != nil && succ.lessthan(value) {
  6161  			x = succ
  6162  			succ = x.atomicLoadNext(i)
  6163  		}
  6164  		preds[i] = x
  6165  		succs[i] = succ
  6166  
  6167  		// Check if the value already in the skip list.
  6168  		if lFound == -1 && succ != nil && succ.equal(value) {
  6169  			lFound = i
  6170  		}
  6171  	}
  6172  	return lFound
  6173  }
  6174  
  6175  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  6176  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  6177  func (s *Uint16SetDesc) findNodeAdd(value uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) int {
  6178  	x := s.header
  6179  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6180  		succ := x.atomicLoadNext(i)
  6181  		for succ != nil && succ.lessthan(value) {
  6182  			x = succ
  6183  			succ = x.atomicLoadNext(i)
  6184  		}
  6185  		preds[i] = x
  6186  		succs[i] = succ
  6187  
  6188  		// Check if the value already in the skip list.
  6189  		if succ != nil && succ.equal(value) {
  6190  			return i
  6191  		}
  6192  	}
  6193  	return -1
  6194  }
  6195  
  6196  func unlockUint16Desc(preds [maxLevel]*uint16NodeDesc, highestLevel int) {
  6197  	var prevPred *uint16NodeDesc
  6198  	for i := highestLevel; i >= 0; i-- {
  6199  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  6200  			preds[i].mu.Unlock()
  6201  			prevPred = preds[i]
  6202  		}
  6203  	}
  6204  }
  6205  
  6206  // Add add the value into skip set, return true if this process insert the value into skip set,
  6207  // return false if this process can't insert this value, because another process has insert the same value.
  6208  //
  6209  // If the value is in the skip set but not fully linked, this process will wait until it is.
  6210  func (s *Uint16SetDesc) Add(value uint16) bool {
  6211  	level := s.randomLevel()
  6212  	var preds, succs [maxLevel]*uint16NodeDesc
  6213  	for {
  6214  		lFound := s.findNodeAdd(value, &preds, &succs)
  6215  		if lFound != -1 { // indicating the value is already in the skip-list
  6216  			nodeFound := succs[lFound]
  6217  			if !nodeFound.flags.Get(marked) {
  6218  				for !nodeFound.flags.Get(fullyLinked) {
  6219  					// The node is not yet fully linked, just waits until it is.
  6220  				}
  6221  				return false
  6222  			}
  6223  			// If the node is marked, represents some other thread is in the process of deleting this node,
  6224  			// we need to add this node in next loop.
  6225  			continue
  6226  		}
  6227  		// Add this node into skip list.
  6228  		var (
  6229  			highestLocked        = -1 // the highest level being locked by this process
  6230  			valid                = true
  6231  			pred, succ, prevPred *uint16NodeDesc
  6232  		)
  6233  		for layer := 0; valid && layer < level; layer++ {
  6234  			pred = preds[layer]   // target node's previous node
  6235  			succ = succs[layer]   // target node's next node
  6236  			if pred != prevPred { // the node in this layer could be locked by previous loop
  6237  				pred.mu.Lock()
  6238  				highestLocked = layer
  6239  				prevPred = pred
  6240  			}
  6241  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  6242  			// It is valid if:
  6243  			// 1. The previous node and next node both are not marked.
  6244  			// 2. The previous node's next node is succ in this layer.
  6245  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  6246  		}
  6247  		if !valid {
  6248  			unlockUint16Desc(preds, highestLocked)
  6249  			continue
  6250  		}
  6251  
  6252  		nn := newUint16NodeDesc(value, level)
  6253  		for layer := 0; layer < level; layer++ {
  6254  			nn.storeNext(layer, succs[layer])
  6255  			preds[layer].atomicStoreNext(layer, nn)
  6256  		}
  6257  		nn.flags.SetTrue(fullyLinked)
  6258  		unlockUint16Desc(preds, highestLocked)
  6259  		atomic.AddInt64(&s.length, 1)
  6260  		return true
  6261  	}
  6262  }
  6263  
  6264  func (s *Uint16SetDesc) randomLevel() int {
  6265  	// Generate random level.
  6266  	level := randomLevel()
  6267  	// Update highest level if possible.
  6268  	for {
  6269  		hl := atomic.LoadInt64(&s.highestLevel)
  6270  		if int64(level) <= hl {
  6271  			break
  6272  		}
  6273  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  6274  			break
  6275  		}
  6276  	}
  6277  	return level
  6278  }
  6279  
  6280  // Contains check if the value is in the skip set.
  6281  func (s *Uint16SetDesc) Contains(value uint16) bool {
  6282  	x := s.header
  6283  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6284  		nex := x.atomicLoadNext(i)
  6285  		for nex != nil && nex.lessthan(value) {
  6286  			x = nex
  6287  			nex = x.atomicLoadNext(i)
  6288  		}
  6289  
  6290  		// Check if the value already in the skip list.
  6291  		if nex != nil && nex.equal(value) {
  6292  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  6293  		}
  6294  	}
  6295  	return false
  6296  }
  6297  
  6298  // Remove a node from the skip set.
  6299  func (s *Uint16SetDesc) Remove(value uint16) bool {
  6300  	var (
  6301  		nodeToRemove *uint16NodeDesc
  6302  		isMarked     bool // represents if this operation mark the node
  6303  		topLayer     = -1
  6304  		preds, succs [maxLevel]*uint16NodeDesc
  6305  	)
  6306  	for {
  6307  		lFound := s.findNodeRemove(value, &preds, &succs)
  6308  		if isMarked || // this process mark this node or we can find this node in the skip list
  6309  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  6310  			if !isMarked { // we don't mark this node for now
  6311  				nodeToRemove = succs[lFound]
  6312  				topLayer = lFound
  6313  				nodeToRemove.mu.Lock()
  6314  				if nodeToRemove.flags.Get(marked) {
  6315  					// The node is marked by another process,
  6316  					// the physical deletion will be accomplished by another process.
  6317  					nodeToRemove.mu.Unlock()
  6318  					return false
  6319  				}
  6320  				nodeToRemove.flags.SetTrue(marked)
  6321  				isMarked = true
  6322  			}
  6323  			// Accomplish the physical deletion.
  6324  			var (
  6325  				highestLocked        = -1 // the highest level being locked by this process
  6326  				valid                = true
  6327  				pred, succ, prevPred *uint16NodeDesc
  6328  			)
  6329  			for layer := 0; valid && (layer <= topLayer); layer++ {
  6330  				pred, succ = preds[layer], succs[layer]
  6331  				if pred != prevPred { // the node in this layer could be locked by previous loop
  6332  					pred.mu.Lock()
  6333  					highestLocked = layer
  6334  					prevPred = pred
  6335  				}
  6336  				// valid check if there is another node has inserted into the skip list in this layer
  6337  				// during this process, or the previous is removed by another process.
  6338  				// It is valid if:
  6339  				// 1. the previous node exists.
  6340  				// 2. no another node has inserted into the skip list in this layer.
  6341  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  6342  			}
  6343  			if !valid {
  6344  				unlockUint16Desc(preds, highestLocked)
  6345  				continue
  6346  			}
  6347  			for i := topLayer; i >= 0; i-- {
  6348  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  6349  				// So we don't need `nodeToRemove.loadNext`
  6350  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  6351  			}
  6352  			nodeToRemove.mu.Unlock()
  6353  			unlockUint16Desc(preds, highestLocked)
  6354  			atomic.AddInt64(&s.length, -1)
  6355  			return true
  6356  		}
  6357  		return false
  6358  	}
  6359  }
  6360  
  6361  // Range calls f sequentially for each value present in the skip set.
  6362  // If f returns false, range stops the iteration.
  6363  func (s *Uint16SetDesc) Range(f func(value uint16) bool) {
  6364  	x := s.header.atomicLoadNext(0)
  6365  	for x != nil {
  6366  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  6367  			x = x.atomicLoadNext(0)
  6368  			continue
  6369  		}
  6370  		if !f(x.value) {
  6371  			break
  6372  		}
  6373  		x = x.atomicLoadNext(0)
  6374  	}
  6375  }
  6376  
  6377  // Len return the length of this skip set.
  6378  func (s *Uint16SetDesc) Len() int {
  6379  	return int(atomic.LoadInt64(&s.length))
  6380  }
  6381  
  6382  // Uint32Set represents a set based on skip list in ascending order.
  6383  type Uint32Set struct {
  6384  	header       *uint32Node
  6385  	length       int64
  6386  	highestLevel int64 // highest level for now
  6387  }
  6388  
  6389  type uint32Node struct {
  6390  	value uint32
  6391  	next  optionalArray // [level]*uint32Node
  6392  	mu    sync.Mutex
  6393  	flags bitflag
  6394  	level uint32
  6395  }
  6396  
  6397  func newUint32Node(value uint32, level int) *uint32Node {
  6398  	node := &uint32Node{
  6399  		value: value,
  6400  		level: uint32(level),
  6401  	}
  6402  	if level > op1 {
  6403  		node.next.extra = new([op2]unsafe.Pointer)
  6404  	}
  6405  	return node
  6406  }
  6407  
  6408  func (n *uint32Node) loadNext(i int) *uint32Node {
  6409  	return (*uint32Node)(n.next.load(i))
  6410  }
  6411  
  6412  func (n *uint32Node) storeNext(i int, node *uint32Node) {
  6413  	n.next.store(i, unsafe.Pointer(node))
  6414  }
  6415  
  6416  func (n *uint32Node) atomicLoadNext(i int) *uint32Node {
  6417  	return (*uint32Node)(n.next.atomicLoad(i))
  6418  }
  6419  
  6420  func (n *uint32Node) atomicStoreNext(i int, node *uint32Node) {
  6421  	n.next.atomicStore(i, unsafe.Pointer(node))
  6422  }
  6423  
  6424  func (n *uint32Node) lessthan(value uint32) bool {
  6425  	return n.value < value
  6426  }
  6427  
  6428  func (n *uint32Node) equal(value uint32) bool {
  6429  	return n.value == value
  6430  }
  6431  
  6432  // NewUint32 return an empty uint32 skip set in ascending order.
  6433  func NewUint32() *Uint32Set {
  6434  	h := newUint32Node(0, maxLevel)
  6435  	h.flags.SetTrue(fullyLinked)
  6436  	return &Uint32Set{
  6437  		header:       h,
  6438  		highestLevel: defaultHighestLevel,
  6439  	}
  6440  }
  6441  
  6442  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  6443  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  6444  func (s *Uint32Set) findNodeRemove(value uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) int {
  6445  	// lFound represents the index of the first layer at which it found a node.
  6446  	lFound, x := -1, s.header
  6447  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6448  		succ := x.atomicLoadNext(i)
  6449  		for succ != nil && succ.lessthan(value) {
  6450  			x = succ
  6451  			succ = x.atomicLoadNext(i)
  6452  		}
  6453  		preds[i] = x
  6454  		succs[i] = succ
  6455  
  6456  		// Check if the value already in the skip list.
  6457  		if lFound == -1 && succ != nil && succ.equal(value) {
  6458  			lFound = i
  6459  		}
  6460  	}
  6461  	return lFound
  6462  }
  6463  
  6464  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  6465  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  6466  func (s *Uint32Set) findNodeAdd(value uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) int {
  6467  	x := s.header
  6468  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6469  		succ := x.atomicLoadNext(i)
  6470  		for succ != nil && succ.lessthan(value) {
  6471  			x = succ
  6472  			succ = x.atomicLoadNext(i)
  6473  		}
  6474  		preds[i] = x
  6475  		succs[i] = succ
  6476  
  6477  		// Check if the value already in the skip list.
  6478  		if succ != nil && succ.equal(value) {
  6479  			return i
  6480  		}
  6481  	}
  6482  	return -1
  6483  }
  6484  
  6485  func unlockUint32(preds [maxLevel]*uint32Node, highestLevel int) {
  6486  	var prevPred *uint32Node
  6487  	for i := highestLevel; i >= 0; i-- {
  6488  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  6489  			preds[i].mu.Unlock()
  6490  			prevPred = preds[i]
  6491  		}
  6492  	}
  6493  }
  6494  
  6495  // Add add the value into skip set, return true if this process insert the value into skip set,
  6496  // return false if this process can't insert this value, because another process has insert the same value.
  6497  //
  6498  // If the value is in the skip set but not fully linked, this process will wait until it is.
  6499  func (s *Uint32Set) Add(value uint32) bool {
  6500  	level := s.randomLevel()
  6501  	var preds, succs [maxLevel]*uint32Node
  6502  	for {
  6503  		lFound := s.findNodeAdd(value, &preds, &succs)
  6504  		if lFound != -1 { // indicating the value is already in the skip-list
  6505  			nodeFound := succs[lFound]
  6506  			if !nodeFound.flags.Get(marked) {
  6507  				for !nodeFound.flags.Get(fullyLinked) {
  6508  					// The node is not yet fully linked, just waits until it is.
  6509  				}
  6510  				return false
  6511  			}
  6512  			// If the node is marked, represents some other thread is in the process of deleting this node,
  6513  			// we need to add this node in next loop.
  6514  			continue
  6515  		}
  6516  		// Add this node into skip list.
  6517  		var (
  6518  			highestLocked        = -1 // the highest level being locked by this process
  6519  			valid                = true
  6520  			pred, succ, prevPred *uint32Node
  6521  		)
  6522  		for layer := 0; valid && layer < level; layer++ {
  6523  			pred = preds[layer]   // target node's previous node
  6524  			succ = succs[layer]   // target node's next node
  6525  			if pred != prevPred { // the node in this layer could be locked by previous loop
  6526  				pred.mu.Lock()
  6527  				highestLocked = layer
  6528  				prevPred = pred
  6529  			}
  6530  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  6531  			// It is valid if:
  6532  			// 1. The previous node and next node both are not marked.
  6533  			// 2. The previous node's next node is succ in this layer.
  6534  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  6535  		}
  6536  		if !valid {
  6537  			unlockUint32(preds, highestLocked)
  6538  			continue
  6539  		}
  6540  
  6541  		nn := newUint32Node(value, level)
  6542  		for layer := 0; layer < level; layer++ {
  6543  			nn.storeNext(layer, succs[layer])
  6544  			preds[layer].atomicStoreNext(layer, nn)
  6545  		}
  6546  		nn.flags.SetTrue(fullyLinked)
  6547  		unlockUint32(preds, highestLocked)
  6548  		atomic.AddInt64(&s.length, 1)
  6549  		return true
  6550  	}
  6551  }
  6552  
  6553  func (s *Uint32Set) randomLevel() int {
  6554  	// Generate random level.
  6555  	level := randomLevel()
  6556  	// Update highest level if possible.
  6557  	for {
  6558  		hl := atomic.LoadInt64(&s.highestLevel)
  6559  		if int64(level) <= hl {
  6560  			break
  6561  		}
  6562  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  6563  			break
  6564  		}
  6565  	}
  6566  	return level
  6567  }
  6568  
  6569  // Contains check if the value is in the skip set.
  6570  func (s *Uint32Set) Contains(value uint32) bool {
  6571  	x := s.header
  6572  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6573  		nex := x.atomicLoadNext(i)
  6574  		for nex != nil && nex.lessthan(value) {
  6575  			x = nex
  6576  			nex = x.atomicLoadNext(i)
  6577  		}
  6578  
  6579  		// Check if the value already in the skip list.
  6580  		if nex != nil && nex.equal(value) {
  6581  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  6582  		}
  6583  	}
  6584  	return false
  6585  }
  6586  
  6587  // Remove a node from the skip set.
  6588  func (s *Uint32Set) Remove(value uint32) bool {
  6589  	var (
  6590  		nodeToRemove *uint32Node
  6591  		isMarked     bool // represents if this operation mark the node
  6592  		topLayer     = -1
  6593  		preds, succs [maxLevel]*uint32Node
  6594  	)
  6595  	for {
  6596  		lFound := s.findNodeRemove(value, &preds, &succs)
  6597  		if isMarked || // this process mark this node or we can find this node in the skip list
  6598  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  6599  			if !isMarked { // we don't mark this node for now
  6600  				nodeToRemove = succs[lFound]
  6601  				topLayer = lFound
  6602  				nodeToRemove.mu.Lock()
  6603  				if nodeToRemove.flags.Get(marked) {
  6604  					// The node is marked by another process,
  6605  					// the physical deletion will be accomplished by another process.
  6606  					nodeToRemove.mu.Unlock()
  6607  					return false
  6608  				}
  6609  				nodeToRemove.flags.SetTrue(marked)
  6610  				isMarked = true
  6611  			}
  6612  			// Accomplish the physical deletion.
  6613  			var (
  6614  				highestLocked        = -1 // the highest level being locked by this process
  6615  				valid                = true
  6616  				pred, succ, prevPred *uint32Node
  6617  			)
  6618  			for layer := 0; valid && (layer <= topLayer); layer++ {
  6619  				pred, succ = preds[layer], succs[layer]
  6620  				if pred != prevPred { // the node in this layer could be locked by previous loop
  6621  					pred.mu.Lock()
  6622  					highestLocked = layer
  6623  					prevPred = pred
  6624  				}
  6625  				// valid check if there is another node has inserted into the skip list in this layer
  6626  				// during this process, or the previous is removed by another process.
  6627  				// It is valid if:
  6628  				// 1. the previous node exists.
  6629  				// 2. no another node has inserted into the skip list in this layer.
  6630  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  6631  			}
  6632  			if !valid {
  6633  				unlockUint32(preds, highestLocked)
  6634  				continue
  6635  			}
  6636  			for i := topLayer; i >= 0; i-- {
  6637  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  6638  				// So we don't need `nodeToRemove.loadNext`
  6639  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  6640  			}
  6641  			nodeToRemove.mu.Unlock()
  6642  			unlockUint32(preds, highestLocked)
  6643  			atomic.AddInt64(&s.length, -1)
  6644  			return true
  6645  		}
  6646  		return false
  6647  	}
  6648  }
  6649  
  6650  // Range calls f sequentially for each value present in the skip set.
  6651  // If f returns false, range stops the iteration.
  6652  func (s *Uint32Set) Range(f func(value uint32) bool) {
  6653  	x := s.header.atomicLoadNext(0)
  6654  	for x != nil {
  6655  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  6656  			x = x.atomicLoadNext(0)
  6657  			continue
  6658  		}
  6659  		if !f(x.value) {
  6660  			break
  6661  		}
  6662  		x = x.atomicLoadNext(0)
  6663  	}
  6664  }
  6665  
  6666  // Len return the length of this skip set.
  6667  func (s *Uint32Set) Len() int {
  6668  	return int(atomic.LoadInt64(&s.length))
  6669  }
  6670  
  6671  // Uint32SetDesc represents a set based on skip list in descending order.
  6672  type Uint32SetDesc struct {
  6673  	header       *uint32NodeDesc
  6674  	length       int64
  6675  	highestLevel int64 // highest level for now
  6676  }
  6677  
  6678  type uint32NodeDesc struct {
  6679  	value uint32
  6680  	next  optionalArray // [level]*uint32NodeDesc
  6681  	mu    sync.Mutex
  6682  	flags bitflag
  6683  	level uint32
  6684  }
  6685  
  6686  func newUint32NodeDesc(value uint32, level int) *uint32NodeDesc {
  6687  	node := &uint32NodeDesc{
  6688  		value: value,
  6689  		level: uint32(level),
  6690  	}
  6691  	if level > op1 {
  6692  		node.next.extra = new([op2]unsafe.Pointer)
  6693  	}
  6694  	return node
  6695  }
  6696  
  6697  func (n *uint32NodeDesc) loadNext(i int) *uint32NodeDesc {
  6698  	return (*uint32NodeDesc)(n.next.load(i))
  6699  }
  6700  
  6701  func (n *uint32NodeDesc) storeNext(i int, node *uint32NodeDesc) {
  6702  	n.next.store(i, unsafe.Pointer(node))
  6703  }
  6704  
  6705  func (n *uint32NodeDesc) atomicLoadNext(i int) *uint32NodeDesc {
  6706  	return (*uint32NodeDesc)(n.next.atomicLoad(i))
  6707  }
  6708  
  6709  func (n *uint32NodeDesc) atomicStoreNext(i int, node *uint32NodeDesc) {
  6710  	n.next.atomicStore(i, unsafe.Pointer(node))
  6711  }
  6712  
  6713  func (n *uint32NodeDesc) lessthan(value uint32) bool {
  6714  	return n.value > value
  6715  }
  6716  
  6717  func (n *uint32NodeDesc) equal(value uint32) bool {
  6718  	return n.value == value
  6719  }
  6720  
  6721  // NewUint32Desc return an empty uint32 skip set in descending order.
  6722  func NewUint32Desc() *Uint32SetDesc {
  6723  	h := newUint32NodeDesc(0, maxLevel)
  6724  	h.flags.SetTrue(fullyLinked)
  6725  	return &Uint32SetDesc{
  6726  		header:       h,
  6727  		highestLevel: defaultHighestLevel,
  6728  	}
  6729  }
  6730  
  6731  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  6732  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  6733  func (s *Uint32SetDesc) findNodeRemove(value uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) int {
  6734  	// lFound represents the index of the first layer at which it found a node.
  6735  	lFound, x := -1, s.header
  6736  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6737  		succ := x.atomicLoadNext(i)
  6738  		for succ != nil && succ.lessthan(value) {
  6739  			x = succ
  6740  			succ = x.atomicLoadNext(i)
  6741  		}
  6742  		preds[i] = x
  6743  		succs[i] = succ
  6744  
  6745  		// Check if the value already in the skip list.
  6746  		if lFound == -1 && succ != nil && succ.equal(value) {
  6747  			lFound = i
  6748  		}
  6749  	}
  6750  	return lFound
  6751  }
  6752  
  6753  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  6754  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  6755  func (s *Uint32SetDesc) findNodeAdd(value uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) int {
  6756  	x := s.header
  6757  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6758  		succ := x.atomicLoadNext(i)
  6759  		for succ != nil && succ.lessthan(value) {
  6760  			x = succ
  6761  			succ = x.atomicLoadNext(i)
  6762  		}
  6763  		preds[i] = x
  6764  		succs[i] = succ
  6765  
  6766  		// Check if the value already in the skip list.
  6767  		if succ != nil && succ.equal(value) {
  6768  			return i
  6769  		}
  6770  	}
  6771  	return -1
  6772  }
  6773  
  6774  func unlockUint32Desc(preds [maxLevel]*uint32NodeDesc, highestLevel int) {
  6775  	var prevPred *uint32NodeDesc
  6776  	for i := highestLevel; i >= 0; i-- {
  6777  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  6778  			preds[i].mu.Unlock()
  6779  			prevPred = preds[i]
  6780  		}
  6781  	}
  6782  }
  6783  
  6784  // Add add the value into skip set, return true if this process insert the value into skip set,
  6785  // return false if this process can't insert this value, because another process has insert the same value.
  6786  //
  6787  // If the value is in the skip set but not fully linked, this process will wait until it is.
  6788  func (s *Uint32SetDesc) Add(value uint32) bool {
  6789  	level := s.randomLevel()
  6790  	var preds, succs [maxLevel]*uint32NodeDesc
  6791  	for {
  6792  		lFound := s.findNodeAdd(value, &preds, &succs)
  6793  		if lFound != -1 { // indicating the value is already in the skip-list
  6794  			nodeFound := succs[lFound]
  6795  			if !nodeFound.flags.Get(marked) {
  6796  				for !nodeFound.flags.Get(fullyLinked) {
  6797  					// The node is not yet fully linked, just waits until it is.
  6798  				}
  6799  				return false
  6800  			}
  6801  			// If the node is marked, represents some other thread is in the process of deleting this node,
  6802  			// we need to add this node in next loop.
  6803  			continue
  6804  		}
  6805  		// Add this node into skip list.
  6806  		var (
  6807  			highestLocked        = -1 // the highest level being locked by this process
  6808  			valid                = true
  6809  			pred, succ, prevPred *uint32NodeDesc
  6810  		)
  6811  		for layer := 0; valid && layer < level; layer++ {
  6812  			pred = preds[layer]   // target node's previous node
  6813  			succ = succs[layer]   // target node's next node
  6814  			if pred != prevPred { // the node in this layer could be locked by previous loop
  6815  				pred.mu.Lock()
  6816  				highestLocked = layer
  6817  				prevPred = pred
  6818  			}
  6819  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  6820  			// It is valid if:
  6821  			// 1. The previous node and next node both are not marked.
  6822  			// 2. The previous node's next node is succ in this layer.
  6823  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  6824  		}
  6825  		if !valid {
  6826  			unlockUint32Desc(preds, highestLocked)
  6827  			continue
  6828  		}
  6829  
  6830  		nn := newUint32NodeDesc(value, level)
  6831  		for layer := 0; layer < level; layer++ {
  6832  			nn.storeNext(layer, succs[layer])
  6833  			preds[layer].atomicStoreNext(layer, nn)
  6834  		}
  6835  		nn.flags.SetTrue(fullyLinked)
  6836  		unlockUint32Desc(preds, highestLocked)
  6837  		atomic.AddInt64(&s.length, 1)
  6838  		return true
  6839  	}
  6840  }
  6841  
  6842  func (s *Uint32SetDesc) randomLevel() int {
  6843  	// Generate random level.
  6844  	level := randomLevel()
  6845  	// Update highest level if possible.
  6846  	for {
  6847  		hl := atomic.LoadInt64(&s.highestLevel)
  6848  		if int64(level) <= hl {
  6849  			break
  6850  		}
  6851  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  6852  			break
  6853  		}
  6854  	}
  6855  	return level
  6856  }
  6857  
  6858  // Contains check if the value is in the skip set.
  6859  func (s *Uint32SetDesc) Contains(value uint32) bool {
  6860  	x := s.header
  6861  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  6862  		nex := x.atomicLoadNext(i)
  6863  		for nex != nil && nex.lessthan(value) {
  6864  			x = nex
  6865  			nex = x.atomicLoadNext(i)
  6866  		}
  6867  
  6868  		// Check if the value already in the skip list.
  6869  		if nex != nil && nex.equal(value) {
  6870  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  6871  		}
  6872  	}
  6873  	return false
  6874  }
  6875  
  6876  // Remove a node from the skip set.
  6877  func (s *Uint32SetDesc) Remove(value uint32) bool {
  6878  	var (
  6879  		nodeToRemove *uint32NodeDesc
  6880  		isMarked     bool // represents if this operation mark the node
  6881  		topLayer     = -1
  6882  		preds, succs [maxLevel]*uint32NodeDesc
  6883  	)
  6884  	for {
  6885  		lFound := s.findNodeRemove(value, &preds, &succs)
  6886  		if isMarked || // this process mark this node or we can find this node in the skip list
  6887  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  6888  			if !isMarked { // we don't mark this node for now
  6889  				nodeToRemove = succs[lFound]
  6890  				topLayer = lFound
  6891  				nodeToRemove.mu.Lock()
  6892  				if nodeToRemove.flags.Get(marked) {
  6893  					// The node is marked by another process,
  6894  					// the physical deletion will be accomplished by another process.
  6895  					nodeToRemove.mu.Unlock()
  6896  					return false
  6897  				}
  6898  				nodeToRemove.flags.SetTrue(marked)
  6899  				isMarked = true
  6900  			}
  6901  			// Accomplish the physical deletion.
  6902  			var (
  6903  				highestLocked        = -1 // the highest level being locked by this process
  6904  				valid                = true
  6905  				pred, succ, prevPred *uint32NodeDesc
  6906  			)
  6907  			for layer := 0; valid && (layer <= topLayer); layer++ {
  6908  				pred, succ = preds[layer], succs[layer]
  6909  				if pred != prevPred { // the node in this layer could be locked by previous loop
  6910  					pred.mu.Lock()
  6911  					highestLocked = layer
  6912  					prevPred = pred
  6913  				}
  6914  				// valid check if there is another node has inserted into the skip list in this layer
  6915  				// during this process, or the previous is removed by another process.
  6916  				// It is valid if:
  6917  				// 1. the previous node exists.
  6918  				// 2. no another node has inserted into the skip list in this layer.
  6919  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  6920  			}
  6921  			if !valid {
  6922  				unlockUint32Desc(preds, highestLocked)
  6923  				continue
  6924  			}
  6925  			for i := topLayer; i >= 0; i-- {
  6926  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  6927  				// So we don't need `nodeToRemove.loadNext`
  6928  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  6929  			}
  6930  			nodeToRemove.mu.Unlock()
  6931  			unlockUint32Desc(preds, highestLocked)
  6932  			atomic.AddInt64(&s.length, -1)
  6933  			return true
  6934  		}
  6935  		return false
  6936  	}
  6937  }
  6938  
  6939  // Range calls f sequentially for each value present in the skip set.
  6940  // If f returns false, range stops the iteration.
  6941  func (s *Uint32SetDesc) Range(f func(value uint32) bool) {
  6942  	x := s.header.atomicLoadNext(0)
  6943  	for x != nil {
  6944  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  6945  			x = x.atomicLoadNext(0)
  6946  			continue
  6947  		}
  6948  		if !f(x.value) {
  6949  			break
  6950  		}
  6951  		x = x.atomicLoadNext(0)
  6952  	}
  6953  }
  6954  
  6955  // Len return the length of this skip set.
  6956  func (s *Uint32SetDesc) Len() int {
  6957  	return int(atomic.LoadInt64(&s.length))
  6958  }
  6959  
  6960  // Uint64Set represents a set based on skip list in ascending order.
  6961  type Uint64Set struct {
  6962  	header       *uint64Node
  6963  	length       int64
  6964  	highestLevel int64 // highest level for now
  6965  }
  6966  
  6967  type uint64Node struct {
  6968  	value uint64
  6969  	next  optionalArray // [level]*uint64Node
  6970  	mu    sync.Mutex
  6971  	flags bitflag
  6972  	level uint32
  6973  }
  6974  
  6975  func newUuint64Node(value uint64, level int) *uint64Node {
  6976  	node := &uint64Node{
  6977  		value: value,
  6978  		level: uint32(level),
  6979  	}
  6980  	if level > op1 {
  6981  		node.next.extra = new([op2]unsafe.Pointer)
  6982  	}
  6983  	return node
  6984  }
  6985  
  6986  func (n *uint64Node) loadNext(i int) *uint64Node {
  6987  	return (*uint64Node)(n.next.load(i))
  6988  }
  6989  
  6990  func (n *uint64Node) storeNext(i int, node *uint64Node) {
  6991  	n.next.store(i, unsafe.Pointer(node))
  6992  }
  6993  
  6994  func (n *uint64Node) atomicLoadNext(i int) *uint64Node {
  6995  	return (*uint64Node)(n.next.atomicLoad(i))
  6996  }
  6997  
  6998  func (n *uint64Node) atomicStoreNext(i int, node *uint64Node) {
  6999  	n.next.atomicStore(i, unsafe.Pointer(node))
  7000  }
  7001  
  7002  func (n *uint64Node) lessthan(value uint64) bool {
  7003  	return n.value < value
  7004  }
  7005  
  7006  func (n *uint64Node) equal(value uint64) bool {
  7007  	return n.value == value
  7008  }
  7009  
  7010  // NewUint64 return an empty uint64 skip set in ascending order.
  7011  func NewUint64() *Uint64Set {
  7012  	h := newUuint64Node(0, maxLevel)
  7013  	h.flags.SetTrue(fullyLinked)
  7014  	return &Uint64Set{
  7015  		header:       h,
  7016  		highestLevel: defaultHighestLevel,
  7017  	}
  7018  }
  7019  
  7020  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  7021  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  7022  func (s *Uint64Set) findNodeRemove(value uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) int {
  7023  	// lFound represents the index of the first layer at which it found a node.
  7024  	lFound, x := -1, s.header
  7025  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7026  		succ := x.atomicLoadNext(i)
  7027  		for succ != nil && succ.lessthan(value) {
  7028  			x = succ
  7029  			succ = x.atomicLoadNext(i)
  7030  		}
  7031  		preds[i] = x
  7032  		succs[i] = succ
  7033  
  7034  		// Check if the value already in the skip list.
  7035  		if lFound == -1 && succ != nil && succ.equal(value) {
  7036  			lFound = i
  7037  		}
  7038  	}
  7039  	return lFound
  7040  }
  7041  
  7042  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  7043  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  7044  func (s *Uint64Set) findNodeAdd(value uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) int {
  7045  	x := s.header
  7046  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7047  		succ := x.atomicLoadNext(i)
  7048  		for succ != nil && succ.lessthan(value) {
  7049  			x = succ
  7050  			succ = x.atomicLoadNext(i)
  7051  		}
  7052  		preds[i] = x
  7053  		succs[i] = succ
  7054  
  7055  		// Check if the value already in the skip list.
  7056  		if succ != nil && succ.equal(value) {
  7057  			return i
  7058  		}
  7059  	}
  7060  	return -1
  7061  }
  7062  
  7063  func unlockUint64(preds [maxLevel]*uint64Node, highestLevel int) {
  7064  	var prevPred *uint64Node
  7065  	for i := highestLevel; i >= 0; i-- {
  7066  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  7067  			preds[i].mu.Unlock()
  7068  			prevPred = preds[i]
  7069  		}
  7070  	}
  7071  }
  7072  
  7073  // Add add the value into skip set, return true if this process insert the value into skip set,
  7074  // return false if this process can't insert this value, because another process has insert the same value.
  7075  //
  7076  // If the value is in the skip set but not fully linked, this process will wait until it is.
  7077  func (s *Uint64Set) Add(value uint64) bool {
  7078  	level := s.randomLevel()
  7079  	var preds, succs [maxLevel]*uint64Node
  7080  	for {
  7081  		lFound := s.findNodeAdd(value, &preds, &succs)
  7082  		if lFound != -1 { // indicating the value is already in the skip-list
  7083  			nodeFound := succs[lFound]
  7084  			if !nodeFound.flags.Get(marked) {
  7085  				for !nodeFound.flags.Get(fullyLinked) {
  7086  					// The node is not yet fully linked, just waits until it is.
  7087  				}
  7088  				return false
  7089  			}
  7090  			// If the node is marked, represents some other thread is in the process of deleting this node,
  7091  			// we need to add this node in next loop.
  7092  			continue
  7093  		}
  7094  		// Add this node into skip list.
  7095  		var (
  7096  			highestLocked        = -1 // the highest level being locked by this process
  7097  			valid                = true
  7098  			pred, succ, prevPred *uint64Node
  7099  		)
  7100  		for layer := 0; valid && layer < level; layer++ {
  7101  			pred = preds[layer]   // target node's previous node
  7102  			succ = succs[layer]   // target node's next node
  7103  			if pred != prevPred { // the node in this layer could be locked by previous loop
  7104  				pred.mu.Lock()
  7105  				highestLocked = layer
  7106  				prevPred = pred
  7107  			}
  7108  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  7109  			// It is valid if:
  7110  			// 1. The previous node and next node both are not marked.
  7111  			// 2. The previous node's next node is succ in this layer.
  7112  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  7113  		}
  7114  		if !valid {
  7115  			unlockUint64(preds, highestLocked)
  7116  			continue
  7117  		}
  7118  
  7119  		nn := newUuint64Node(value, level)
  7120  		for layer := 0; layer < level; layer++ {
  7121  			nn.storeNext(layer, succs[layer])
  7122  			preds[layer].atomicStoreNext(layer, nn)
  7123  		}
  7124  		nn.flags.SetTrue(fullyLinked)
  7125  		unlockUint64(preds, highestLocked)
  7126  		atomic.AddInt64(&s.length, 1)
  7127  		return true
  7128  	}
  7129  }
  7130  
  7131  func (s *Uint64Set) randomLevel() int {
  7132  	// Generate random level.
  7133  	level := randomLevel()
  7134  	// Update highest level if possible.
  7135  	for {
  7136  		hl := atomic.LoadInt64(&s.highestLevel)
  7137  		if int64(level) <= hl {
  7138  			break
  7139  		}
  7140  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  7141  			break
  7142  		}
  7143  	}
  7144  	return level
  7145  }
  7146  
  7147  // Contains check if the value is in the skip set.
  7148  func (s *Uint64Set) Contains(value uint64) bool {
  7149  	x := s.header
  7150  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7151  		nex := x.atomicLoadNext(i)
  7152  		for nex != nil && nex.lessthan(value) {
  7153  			x = nex
  7154  			nex = x.atomicLoadNext(i)
  7155  		}
  7156  
  7157  		// Check if the value already in the skip list.
  7158  		if nex != nil && nex.equal(value) {
  7159  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  7160  		}
  7161  	}
  7162  	return false
  7163  }
  7164  
  7165  // Remove a node from the skip set.
  7166  func (s *Uint64Set) Remove(value uint64) bool {
  7167  	var (
  7168  		nodeToRemove *uint64Node
  7169  		isMarked     bool // represents if this operation mark the node
  7170  		topLayer     = -1
  7171  		preds, succs [maxLevel]*uint64Node
  7172  	)
  7173  	for {
  7174  		lFound := s.findNodeRemove(value, &preds, &succs)
  7175  		if isMarked || // this process mark this node or we can find this node in the skip list
  7176  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  7177  			if !isMarked { // we don't mark this node for now
  7178  				nodeToRemove = succs[lFound]
  7179  				topLayer = lFound
  7180  				nodeToRemove.mu.Lock()
  7181  				if nodeToRemove.flags.Get(marked) {
  7182  					// The node is marked by another process,
  7183  					// the physical deletion will be accomplished by another process.
  7184  					nodeToRemove.mu.Unlock()
  7185  					return false
  7186  				}
  7187  				nodeToRemove.flags.SetTrue(marked)
  7188  				isMarked = true
  7189  			}
  7190  			// Accomplish the physical deletion.
  7191  			var (
  7192  				highestLocked        = -1 // the highest level being locked by this process
  7193  				valid                = true
  7194  				pred, succ, prevPred *uint64Node
  7195  			)
  7196  			for layer := 0; valid && (layer <= topLayer); layer++ {
  7197  				pred, succ = preds[layer], succs[layer]
  7198  				if pred != prevPred { // the node in this layer could be locked by previous loop
  7199  					pred.mu.Lock()
  7200  					highestLocked = layer
  7201  					prevPred = pred
  7202  				}
  7203  				// valid check if there is another node has inserted into the skip list in this layer
  7204  				// during this process, or the previous is removed by another process.
  7205  				// It is valid if:
  7206  				// 1. the previous node exists.
  7207  				// 2. no another node has inserted into the skip list in this layer.
  7208  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  7209  			}
  7210  			if !valid {
  7211  				unlockUint64(preds, highestLocked)
  7212  				continue
  7213  			}
  7214  			for i := topLayer; i >= 0; i-- {
  7215  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  7216  				// So we don't need `nodeToRemove.loadNext`
  7217  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  7218  			}
  7219  			nodeToRemove.mu.Unlock()
  7220  			unlockUint64(preds, highestLocked)
  7221  			atomic.AddInt64(&s.length, -1)
  7222  			return true
  7223  		}
  7224  		return false
  7225  	}
  7226  }
  7227  
  7228  // Range calls f sequentially for each value present in the skip set.
  7229  // If f returns false, range stops the iteration.
  7230  func (s *Uint64Set) Range(f func(value uint64) bool) {
  7231  	x := s.header.atomicLoadNext(0)
  7232  	for x != nil {
  7233  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  7234  			x = x.atomicLoadNext(0)
  7235  			continue
  7236  		}
  7237  		if !f(x.value) {
  7238  			break
  7239  		}
  7240  		x = x.atomicLoadNext(0)
  7241  	}
  7242  }
  7243  
  7244  // Len return the length of this skip set.
  7245  func (s *Uint64Set) Len() int {
  7246  	return int(atomic.LoadInt64(&s.length))
  7247  }
  7248  
  7249  // Uint64SetDesc represents a set based on skip list in descending order.
  7250  type Uint64SetDesc struct {
  7251  	header       *uint64NodeDesc
  7252  	length       int64
  7253  	highestLevel int64 // highest level for now
  7254  }
  7255  
  7256  type uint64NodeDesc struct {
  7257  	value uint64
  7258  	next  optionalArray // [level]*uint64NodeDesc
  7259  	mu    sync.Mutex
  7260  	flags bitflag
  7261  	level uint32
  7262  }
  7263  
  7264  func newUuint64NodeDescDesc(value uint64, level int) *uint64NodeDesc {
  7265  	node := &uint64NodeDesc{
  7266  		value: value,
  7267  		level: uint32(level),
  7268  	}
  7269  	if level > op1 {
  7270  		node.next.extra = new([op2]unsafe.Pointer)
  7271  	}
  7272  	return node
  7273  }
  7274  
  7275  func (n *uint64NodeDesc) loadNext(i int) *uint64NodeDesc {
  7276  	return (*uint64NodeDesc)(n.next.load(i))
  7277  }
  7278  
  7279  func (n *uint64NodeDesc) storeNext(i int, node *uint64NodeDesc) {
  7280  	n.next.store(i, unsafe.Pointer(node))
  7281  }
  7282  
  7283  func (n *uint64NodeDesc) atomicLoadNext(i int) *uint64NodeDesc {
  7284  	return (*uint64NodeDesc)(n.next.atomicLoad(i))
  7285  }
  7286  
  7287  func (n *uint64NodeDesc) atomicStoreNext(i int, node *uint64NodeDesc) {
  7288  	n.next.atomicStore(i, unsafe.Pointer(node))
  7289  }
  7290  
  7291  func (n *uint64NodeDesc) lessthan(value uint64) bool {
  7292  	return n.value > value
  7293  }
  7294  
  7295  func (n *uint64NodeDesc) equal(value uint64) bool {
  7296  	return n.value == value
  7297  }
  7298  
  7299  // NewUint64Desc return an empty uint64 skip set in descending order.
  7300  func NewUint64Desc() *Uint64SetDesc {
  7301  	h := newUuint64NodeDescDesc(0, maxLevel)
  7302  	h.flags.SetTrue(fullyLinked)
  7303  	return &Uint64SetDesc{
  7304  		header:       h,
  7305  		highestLevel: defaultHighestLevel,
  7306  	}
  7307  }
  7308  
  7309  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  7310  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  7311  func (s *Uint64SetDesc) findNodeRemove(value uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) int {
  7312  	// lFound represents the index of the first layer at which it found a node.
  7313  	lFound, x := -1, s.header
  7314  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7315  		succ := x.atomicLoadNext(i)
  7316  		for succ != nil && succ.lessthan(value) {
  7317  			x = succ
  7318  			succ = x.atomicLoadNext(i)
  7319  		}
  7320  		preds[i] = x
  7321  		succs[i] = succ
  7322  
  7323  		// Check if the value already in the skip list.
  7324  		if lFound == -1 && succ != nil && succ.equal(value) {
  7325  			lFound = i
  7326  		}
  7327  	}
  7328  	return lFound
  7329  }
  7330  
  7331  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  7332  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  7333  func (s *Uint64SetDesc) findNodeAdd(value uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) int {
  7334  	x := s.header
  7335  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7336  		succ := x.atomicLoadNext(i)
  7337  		for succ != nil && succ.lessthan(value) {
  7338  			x = succ
  7339  			succ = x.atomicLoadNext(i)
  7340  		}
  7341  		preds[i] = x
  7342  		succs[i] = succ
  7343  
  7344  		// Check if the value already in the skip list.
  7345  		if succ != nil && succ.equal(value) {
  7346  			return i
  7347  		}
  7348  	}
  7349  	return -1
  7350  }
  7351  
  7352  func unlockUint64Desc(preds [maxLevel]*uint64NodeDesc, highestLevel int) {
  7353  	var prevPred *uint64NodeDesc
  7354  	for i := highestLevel; i >= 0; i-- {
  7355  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  7356  			preds[i].mu.Unlock()
  7357  			prevPred = preds[i]
  7358  		}
  7359  	}
  7360  }
  7361  
  7362  // Add add the value into skip set, return true if this process insert the value into skip set,
  7363  // return false if this process can't insert this value, because another process has insert the same value.
  7364  //
  7365  // If the value is in the skip set but not fully linked, this process will wait until it is.
  7366  func (s *Uint64SetDesc) Add(value uint64) bool {
  7367  	level := s.randomLevel()
  7368  	var preds, succs [maxLevel]*uint64NodeDesc
  7369  	for {
  7370  		lFound := s.findNodeAdd(value, &preds, &succs)
  7371  		if lFound != -1 { // indicating the value is already in the skip-list
  7372  			nodeFound := succs[lFound]
  7373  			if !nodeFound.flags.Get(marked) {
  7374  				for !nodeFound.flags.Get(fullyLinked) {
  7375  					// The node is not yet fully linked, just waits until it is.
  7376  				}
  7377  				return false
  7378  			}
  7379  			// If the node is marked, represents some other thread is in the process of deleting this node,
  7380  			// we need to add this node in next loop.
  7381  			continue
  7382  		}
  7383  		// Add this node into skip list.
  7384  		var (
  7385  			highestLocked        = -1 // the highest level being locked by this process
  7386  			valid                = true
  7387  			pred, succ, prevPred *uint64NodeDesc
  7388  		)
  7389  		for layer := 0; valid && layer < level; layer++ {
  7390  			pred = preds[layer]   // target node's previous node
  7391  			succ = succs[layer]   // target node's next node
  7392  			if pred != prevPred { // the node in this layer could be locked by previous loop
  7393  				pred.mu.Lock()
  7394  				highestLocked = layer
  7395  				prevPred = pred
  7396  			}
  7397  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  7398  			// It is valid if:
  7399  			// 1. The previous node and next node both are not marked.
  7400  			// 2. The previous node's next node is succ in this layer.
  7401  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  7402  		}
  7403  		if !valid {
  7404  			unlockUint64Desc(preds, highestLocked)
  7405  			continue
  7406  		}
  7407  
  7408  		nn := newUuint64NodeDescDesc(value, level)
  7409  		for layer := 0; layer < level; layer++ {
  7410  			nn.storeNext(layer, succs[layer])
  7411  			preds[layer].atomicStoreNext(layer, nn)
  7412  		}
  7413  		nn.flags.SetTrue(fullyLinked)
  7414  		unlockUint64Desc(preds, highestLocked)
  7415  		atomic.AddInt64(&s.length, 1)
  7416  		return true
  7417  	}
  7418  }
  7419  
  7420  func (s *Uint64SetDesc) randomLevel() int {
  7421  	// Generate random level.
  7422  	level := randomLevel()
  7423  	// Update highest level if possible.
  7424  	for {
  7425  		hl := atomic.LoadInt64(&s.highestLevel)
  7426  		if int64(level) <= hl {
  7427  			break
  7428  		}
  7429  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  7430  			break
  7431  		}
  7432  	}
  7433  	return level
  7434  }
  7435  
  7436  // Contains check if the value is in the skip set.
  7437  func (s *Uint64SetDesc) Contains(value uint64) bool {
  7438  	x := s.header
  7439  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7440  		nex := x.atomicLoadNext(i)
  7441  		for nex != nil && nex.lessthan(value) {
  7442  			x = nex
  7443  			nex = x.atomicLoadNext(i)
  7444  		}
  7445  
  7446  		// Check if the value already in the skip list.
  7447  		if nex != nil && nex.equal(value) {
  7448  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  7449  		}
  7450  	}
  7451  	return false
  7452  }
  7453  
  7454  // Remove a node from the skip set.
  7455  func (s *Uint64SetDesc) Remove(value uint64) bool {
  7456  	var (
  7457  		nodeToRemove *uint64NodeDesc
  7458  		isMarked     bool // represents if this operation mark the node
  7459  		topLayer     = -1
  7460  		preds, succs [maxLevel]*uint64NodeDesc
  7461  	)
  7462  	for {
  7463  		lFound := s.findNodeRemove(value, &preds, &succs)
  7464  		if isMarked || // this process mark this node or we can find this node in the skip list
  7465  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  7466  			if !isMarked { // we don't mark this node for now
  7467  				nodeToRemove = succs[lFound]
  7468  				topLayer = lFound
  7469  				nodeToRemove.mu.Lock()
  7470  				if nodeToRemove.flags.Get(marked) {
  7471  					// The node is marked by another process,
  7472  					// the physical deletion will be accomplished by another process.
  7473  					nodeToRemove.mu.Unlock()
  7474  					return false
  7475  				}
  7476  				nodeToRemove.flags.SetTrue(marked)
  7477  				isMarked = true
  7478  			}
  7479  			// Accomplish the physical deletion.
  7480  			var (
  7481  				highestLocked        = -1 // the highest level being locked by this process
  7482  				valid                = true
  7483  				pred, succ, prevPred *uint64NodeDesc
  7484  			)
  7485  			for layer := 0; valid && (layer <= topLayer); layer++ {
  7486  				pred, succ = preds[layer], succs[layer]
  7487  				if pred != prevPred { // the node in this layer could be locked by previous loop
  7488  					pred.mu.Lock()
  7489  					highestLocked = layer
  7490  					prevPred = pred
  7491  				}
  7492  				// valid check if there is another node has inserted into the skip list in this layer
  7493  				// during this process, or the previous is removed by another process.
  7494  				// It is valid if:
  7495  				// 1. the previous node exists.
  7496  				// 2. no another node has inserted into the skip list in this layer.
  7497  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  7498  			}
  7499  			if !valid {
  7500  				unlockUint64Desc(preds, highestLocked)
  7501  				continue
  7502  			}
  7503  			for i := topLayer; i >= 0; i-- {
  7504  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  7505  				// So we don't need `nodeToRemove.loadNext`
  7506  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  7507  			}
  7508  			nodeToRemove.mu.Unlock()
  7509  			unlockUint64Desc(preds, highestLocked)
  7510  			atomic.AddInt64(&s.length, -1)
  7511  			return true
  7512  		}
  7513  		return false
  7514  	}
  7515  }
  7516  
  7517  // Range calls f sequentially for each value present in the skip set.
  7518  // If f returns false, range stops the iteration.
  7519  func (s *Uint64SetDesc) Range(f func(value uint64) bool) {
  7520  	x := s.header.atomicLoadNext(0)
  7521  	for x != nil {
  7522  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  7523  			x = x.atomicLoadNext(0)
  7524  			continue
  7525  		}
  7526  		if !f(x.value) {
  7527  			break
  7528  		}
  7529  		x = x.atomicLoadNext(0)
  7530  	}
  7531  }
  7532  
  7533  // Len return the length of this skip set.
  7534  func (s *Uint64SetDesc) Len() int {
  7535  	return int(atomic.LoadInt64(&s.length))
  7536  }
  7537  
  7538  // UintptrSet represents a set based on skip list in ascending order.
  7539  type UintptrSet struct {
  7540  	header       *uintptrNode
  7541  	length       int64
  7542  	highestLevel int64 // highest level for now
  7543  }
  7544  
  7545  type uintptrNode struct {
  7546  	value uintptr
  7547  	next  optionalArray // [level]*uintptrNode
  7548  	mu    sync.Mutex
  7549  	flags bitflag
  7550  	level uint32
  7551  }
  7552  
  7553  func newUintptrNode(value uintptr, level int) *uintptrNode {
  7554  	node := &uintptrNode{
  7555  		value: value,
  7556  		level: uint32(level),
  7557  	}
  7558  	if level > op1 {
  7559  		node.next.extra = new([op2]unsafe.Pointer)
  7560  	}
  7561  	return node
  7562  }
  7563  
  7564  func (n *uintptrNode) loadNext(i int) *uintptrNode {
  7565  	return (*uintptrNode)(n.next.load(i))
  7566  }
  7567  
  7568  func (n *uintptrNode) storeNext(i int, node *uintptrNode) {
  7569  	n.next.store(i, unsafe.Pointer(node))
  7570  }
  7571  
  7572  func (n *uintptrNode) atomicLoadNext(i int) *uintptrNode {
  7573  	return (*uintptrNode)(n.next.atomicLoad(i))
  7574  }
  7575  
  7576  func (n *uintptrNode) atomicStoreNext(i int, node *uintptrNode) {
  7577  	n.next.atomicStore(i, unsafe.Pointer(node))
  7578  }
  7579  
  7580  func (n *uintptrNode) lessthan(value uintptr) bool {
  7581  	return n.value < value
  7582  }
  7583  
  7584  func (n *uintptrNode) equal(value uintptr) bool {
  7585  	return n.value == value
  7586  }
  7587  
  7588  // NewUintptr return an empty uintptr skip set in ascending order.
  7589  func NewUintptr() *UintptrSet {
  7590  	h := newUintptrNode(0, maxLevel)
  7591  	h.flags.SetTrue(fullyLinked)
  7592  	return &UintptrSet{
  7593  		header:       h,
  7594  		highestLevel: defaultHighestLevel,
  7595  	}
  7596  }
  7597  
  7598  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  7599  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  7600  func (s *UintptrSet) findNodeRemove(value uintptr, preds *[maxLevel]*uintptrNode, succs *[maxLevel]*uintptrNode) int {
  7601  	// lFound represents the index of the first layer at which it found a node.
  7602  	lFound, x := -1, s.header
  7603  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7604  		succ := x.atomicLoadNext(i)
  7605  		for succ != nil && succ.lessthan(value) {
  7606  			x = succ
  7607  			succ = x.atomicLoadNext(i)
  7608  		}
  7609  		preds[i] = x
  7610  		succs[i] = succ
  7611  
  7612  		// Check if the value already in the skip list.
  7613  		if lFound == -1 && succ != nil && succ.equal(value) {
  7614  			lFound = i
  7615  		}
  7616  	}
  7617  	return lFound
  7618  }
  7619  
  7620  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  7621  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  7622  func (s *UintptrSet) findNodeAdd(value uintptr, preds *[maxLevel]*uintptrNode, succs *[maxLevel]*uintptrNode) int {
  7623  	x := s.header
  7624  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7625  		succ := x.atomicLoadNext(i)
  7626  		for succ != nil && succ.lessthan(value) {
  7627  			x = succ
  7628  			succ = x.atomicLoadNext(i)
  7629  		}
  7630  		preds[i] = x
  7631  		succs[i] = succ
  7632  
  7633  		// Check if the value already in the skip list.
  7634  		if succ != nil && succ.equal(value) {
  7635  			return i
  7636  		}
  7637  	}
  7638  	return -1
  7639  }
  7640  
  7641  func unlockUintptr(preds [maxLevel]*uintptrNode, highestLevel int) {
  7642  	var prevPred *uintptrNode
  7643  	for i := highestLevel; i >= 0; i-- {
  7644  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  7645  			preds[i].mu.Unlock()
  7646  			prevPred = preds[i]
  7647  		}
  7648  	}
  7649  }
  7650  
  7651  // Add add the value into skip set, return true if this process insert the value into skip set,
  7652  // return false if this process can't insert this value, because another process has insert the same value.
  7653  //
  7654  // If the value is in the skip set but not fully linked, this process will wait until it is.
  7655  func (s *UintptrSet) Add(value uintptr) bool {
  7656  	level := s.randomLevel()
  7657  	var preds, succs [maxLevel]*uintptrNode
  7658  	for {
  7659  		lFound := s.findNodeAdd(value, &preds, &succs)
  7660  		if lFound != -1 { // indicating the value is already in the skip-list
  7661  			nodeFound := succs[lFound]
  7662  			if !nodeFound.flags.Get(marked) {
  7663  				for !nodeFound.flags.Get(fullyLinked) {
  7664  					// The node is not yet fully linked, just waits until it is.
  7665  				}
  7666  				return false
  7667  			}
  7668  			// If the node is marked, represents some other thread is in the process of deleting this node,
  7669  			// we need to add this node in next loop.
  7670  			continue
  7671  		}
  7672  		// Add this node into skip list.
  7673  		var (
  7674  			highestLocked        = -1 // the highest level being locked by this process
  7675  			valid                = true
  7676  			pred, succ, prevPred *uintptrNode
  7677  		)
  7678  		for layer := 0; valid && layer < level; layer++ {
  7679  			pred = preds[layer]   // target node's previous node
  7680  			succ = succs[layer]   // target node's next node
  7681  			if pred != prevPred { // the node in this layer could be locked by previous loop
  7682  				pred.mu.Lock()
  7683  				highestLocked = layer
  7684  				prevPred = pred
  7685  			}
  7686  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  7687  			// It is valid if:
  7688  			// 1. The previous node and next node both are not marked.
  7689  			// 2. The previous node's next node is succ in this layer.
  7690  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  7691  		}
  7692  		if !valid {
  7693  			unlockUintptr(preds, highestLocked)
  7694  			continue
  7695  		}
  7696  
  7697  		nn := newUintptrNode(value, level)
  7698  		for layer := 0; layer < level; layer++ {
  7699  			nn.storeNext(layer, succs[layer])
  7700  			preds[layer].atomicStoreNext(layer, nn)
  7701  		}
  7702  		nn.flags.SetTrue(fullyLinked)
  7703  		unlockUintptr(preds, highestLocked)
  7704  		atomic.AddInt64(&s.length, 1)
  7705  		return true
  7706  	}
  7707  }
  7708  
  7709  func (s *UintptrSet) randomLevel() int {
  7710  	// Generate random level.
  7711  	level := randomLevel()
  7712  	// Update highest level if possible.
  7713  	for {
  7714  		hl := atomic.LoadInt64(&s.highestLevel)
  7715  		if int64(level) <= hl {
  7716  			break
  7717  		}
  7718  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  7719  			break
  7720  		}
  7721  	}
  7722  	return level
  7723  }
  7724  
  7725  // Contains check if the value is in the skip set.
  7726  func (s *UintptrSet) Contains(value uintptr) bool {
  7727  	x := s.header
  7728  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7729  		nex := x.atomicLoadNext(i)
  7730  		for nex != nil && nex.lessthan(value) {
  7731  			x = nex
  7732  			nex = x.atomicLoadNext(i)
  7733  		}
  7734  
  7735  		// Check if the value already in the skip list.
  7736  		if nex != nil && nex.equal(value) {
  7737  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  7738  		}
  7739  	}
  7740  	return false
  7741  }
  7742  
  7743  // Remove a node from the skip set.
  7744  func (s *UintptrSet) Remove(value uintptr) bool {
  7745  	var (
  7746  		nodeToRemove *uintptrNode
  7747  		isMarked     bool // represents if this operation mark the node
  7748  		topLayer     = -1
  7749  		preds, succs [maxLevel]*uintptrNode
  7750  	)
  7751  	for {
  7752  		lFound := s.findNodeRemove(value, &preds, &succs)
  7753  		if isMarked || // this process mark this node or we can find this node in the skip list
  7754  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  7755  			if !isMarked { // we don't mark this node for now
  7756  				nodeToRemove = succs[lFound]
  7757  				topLayer = lFound
  7758  				nodeToRemove.mu.Lock()
  7759  				if nodeToRemove.flags.Get(marked) {
  7760  					// The node is marked by another process,
  7761  					// the physical deletion will be accomplished by another process.
  7762  					nodeToRemove.mu.Unlock()
  7763  					return false
  7764  				}
  7765  				nodeToRemove.flags.SetTrue(marked)
  7766  				isMarked = true
  7767  			}
  7768  			// Accomplish the physical deletion.
  7769  			var (
  7770  				highestLocked        = -1 // the highest level being locked by this process
  7771  				valid                = true
  7772  				pred, succ, prevPred *uintptrNode
  7773  			)
  7774  			for layer := 0; valid && (layer <= topLayer); layer++ {
  7775  				pred, succ = preds[layer], succs[layer]
  7776  				if pred != prevPred { // the node in this layer could be locked by previous loop
  7777  					pred.mu.Lock()
  7778  					highestLocked = layer
  7779  					prevPred = pred
  7780  				}
  7781  				// valid check if there is another node has inserted into the skip list in this layer
  7782  				// during this process, or the previous is removed by another process.
  7783  				// It is valid if:
  7784  				// 1. the previous node exists.
  7785  				// 2. no another node has inserted into the skip list in this layer.
  7786  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  7787  			}
  7788  			if !valid {
  7789  				unlockUintptr(preds, highestLocked)
  7790  				continue
  7791  			}
  7792  			for i := topLayer; i >= 0; i-- {
  7793  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  7794  				// So we don't need `nodeToRemove.loadNext`
  7795  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  7796  			}
  7797  			nodeToRemove.mu.Unlock()
  7798  			unlockUintptr(preds, highestLocked)
  7799  			atomic.AddInt64(&s.length, -1)
  7800  			return true
  7801  		}
  7802  		return false
  7803  	}
  7804  }
  7805  
  7806  // Range calls f sequentially for each value present in the skip set.
  7807  // If f returns false, range stops the iteration.
  7808  func (s *UintptrSet) Range(f func(value uintptr) bool) {
  7809  	x := s.header.atomicLoadNext(0)
  7810  	for x != nil {
  7811  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  7812  			x = x.atomicLoadNext(0)
  7813  			continue
  7814  		}
  7815  		if !f(x.value) {
  7816  			break
  7817  		}
  7818  		x = x.atomicLoadNext(0)
  7819  	}
  7820  }
  7821  
  7822  // Len return the length of this skip set.
  7823  func (s *UintptrSet) Len() int {
  7824  	return int(atomic.LoadInt64(&s.length))
  7825  }
  7826  
  7827  // UintptrSetDesc represents a set based on skip list in descending order.
  7828  type UintptrSetDesc struct {
  7829  	header       *uintptrNodeDesc
  7830  	length       int64
  7831  	highestLevel int64 // highest level for now
  7832  }
  7833  
  7834  type uintptrNodeDesc struct {
  7835  	value uintptr
  7836  	next  optionalArray // [level]*uintptrNodeDesc
  7837  	mu    sync.Mutex
  7838  	flags bitflag
  7839  	level uint32
  7840  }
  7841  
  7842  func newUintptrNodeDesc(value uintptr, level int) *uintptrNodeDesc {
  7843  	node := &uintptrNodeDesc{
  7844  		value: value,
  7845  		level: uint32(level),
  7846  	}
  7847  	if level > op1 {
  7848  		node.next.extra = new([op2]unsafe.Pointer)
  7849  	}
  7850  	return node
  7851  }
  7852  
  7853  func (n *uintptrNodeDesc) loadNext(i int) *uintptrNodeDesc {
  7854  	return (*uintptrNodeDesc)(n.next.load(i))
  7855  }
  7856  
  7857  func (n *uintptrNodeDesc) storeNext(i int, node *uintptrNodeDesc) {
  7858  	n.next.store(i, unsafe.Pointer(node))
  7859  }
  7860  
  7861  func (n *uintptrNodeDesc) atomicLoadNext(i int) *uintptrNodeDesc {
  7862  	return (*uintptrNodeDesc)(n.next.atomicLoad(i))
  7863  }
  7864  
  7865  func (n *uintptrNodeDesc) atomicStoreNext(i int, node *uintptrNodeDesc) {
  7866  	n.next.atomicStore(i, unsafe.Pointer(node))
  7867  }
  7868  
  7869  func (n *uintptrNodeDesc) lessthan(value uintptr) bool {
  7870  	return n.value > value
  7871  }
  7872  
  7873  func (n *uintptrNodeDesc) equal(value uintptr) bool {
  7874  	return n.value == value
  7875  }
  7876  
  7877  // NewUintptrDesc return an empty uintptr skip set in descending order.
  7878  func NewUintptrDesc() *UintptrSetDesc {
  7879  	h := newUintptrNodeDesc(0, maxLevel)
  7880  	h.flags.SetTrue(fullyLinked)
  7881  	return &UintptrSetDesc{
  7882  		header:       h,
  7883  		highestLevel: defaultHighestLevel,
  7884  	}
  7885  }
  7886  
  7887  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  7888  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  7889  func (s *UintptrSetDesc) findNodeRemove(value uintptr, preds *[maxLevel]*uintptrNodeDesc, succs *[maxLevel]*uintptrNodeDesc) int {
  7890  	// lFound represents the index of the first layer at which it found a node.
  7891  	lFound, x := -1, s.header
  7892  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7893  		succ := x.atomicLoadNext(i)
  7894  		for succ != nil && succ.lessthan(value) {
  7895  			x = succ
  7896  			succ = x.atomicLoadNext(i)
  7897  		}
  7898  		preds[i] = x
  7899  		succs[i] = succ
  7900  
  7901  		// Check if the value already in the skip list.
  7902  		if lFound == -1 && succ != nil && succ.equal(value) {
  7903  			lFound = i
  7904  		}
  7905  	}
  7906  	return lFound
  7907  }
  7908  
  7909  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  7910  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  7911  func (s *UintptrSetDesc) findNodeAdd(value uintptr, preds *[maxLevel]*uintptrNodeDesc, succs *[maxLevel]*uintptrNodeDesc) int {
  7912  	x := s.header
  7913  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  7914  		succ := x.atomicLoadNext(i)
  7915  		for succ != nil && succ.lessthan(value) {
  7916  			x = succ
  7917  			succ = x.atomicLoadNext(i)
  7918  		}
  7919  		preds[i] = x
  7920  		succs[i] = succ
  7921  
  7922  		// Check if the value already in the skip list.
  7923  		if succ != nil && succ.equal(value) {
  7924  			return i
  7925  		}
  7926  	}
  7927  	return -1
  7928  }
  7929  
  7930  func unlockUintptrDesc(preds [maxLevel]*uintptrNodeDesc, highestLevel int) {
  7931  	var prevPred *uintptrNodeDesc
  7932  	for i := highestLevel; i >= 0; i-- {
  7933  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  7934  			preds[i].mu.Unlock()
  7935  			prevPred = preds[i]
  7936  		}
  7937  	}
  7938  }
  7939  
  7940  // Add add the value into skip set, return true if this process insert the value into skip set,
  7941  // return false if this process can't insert this value, because another process has insert the same value.
  7942  //
  7943  // If the value is in the skip set but not fully linked, this process will wait until it is.
  7944  func (s *UintptrSetDesc) Add(value uintptr) bool {
  7945  	level := s.randomLevel()
  7946  	var preds, succs [maxLevel]*uintptrNodeDesc
  7947  	for {
  7948  		lFound := s.findNodeAdd(value, &preds, &succs)
  7949  		if lFound != -1 { // indicating the value is already in the skip-list
  7950  			nodeFound := succs[lFound]
  7951  			if !nodeFound.flags.Get(marked) {
  7952  				for !nodeFound.flags.Get(fullyLinked) {
  7953  					// The node is not yet fully linked, just waits until it is.
  7954  				}
  7955  				return false
  7956  			}
  7957  			// If the node is marked, represents some other thread is in the process of deleting this node,
  7958  			// we need to add this node in next loop.
  7959  			continue
  7960  		}
  7961  		// Add this node into skip list.
  7962  		var (
  7963  			highestLocked        = -1 // the highest level being locked by this process
  7964  			valid                = true
  7965  			pred, succ, prevPred *uintptrNodeDesc
  7966  		)
  7967  		for layer := 0; valid && layer < level; layer++ {
  7968  			pred = preds[layer]   // target node's previous node
  7969  			succ = succs[layer]   // target node's next node
  7970  			if pred != prevPred { // the node in this layer could be locked by previous loop
  7971  				pred.mu.Lock()
  7972  				highestLocked = layer
  7973  				prevPred = pred
  7974  			}
  7975  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  7976  			// It is valid if:
  7977  			// 1. The previous node and next node both are not marked.
  7978  			// 2. The previous node's next node is succ in this layer.
  7979  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  7980  		}
  7981  		if !valid {
  7982  			unlockUintptrDesc(preds, highestLocked)
  7983  			continue
  7984  		}
  7985  
  7986  		nn := newUintptrNodeDesc(value, level)
  7987  		for layer := 0; layer < level; layer++ {
  7988  			nn.storeNext(layer, succs[layer])
  7989  			preds[layer].atomicStoreNext(layer, nn)
  7990  		}
  7991  		nn.flags.SetTrue(fullyLinked)
  7992  		unlockUintptrDesc(preds, highestLocked)
  7993  		atomic.AddInt64(&s.length, 1)
  7994  		return true
  7995  	}
  7996  }
  7997  
  7998  func (s *UintptrSetDesc) randomLevel() int {
  7999  	// Generate random level.
  8000  	level := randomLevel()
  8001  	// Update highest level if possible.
  8002  	for {
  8003  		hl := atomic.LoadInt64(&s.highestLevel)
  8004  		if int64(level) <= hl {
  8005  			break
  8006  		}
  8007  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  8008  			break
  8009  		}
  8010  	}
  8011  	return level
  8012  }
  8013  
  8014  // Contains check if the value is in the skip set.
  8015  func (s *UintptrSetDesc) Contains(value uintptr) bool {
  8016  	x := s.header
  8017  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8018  		nex := x.atomicLoadNext(i)
  8019  		for nex != nil && nex.lessthan(value) {
  8020  			x = nex
  8021  			nex = x.atomicLoadNext(i)
  8022  		}
  8023  
  8024  		// Check if the value already in the skip list.
  8025  		if nex != nil && nex.equal(value) {
  8026  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  8027  		}
  8028  	}
  8029  	return false
  8030  }
  8031  
  8032  // Remove a node from the skip set.
  8033  func (s *UintptrSetDesc) Remove(value uintptr) bool {
  8034  	var (
  8035  		nodeToRemove *uintptrNodeDesc
  8036  		isMarked     bool // represents if this operation mark the node
  8037  		topLayer     = -1
  8038  		preds, succs [maxLevel]*uintptrNodeDesc
  8039  	)
  8040  	for {
  8041  		lFound := s.findNodeRemove(value, &preds, &succs)
  8042  		if isMarked || // this process mark this node or we can find this node in the skip list
  8043  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  8044  			if !isMarked { // we don't mark this node for now
  8045  				nodeToRemove = succs[lFound]
  8046  				topLayer = lFound
  8047  				nodeToRemove.mu.Lock()
  8048  				if nodeToRemove.flags.Get(marked) {
  8049  					// The node is marked by another process,
  8050  					// the physical deletion will be accomplished by another process.
  8051  					nodeToRemove.mu.Unlock()
  8052  					return false
  8053  				}
  8054  				nodeToRemove.flags.SetTrue(marked)
  8055  				isMarked = true
  8056  			}
  8057  			// Accomplish the physical deletion.
  8058  			var (
  8059  				highestLocked        = -1 // the highest level being locked by this process
  8060  				valid                = true
  8061  				pred, succ, prevPred *uintptrNodeDesc
  8062  			)
  8063  			for layer := 0; valid && (layer <= topLayer); layer++ {
  8064  				pred, succ = preds[layer], succs[layer]
  8065  				if pred != prevPred { // the node in this layer could be locked by previous loop
  8066  					pred.mu.Lock()
  8067  					highestLocked = layer
  8068  					prevPred = pred
  8069  				}
  8070  				// valid check if there is another node has inserted into the skip list in this layer
  8071  				// during this process, or the previous is removed by another process.
  8072  				// It is valid if:
  8073  				// 1. the previous node exists.
  8074  				// 2. no another node has inserted into the skip list in this layer.
  8075  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  8076  			}
  8077  			if !valid {
  8078  				unlockUintptrDesc(preds, highestLocked)
  8079  				continue
  8080  			}
  8081  			for i := topLayer; i >= 0; i-- {
  8082  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  8083  				// So we don't need `nodeToRemove.loadNext`
  8084  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  8085  			}
  8086  			nodeToRemove.mu.Unlock()
  8087  			unlockUintptrDesc(preds, highestLocked)
  8088  			atomic.AddInt64(&s.length, -1)
  8089  			return true
  8090  		}
  8091  		return false
  8092  	}
  8093  }
  8094  
  8095  // Range calls f sequentially for each value present in the skip set.
  8096  // If f returns false, range stops the iteration.
  8097  func (s *UintptrSetDesc) Range(f func(value uintptr) bool) {
  8098  	x := s.header.atomicLoadNext(0)
  8099  	for x != nil {
  8100  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  8101  			x = x.atomicLoadNext(0)
  8102  			continue
  8103  		}
  8104  		if !f(x.value) {
  8105  			break
  8106  		}
  8107  		x = x.atomicLoadNext(0)
  8108  	}
  8109  }
  8110  
  8111  // Len return the length of this skip set.
  8112  func (s *UintptrSetDesc) Len() int {
  8113  	return int(atomic.LoadInt64(&s.length))
  8114  }
  8115  
  8116  // StringSet represents a set based on skip list.
  8117  type StringSet struct {
  8118  	header       *stringNode
  8119  	length       int64
  8120  	highestLevel int64 // highest level for now
  8121  }
  8122  
  8123  type stringNode struct {
  8124  	value string
  8125  	score uint64
  8126  	next  optionalArray // [level]*stringNode
  8127  	mu    sync.Mutex
  8128  	flags bitflag
  8129  	level uint32
  8130  }
  8131  
  8132  func newStringNode(value string, level int) *stringNode {
  8133  	node := &stringNode{
  8134  		score: hash(value),
  8135  		value: value,
  8136  		level: uint32(level),
  8137  	}
  8138  	if level > op1 {
  8139  		node.next.extra = new([op2]unsafe.Pointer)
  8140  	}
  8141  	return node
  8142  }
  8143  
  8144  func (n *stringNode) loadNext(i int) *stringNode {
  8145  	return (*stringNode)(n.next.load(i))
  8146  }
  8147  
  8148  func (n *stringNode) storeNext(i int, node *stringNode) {
  8149  	n.next.store(i, unsafe.Pointer(node))
  8150  }
  8151  
  8152  func (n *stringNode) atomicLoadNext(i int) *stringNode {
  8153  	return (*stringNode)(n.next.atomicLoad(i))
  8154  }
  8155  
  8156  func (n *stringNode) atomicStoreNext(i int, node *stringNode) {
  8157  	n.next.atomicStore(i, unsafe.Pointer(node))
  8158  }
  8159  
  8160  // NewString return an empty string skip set.
  8161  func NewString() *StringSet {
  8162  	h := newStringNode("", maxLevel)
  8163  	h.flags.SetTrue(fullyLinked)
  8164  	return &StringSet{
  8165  		header:       h,
  8166  		highestLevel: defaultHighestLevel,
  8167  	}
  8168  }
  8169  
  8170  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  8171  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  8172  func (s *StringSet) findNodeRemove(value string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) int {
  8173  	score := hash(value)
  8174  	// lFound represents the index of the first layer at which it found a node.
  8175  	lFound, x := -1, s.header
  8176  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8177  		succ := x.atomicLoadNext(i)
  8178  		for succ != nil && succ.cmp(score, value) < 0 {
  8179  			x = succ
  8180  			succ = x.atomicLoadNext(i)
  8181  		}
  8182  		preds[i] = x
  8183  		succs[i] = succ
  8184  
  8185  		// Check if the value already in the skip list.
  8186  		if lFound == -1 && succ != nil && succ.cmp(score, value) == 0 {
  8187  			lFound = i
  8188  		}
  8189  	}
  8190  	return lFound
  8191  }
  8192  
  8193  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  8194  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  8195  func (s *StringSet) findNodeAdd(value string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) int {
  8196  	score := hash(value)
  8197  	x := s.header
  8198  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8199  		succ := x.atomicLoadNext(i)
  8200  		for succ != nil && succ.cmp(score, value) < 0 {
  8201  			x = succ
  8202  			succ = x.atomicLoadNext(i)
  8203  		}
  8204  		preds[i] = x
  8205  		succs[i] = succ
  8206  
  8207  		// Check if the value already in the skip list.
  8208  		if succ != nil && succ.cmp(score, value) == 0 {
  8209  			return i
  8210  		}
  8211  	}
  8212  	return -1
  8213  }
  8214  
  8215  func unlockString(preds [maxLevel]*stringNode, highestLevel int) {
  8216  	var prevPred *stringNode
  8217  	for i := highestLevel; i >= 0; i-- {
  8218  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  8219  			preds[i].mu.Unlock()
  8220  			prevPred = preds[i]
  8221  		}
  8222  	}
  8223  }
  8224  
  8225  // Add add the value into skip set, return true if this process insert the value into skip set,
  8226  // return false if this process can't insert this value, because another process has insert the same value.
  8227  //
  8228  // If the value is in the skip set but not fully linked, this process will wait until it is.
  8229  func (s *StringSet) Add(value string) bool {
  8230  	level := s.randomLevel()
  8231  	var preds, succs [maxLevel]*stringNode
  8232  	for {
  8233  		lFound := s.findNodeAdd(value, &preds, &succs)
  8234  		if lFound != -1 { // indicating the value is already in the skip-list
  8235  			nodeFound := succs[lFound]
  8236  			if !nodeFound.flags.Get(marked) {
  8237  				for !nodeFound.flags.Get(fullyLinked) {
  8238  					// The node is not yet fully linked, just waits until it is.
  8239  				}
  8240  				return false
  8241  			}
  8242  			// If the node is marked, represents some other thread is in the process of deleting this node,
  8243  			// we need to add this node in next loop.
  8244  			continue
  8245  		}
  8246  		// Add this node into skip list.
  8247  		var (
  8248  			highestLocked        = -1 // the highest level being locked by this process
  8249  			valid                = true
  8250  			pred, succ, prevPred *stringNode
  8251  		)
  8252  		for layer := 0; valid && layer < level; layer++ {
  8253  			pred = preds[layer]   // target node's previous node
  8254  			succ = succs[layer]   // target node's next node
  8255  			if pred != prevPred { // the node in this layer could be locked by previous loop
  8256  				pred.mu.Lock()
  8257  				highestLocked = layer
  8258  				prevPred = pred
  8259  			}
  8260  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  8261  			// It is valid if:
  8262  			// 1. The previous node and next node both are not marked.
  8263  			// 2. The previous node's next node is succ in this layer.
  8264  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  8265  		}
  8266  		if !valid {
  8267  			unlockString(preds, highestLocked)
  8268  			continue
  8269  		}
  8270  
  8271  		nn := newStringNode(value, level)
  8272  		for layer := 0; layer < level; layer++ {
  8273  			nn.storeNext(layer, succs[layer])
  8274  			preds[layer].atomicStoreNext(layer, nn)
  8275  		}
  8276  		nn.flags.SetTrue(fullyLinked)
  8277  		unlockString(preds, highestLocked)
  8278  		atomic.AddInt64(&s.length, 1)
  8279  		return true
  8280  	}
  8281  }
  8282  
  8283  func (s *StringSet) randomLevel() int {
  8284  	// Generate random level.
  8285  	level := randomLevel()
  8286  	// Update highest level if possible.
  8287  	for {
  8288  		hl := atomic.LoadInt64(&s.highestLevel)
  8289  		if int64(level) <= hl {
  8290  			break
  8291  		}
  8292  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  8293  			break
  8294  		}
  8295  	}
  8296  	return level
  8297  }
  8298  
  8299  // Contains check if the value is in the skip set.
  8300  func (s *StringSet) Contains(value string) bool {
  8301  	score := hash(value)
  8302  	x := s.header
  8303  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  8304  		nex := x.atomicLoadNext(i)
  8305  		for nex != nil && nex.cmp(score, value) < 0 {
  8306  			x = nex
  8307  			nex = x.atomicLoadNext(i)
  8308  		}
  8309  
  8310  		// Check if the value already in the skip list.
  8311  		if nex != nil && nex.cmp(score, value) == 0 {
  8312  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  8313  		}
  8314  	}
  8315  	return false
  8316  }
  8317  
  8318  // Remove a node from the skip set.
  8319  func (s *StringSet) Remove(value string) bool {
  8320  	var (
  8321  		nodeToRemove *stringNode
  8322  		isMarked     bool // represents if this operation mark the node
  8323  		topLayer     = -1
  8324  		preds, succs [maxLevel]*stringNode
  8325  	)
  8326  	for {
  8327  		lFound := s.findNodeRemove(value, &preds, &succs)
  8328  		if isMarked || // this process mark this node or we can find this node in the skip list
  8329  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  8330  			if !isMarked { // we don't mark this node for now
  8331  				nodeToRemove = succs[lFound]
  8332  				topLayer = lFound
  8333  				nodeToRemove.mu.Lock()
  8334  				if nodeToRemove.flags.Get(marked) {
  8335  					// The node is marked by another process,
  8336  					// the physical deletion will be accomplished by another process.
  8337  					nodeToRemove.mu.Unlock()
  8338  					return false
  8339  				}
  8340  				nodeToRemove.flags.SetTrue(marked)
  8341  				isMarked = true
  8342  			}
  8343  			// Accomplish the physical deletion.
  8344  			var (
  8345  				highestLocked        = -1 // the highest level being locked by this process
  8346  				valid                = true
  8347  				pred, succ, prevPred *stringNode
  8348  			)
  8349  			for layer := 0; valid && (layer <= topLayer); layer++ {
  8350  				pred, succ = preds[layer], succs[layer]
  8351  				if pred != prevPred { // the node in this layer could be locked by previous loop
  8352  					pred.mu.Lock()
  8353  					highestLocked = layer
  8354  					prevPred = pred
  8355  				}
  8356  				// valid check if there is another node has inserted into the skip list in this layer
  8357  				// during this process, or the previous is removed by another process.
  8358  				// It is valid if:
  8359  				// 1. the previous node exists.
  8360  				// 2. no another node has inserted into the skip list in this layer.
  8361  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  8362  			}
  8363  			if !valid {
  8364  				unlockString(preds, highestLocked)
  8365  				continue
  8366  			}
  8367  			for i := topLayer; i >= 0; i-- {
  8368  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  8369  				// So we don't need `nodeToRemove.loadNext`
  8370  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  8371  			}
  8372  			nodeToRemove.mu.Unlock()
  8373  			unlockString(preds, highestLocked)
  8374  			atomic.AddInt64(&s.length, -1)
  8375  			return true
  8376  		}
  8377  		return false
  8378  	}
  8379  }
  8380  
  8381  // Range calls f sequentially for each value present in the skip set.
  8382  // If f returns false, range stops the iteration.
  8383  func (s *StringSet) Range(f func(value string) bool) {
  8384  	x := s.header.atomicLoadNext(0)
  8385  	for x != nil {
  8386  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  8387  			x = x.atomicLoadNext(0)
  8388  			continue
  8389  		}
  8390  		if !f(x.value) {
  8391  			break
  8392  		}
  8393  		x = x.atomicLoadNext(0)
  8394  	}
  8395  }
  8396  
  8397  // Len return the length of this skip set.
  8398  func (s *StringSet) Len() int {
  8399  	return int(atomic.LoadInt64(&s.length))
  8400  }
  8401  
  8402  // Return 1 if n is bigger, 0 if equal, else -1.
  8403  func (n *stringNode) cmp(score uint64, value string) int {
  8404  	if n.score > score {
  8405  		return 1
  8406  	} else if n.score == score {
  8407  		return cmpstring(n.value, value)
  8408  	}
  8409  	return -1
  8410  }