github.com/bytedance/gopkg@v0.0.0-20240514070511-01b2cbcf35e1/collection/skipset/types.go (about)

     1  // Copyright 2021 ByteDance Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Code generated by go run types_gen.go; DO NOT EDIT.
    16  package skipset
    17  
    18  import (
    19  	"sync"
    20  	"sync/atomic"
    21  	"unsafe"
    22  )
    23  
    24  // Float32Set represents a set based on skip list in ascending order.
    25  type Float32Set struct {
    26  	header       *float32Node
    27  	length       int64
    28  	highestLevel int64 // highest level for now
    29  }
    30  
    31  type float32Node struct {
    32  	value float32
    33  	next  optionalArray // [level]*float32Node
    34  	mu    sync.Mutex
    35  	flags bitflag
    36  	level uint32
    37  }
    38  
    39  func newFloat32Node(value float32, level int) *float32Node {
    40  	node := &float32Node{
    41  		value: value,
    42  		level: uint32(level),
    43  	}
    44  	if level > op1 {
    45  		node.next.extra = new([op2]unsafe.Pointer)
    46  	}
    47  	return node
    48  }
    49  
    50  func (n *float32Node) loadNext(i int) *float32Node {
    51  	return (*float32Node)(n.next.load(i))
    52  }
    53  
    54  func (n *float32Node) storeNext(i int, node *float32Node) {
    55  	n.next.store(i, unsafe.Pointer(node))
    56  }
    57  
    58  func (n *float32Node) atomicLoadNext(i int) *float32Node {
    59  	return (*float32Node)(n.next.atomicLoad(i))
    60  }
    61  
    62  func (n *float32Node) atomicStoreNext(i int, node *float32Node) {
    63  	n.next.atomicStore(i, unsafe.Pointer(node))
    64  }
    65  
    66  func (n *float32Node) lessthan(value float32) bool {
    67  	return n.value < value
    68  }
    69  
    70  func (n *float32Node) equal(value float32) bool {
    71  	return n.value == value
    72  }
    73  
    74  // NewFloat32 return an empty float32 skip set in ascending order.
    75  func NewFloat32() *Float32Set {
    76  	h := newFloat32Node(0, maxLevel)
    77  	h.flags.SetTrue(fullyLinked)
    78  	return &Float32Set{
    79  		header:       h,
    80  		highestLevel: defaultHighestLevel,
    81  	}
    82  }
    83  
    84  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
    85  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
    86  func (s *Float32Set) findNodeRemove(value float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) int {
    87  	// lFound represents the index of the first layer at which it found a node.
    88  	lFound, x := -1, s.header
    89  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
    90  		succ := x.atomicLoadNext(i)
    91  		for succ != nil && succ.lessthan(value) {
    92  			x = succ
    93  			succ = x.atomicLoadNext(i)
    94  		}
    95  		preds[i] = x
    96  		succs[i] = succ
    97  
    98  		// Check if the value already in the skip list.
    99  		if lFound == -1 && succ != nil && succ.equal(value) {
   100  			lFound = i
   101  		}
   102  	}
   103  	return lFound
   104  }
   105  
   106  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
   107  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   108  func (s *Float32Set) findNodeAdd(value float32, preds *[maxLevel]*float32Node, succs *[maxLevel]*float32Node) int {
   109  	x := s.header
   110  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   111  		succ := x.atomicLoadNext(i)
   112  		for succ != nil && succ.lessthan(value) {
   113  			x = succ
   114  			succ = x.atomicLoadNext(i)
   115  		}
   116  		preds[i] = x
   117  		succs[i] = succ
   118  
   119  		// Check if the value already in the skip list.
   120  		if succ != nil && succ.equal(value) {
   121  			return i
   122  		}
   123  	}
   124  	return -1
   125  }
   126  
   127  func unlockFloat32(preds [maxLevel]*float32Node, highestLevel int) {
   128  	var prevPred *float32Node
   129  	for i := highestLevel; i >= 0; i-- {
   130  		if preds[i] != prevPred { // the node could be unlocked by previous loop
   131  			preds[i].mu.Unlock()
   132  			prevPred = preds[i]
   133  		}
   134  	}
   135  }
   136  
   137  // Add add the value into skip set, return true if this process insert the value into skip set,
   138  // return false if this process can't insert this value, because another process has insert the same value.
   139  //
   140  // If the value is in the skip set but not fully linked, this process will wait until it is.
   141  func (s *Float32Set) Add(value float32) bool {
   142  	level := s.randomlevel()
   143  	var preds, succs [maxLevel]*float32Node
   144  	for {
   145  		lFound := s.findNodeAdd(value, &preds, &succs)
   146  		if lFound != -1 { // indicating the value is already in the skip-list
   147  			nodeFound := succs[lFound]
   148  			if !nodeFound.flags.Get(marked) {
   149  				for !nodeFound.flags.Get(fullyLinked) {
   150  					// The node is not yet fully linked, just waits until it is.
   151  				}
   152  				return false
   153  			}
   154  			// If the node is marked, represents some other thread is in the process of deleting this node,
   155  			// we need to add this node in next loop.
   156  			continue
   157  		}
   158  		// Add this node into skip list.
   159  		var (
   160  			highestLocked        = -1 // the highest level being locked by this process
   161  			valid                = true
   162  			pred, succ, prevPred *float32Node
   163  		)
   164  		for layer := 0; valid && layer < level; layer++ {
   165  			pred = preds[layer]   // target node's previous node
   166  			succ = succs[layer]   // target node's next node
   167  			if pred != prevPred { // the node in this layer could be locked by previous loop
   168  				pred.mu.Lock()
   169  				highestLocked = layer
   170  				prevPred = pred
   171  			}
   172  			// valid check if there is another node has inserted into the skip list in this layer during this process.
   173  			// It is valid if:
   174  			// 1. The previous node and next node both are not marked.
   175  			// 2. The previous node's next node is succ in this layer.
   176  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
   177  		}
   178  		if !valid {
   179  			unlockFloat32(preds, highestLocked)
   180  			continue
   181  		}
   182  
   183  		nn := newFloat32Node(value, level)
   184  		for layer := 0; layer < level; layer++ {
   185  			nn.storeNext(layer, succs[layer])
   186  			preds[layer].atomicStoreNext(layer, nn)
   187  		}
   188  		nn.flags.SetTrue(fullyLinked)
   189  		unlockFloat32(preds, highestLocked)
   190  		atomic.AddInt64(&s.length, 1)
   191  		return true
   192  	}
   193  }
   194  
   195  func (s *Float32Set) randomlevel() int {
   196  	// Generate random level.
   197  	level := randomLevel()
   198  	// Update highest level if possible.
   199  	for {
   200  		hl := atomic.LoadInt64(&s.highestLevel)
   201  		if int64(level) <= hl {
   202  			break
   203  		}
   204  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
   205  			break
   206  		}
   207  	}
   208  	return level
   209  }
   210  
   211  // Contains check if the value is in the skip set.
   212  func (s *Float32Set) Contains(value float32) bool {
   213  	x := s.header
   214  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   215  		nex := x.atomicLoadNext(i)
   216  		for nex != nil && nex.lessthan(value) {
   217  			x = nex
   218  			nex = x.atomicLoadNext(i)
   219  		}
   220  
   221  		// Check if the value already in the skip list.
   222  		if nex != nil && nex.equal(value) {
   223  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
   224  		}
   225  	}
   226  	return false
   227  }
   228  
   229  // Remove a node from the skip set.
   230  func (s *Float32Set) Remove(value float32) bool {
   231  	var (
   232  		nodeToRemove *float32Node
   233  		isMarked     bool // represents if this operation mark the node
   234  		topLayer     = -1
   235  		preds, succs [maxLevel]*float32Node
   236  	)
   237  	for {
   238  		lFound := s.findNodeRemove(value, &preds, &succs)
   239  		if isMarked || // this process mark this node or we can find this node in the skip list
   240  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
   241  			if !isMarked { // we don't mark this node for now
   242  				nodeToRemove = succs[lFound]
   243  				topLayer = lFound
   244  				nodeToRemove.mu.Lock()
   245  				if nodeToRemove.flags.Get(marked) {
   246  					// The node is marked by another process,
   247  					// the physical deletion will be accomplished by another process.
   248  					nodeToRemove.mu.Unlock()
   249  					return false
   250  				}
   251  				nodeToRemove.flags.SetTrue(marked)
   252  				isMarked = true
   253  			}
   254  			// Accomplish the physical deletion.
   255  			var (
   256  				highestLocked        = -1 // the highest level being locked by this process
   257  				valid                = true
   258  				pred, succ, prevPred *float32Node
   259  			)
   260  			for layer := 0; valid && (layer <= topLayer); layer++ {
   261  				pred, succ = preds[layer], succs[layer]
   262  				if pred != prevPred { // the node in this layer could be locked by previous loop
   263  					pred.mu.Lock()
   264  					highestLocked = layer
   265  					prevPred = pred
   266  				}
   267  				// valid check if there is another node has inserted into the skip list in this layer
   268  				// during this process, or the previous is removed by another process.
   269  				// It is valid if:
   270  				// 1. the previous node exists.
   271  				// 2. no another node has inserted into the skip list in this layer.
   272  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
   273  			}
   274  			if !valid {
   275  				unlockFloat32(preds, highestLocked)
   276  				continue
   277  			}
   278  			for i := topLayer; i >= 0; i-- {
   279  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
   280  				// So we don't need `nodeToRemove.loadNext`
   281  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
   282  			}
   283  			nodeToRemove.mu.Unlock()
   284  			unlockFloat32(preds, highestLocked)
   285  			atomic.AddInt64(&s.length, -1)
   286  			return true
   287  		}
   288  		return false
   289  	}
   290  }
   291  
   292  // Range calls f sequentially for each value present in the skip set.
   293  // If f returns false, range stops the iteration.
   294  func (s *Float32Set) Range(f func(value float32) bool) {
   295  	x := s.header.atomicLoadNext(0)
   296  	for x != nil {
   297  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
   298  			x = x.atomicLoadNext(0)
   299  			continue
   300  		}
   301  		if !f(x.value) {
   302  			break
   303  		}
   304  		x = x.atomicLoadNext(0)
   305  	}
   306  }
   307  
   308  // Len return the length of this skip set.
   309  func (s *Float32Set) Len() int {
   310  	return int(atomic.LoadInt64(&s.length))
   311  }
   312  
   313  // Float32SetDesc represents a set based on skip list in descending order.
   314  type Float32SetDesc struct {
   315  	header       *float32NodeDesc
   316  	length       int64
   317  	highestLevel int64 // highest level for now
   318  }
   319  
   320  type float32NodeDesc struct {
   321  	value float32
   322  	next  optionalArray // [level]*float32NodeDesc
   323  	mu    sync.Mutex
   324  	flags bitflag
   325  	level uint32
   326  }
   327  
   328  func newFloat32NodeDesc(value float32, level int) *float32NodeDesc {
   329  	node := &float32NodeDesc{
   330  		value: value,
   331  		level: uint32(level),
   332  	}
   333  	if level > op1 {
   334  		node.next.extra = new([op2]unsafe.Pointer)
   335  	}
   336  	return node
   337  }
   338  
   339  func (n *float32NodeDesc) loadNext(i int) *float32NodeDesc {
   340  	return (*float32NodeDesc)(n.next.load(i))
   341  }
   342  
   343  func (n *float32NodeDesc) storeNext(i int, node *float32NodeDesc) {
   344  	n.next.store(i, unsafe.Pointer(node))
   345  }
   346  
   347  func (n *float32NodeDesc) atomicLoadNext(i int) *float32NodeDesc {
   348  	return (*float32NodeDesc)(n.next.atomicLoad(i))
   349  }
   350  
   351  func (n *float32NodeDesc) atomicStoreNext(i int, node *float32NodeDesc) {
   352  	n.next.atomicStore(i, unsafe.Pointer(node))
   353  }
   354  
   355  func (n *float32NodeDesc) lessthan(value float32) bool {
   356  	return n.value > value
   357  }
   358  
   359  func (n *float32NodeDesc) equal(value float32) bool {
   360  	return n.value == value
   361  }
   362  
   363  // NewFloat32Desc return an empty float32 skip set in descending order.
   364  func NewFloat32Desc() *Float32SetDesc {
   365  	h := newFloat32NodeDesc(0, maxLevel)
   366  	h.flags.SetTrue(fullyLinked)
   367  	return &Float32SetDesc{
   368  		header:       h,
   369  		highestLevel: defaultHighestLevel,
   370  	}
   371  }
   372  
   373  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
   374  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   375  func (s *Float32SetDesc) findNodeRemove(value float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) int {
   376  	// lFound represents the index of the first layer at which it found a node.
   377  	lFound, x := -1, s.header
   378  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   379  		succ := x.atomicLoadNext(i)
   380  		for succ != nil && succ.lessthan(value) {
   381  			x = succ
   382  			succ = x.atomicLoadNext(i)
   383  		}
   384  		preds[i] = x
   385  		succs[i] = succ
   386  
   387  		// Check if the value already in the skip list.
   388  		if lFound == -1 && succ != nil && succ.equal(value) {
   389  			lFound = i
   390  		}
   391  	}
   392  	return lFound
   393  }
   394  
   395  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
   396  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   397  func (s *Float32SetDesc) findNodeAdd(value float32, preds *[maxLevel]*float32NodeDesc, succs *[maxLevel]*float32NodeDesc) int {
   398  	x := s.header
   399  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   400  		succ := x.atomicLoadNext(i)
   401  		for succ != nil && succ.lessthan(value) {
   402  			x = succ
   403  			succ = x.atomicLoadNext(i)
   404  		}
   405  		preds[i] = x
   406  		succs[i] = succ
   407  
   408  		// Check if the value already in the skip list.
   409  		if succ != nil && succ.equal(value) {
   410  			return i
   411  		}
   412  	}
   413  	return -1
   414  }
   415  
   416  func unlockFloat32Desc(preds [maxLevel]*float32NodeDesc, highestLevel int) {
   417  	var prevPred *float32NodeDesc
   418  	for i := highestLevel; i >= 0; i-- {
   419  		if preds[i] != prevPred { // the node could be unlocked by previous loop
   420  			preds[i].mu.Unlock()
   421  			prevPred = preds[i]
   422  		}
   423  	}
   424  }
   425  
   426  // Add add the value into skip set, return true if this process insert the value into skip set,
   427  // return false if this process can't insert this value, because another process has insert the same value.
   428  //
   429  // If the value is in the skip set but not fully linked, this process will wait until it is.
   430  func (s *Float32SetDesc) Add(value float32) bool {
   431  	level := s.randomlevel()
   432  	var preds, succs [maxLevel]*float32NodeDesc
   433  	for {
   434  		lFound := s.findNodeAdd(value, &preds, &succs)
   435  		if lFound != -1 { // indicating the value is already in the skip-list
   436  			nodeFound := succs[lFound]
   437  			if !nodeFound.flags.Get(marked) {
   438  				for !nodeFound.flags.Get(fullyLinked) {
   439  					// The node is not yet fully linked, just waits until it is.
   440  				}
   441  				return false
   442  			}
   443  			// If the node is marked, represents some other thread is in the process of deleting this node,
   444  			// we need to add this node in next loop.
   445  			continue
   446  		}
   447  		// Add this node into skip list.
   448  		var (
   449  			highestLocked        = -1 // the highest level being locked by this process
   450  			valid                = true
   451  			pred, succ, prevPred *float32NodeDesc
   452  		)
   453  		for layer := 0; valid && layer < level; layer++ {
   454  			pred = preds[layer]   // target node's previous node
   455  			succ = succs[layer]   // target node's next node
   456  			if pred != prevPred { // the node in this layer could be locked by previous loop
   457  				pred.mu.Lock()
   458  				highestLocked = layer
   459  				prevPred = pred
   460  			}
   461  			// valid check if there is another node has inserted into the skip list in this layer during this process.
   462  			// It is valid if:
   463  			// 1. The previous node and next node both are not marked.
   464  			// 2. The previous node's next node is succ in this layer.
   465  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
   466  		}
   467  		if !valid {
   468  			unlockFloat32Desc(preds, highestLocked)
   469  			continue
   470  		}
   471  
   472  		nn := newFloat32NodeDesc(value, level)
   473  		for layer := 0; layer < level; layer++ {
   474  			nn.storeNext(layer, succs[layer])
   475  			preds[layer].atomicStoreNext(layer, nn)
   476  		}
   477  		nn.flags.SetTrue(fullyLinked)
   478  		unlockFloat32Desc(preds, highestLocked)
   479  		atomic.AddInt64(&s.length, 1)
   480  		return true
   481  	}
   482  }
   483  
   484  func (s *Float32SetDesc) randomlevel() int {
   485  	// Generate random level.
   486  	level := randomLevel()
   487  	// Update highest level if possible.
   488  	for {
   489  		hl := atomic.LoadInt64(&s.highestLevel)
   490  		if int64(level) <= hl {
   491  			break
   492  		}
   493  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
   494  			break
   495  		}
   496  	}
   497  	return level
   498  }
   499  
   500  // Contains check if the value is in the skip set.
   501  func (s *Float32SetDesc) Contains(value float32) bool {
   502  	x := s.header
   503  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   504  		nex := x.atomicLoadNext(i)
   505  		for nex != nil && nex.lessthan(value) {
   506  			x = nex
   507  			nex = x.atomicLoadNext(i)
   508  		}
   509  
   510  		// Check if the value already in the skip list.
   511  		if nex != nil && nex.equal(value) {
   512  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
   513  		}
   514  	}
   515  	return false
   516  }
   517  
   518  // Remove a node from the skip set.
   519  func (s *Float32SetDesc) Remove(value float32) bool {
   520  	var (
   521  		nodeToRemove *float32NodeDesc
   522  		isMarked     bool // represents if this operation mark the node
   523  		topLayer     = -1
   524  		preds, succs [maxLevel]*float32NodeDesc
   525  	)
   526  	for {
   527  		lFound := s.findNodeRemove(value, &preds, &succs)
   528  		if isMarked || // this process mark this node or we can find this node in the skip list
   529  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
   530  			if !isMarked { // we don't mark this node for now
   531  				nodeToRemove = succs[lFound]
   532  				topLayer = lFound
   533  				nodeToRemove.mu.Lock()
   534  				if nodeToRemove.flags.Get(marked) {
   535  					// The node is marked by another process,
   536  					// the physical deletion will be accomplished by another process.
   537  					nodeToRemove.mu.Unlock()
   538  					return false
   539  				}
   540  				nodeToRemove.flags.SetTrue(marked)
   541  				isMarked = true
   542  			}
   543  			// Accomplish the physical deletion.
   544  			var (
   545  				highestLocked        = -1 // the highest level being locked by this process
   546  				valid                = true
   547  				pred, succ, prevPred *float32NodeDesc
   548  			)
   549  			for layer := 0; valid && (layer <= topLayer); layer++ {
   550  				pred, succ = preds[layer], succs[layer]
   551  				if pred != prevPred { // the node in this layer could be locked by previous loop
   552  					pred.mu.Lock()
   553  					highestLocked = layer
   554  					prevPred = pred
   555  				}
   556  				// valid check if there is another node has inserted into the skip list in this layer
   557  				// during this process, or the previous is removed by another process.
   558  				// It is valid if:
   559  				// 1. the previous node exists.
   560  				// 2. no another node has inserted into the skip list in this layer.
   561  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
   562  			}
   563  			if !valid {
   564  				unlockFloat32Desc(preds, highestLocked)
   565  				continue
   566  			}
   567  			for i := topLayer; i >= 0; i-- {
   568  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
   569  				// So we don't need `nodeToRemove.loadNext`
   570  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
   571  			}
   572  			nodeToRemove.mu.Unlock()
   573  			unlockFloat32Desc(preds, highestLocked)
   574  			atomic.AddInt64(&s.length, -1)
   575  			return true
   576  		}
   577  		return false
   578  	}
   579  }
   580  
   581  // Range calls f sequentially for each value present in the skip set.
   582  // If f returns false, range stops the iteration.
   583  func (s *Float32SetDesc) Range(f func(value float32) bool) {
   584  	x := s.header.atomicLoadNext(0)
   585  	for x != nil {
   586  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
   587  			x = x.atomicLoadNext(0)
   588  			continue
   589  		}
   590  		if !f(x.value) {
   591  			break
   592  		}
   593  		x = x.atomicLoadNext(0)
   594  	}
   595  }
   596  
   597  // Len return the length of this skip set.
   598  func (s *Float32SetDesc) Len() int {
   599  	return int(atomic.LoadInt64(&s.length))
   600  }
   601  
   602  // Float64Set represents a set based on skip list in ascending order.
   603  type Float64Set struct {
   604  	header       *float64Node
   605  	length       int64
   606  	highestLevel int64 // highest level for now
   607  }
   608  
   609  type float64Node struct {
   610  	value float64
   611  	next  optionalArray // [level]*float64Node
   612  	mu    sync.Mutex
   613  	flags bitflag
   614  	level uint32
   615  }
   616  
   617  func newFloat64Node(value float64, level int) *float64Node {
   618  	node := &float64Node{
   619  		value: value,
   620  		level: uint32(level),
   621  	}
   622  	if level > op1 {
   623  		node.next.extra = new([op2]unsafe.Pointer)
   624  	}
   625  	return node
   626  }
   627  
   628  func (n *float64Node) loadNext(i int) *float64Node {
   629  	return (*float64Node)(n.next.load(i))
   630  }
   631  
   632  func (n *float64Node) storeNext(i int, node *float64Node) {
   633  	n.next.store(i, unsafe.Pointer(node))
   634  }
   635  
   636  func (n *float64Node) atomicLoadNext(i int) *float64Node {
   637  	return (*float64Node)(n.next.atomicLoad(i))
   638  }
   639  
   640  func (n *float64Node) atomicStoreNext(i int, node *float64Node) {
   641  	n.next.atomicStore(i, unsafe.Pointer(node))
   642  }
   643  
   644  func (n *float64Node) lessthan(value float64) bool {
   645  	return n.value < value
   646  }
   647  
   648  func (n *float64Node) equal(value float64) bool {
   649  	return n.value == value
   650  }
   651  
   652  // NewFloat64 return an empty float64 skip set in ascending order.
   653  func NewFloat64() *Float64Set {
   654  	h := newFloat64Node(0, maxLevel)
   655  	h.flags.SetTrue(fullyLinked)
   656  	return &Float64Set{
   657  		header:       h,
   658  		highestLevel: defaultHighestLevel,
   659  	}
   660  }
   661  
   662  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
   663  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   664  func (s *Float64Set) findNodeRemove(value float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) int {
   665  	// lFound represents the index of the first layer at which it found a node.
   666  	lFound, x := -1, s.header
   667  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   668  		succ := x.atomicLoadNext(i)
   669  		for succ != nil && succ.lessthan(value) {
   670  			x = succ
   671  			succ = x.atomicLoadNext(i)
   672  		}
   673  		preds[i] = x
   674  		succs[i] = succ
   675  
   676  		// Check if the value already in the skip list.
   677  		if lFound == -1 && succ != nil && succ.equal(value) {
   678  			lFound = i
   679  		}
   680  	}
   681  	return lFound
   682  }
   683  
   684  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
   685  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   686  func (s *Float64Set) findNodeAdd(value float64, preds *[maxLevel]*float64Node, succs *[maxLevel]*float64Node) int {
   687  	x := s.header
   688  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   689  		succ := x.atomicLoadNext(i)
   690  		for succ != nil && succ.lessthan(value) {
   691  			x = succ
   692  			succ = x.atomicLoadNext(i)
   693  		}
   694  		preds[i] = x
   695  		succs[i] = succ
   696  
   697  		// Check if the value already in the skip list.
   698  		if succ != nil && succ.equal(value) {
   699  			return i
   700  		}
   701  	}
   702  	return -1
   703  }
   704  
   705  func unlockFloat64(preds [maxLevel]*float64Node, highestLevel int) {
   706  	var prevPred *float64Node
   707  	for i := highestLevel; i >= 0; i-- {
   708  		if preds[i] != prevPred { // the node could be unlocked by previous loop
   709  			preds[i].mu.Unlock()
   710  			prevPred = preds[i]
   711  		}
   712  	}
   713  }
   714  
   715  // Add add the value into skip set, return true if this process insert the value into skip set,
   716  // return false if this process can't insert this value, because another process has insert the same value.
   717  //
   718  // If the value is in the skip set but not fully linked, this process will wait until it is.
   719  func (s *Float64Set) Add(value float64) bool {
   720  	level := s.randomlevel()
   721  	var preds, succs [maxLevel]*float64Node
   722  	for {
   723  		lFound := s.findNodeAdd(value, &preds, &succs)
   724  		if lFound != -1 { // indicating the value is already in the skip-list
   725  			nodeFound := succs[lFound]
   726  			if !nodeFound.flags.Get(marked) {
   727  				for !nodeFound.flags.Get(fullyLinked) {
   728  					// The node is not yet fully linked, just waits until it is.
   729  				}
   730  				return false
   731  			}
   732  			// If the node is marked, represents some other thread is in the process of deleting this node,
   733  			// we need to add this node in next loop.
   734  			continue
   735  		}
   736  		// Add this node into skip list.
   737  		var (
   738  			highestLocked        = -1 // the highest level being locked by this process
   739  			valid                = true
   740  			pred, succ, prevPred *float64Node
   741  		)
   742  		for layer := 0; valid && layer < level; layer++ {
   743  			pred = preds[layer]   // target node's previous node
   744  			succ = succs[layer]   // target node's next node
   745  			if pred != prevPred { // the node in this layer could be locked by previous loop
   746  				pred.mu.Lock()
   747  				highestLocked = layer
   748  				prevPred = pred
   749  			}
   750  			// valid check if there is another node has inserted into the skip list in this layer during this process.
   751  			// It is valid if:
   752  			// 1. The previous node and next node both are not marked.
   753  			// 2. The previous node's next node is succ in this layer.
   754  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
   755  		}
   756  		if !valid {
   757  			unlockFloat64(preds, highestLocked)
   758  			continue
   759  		}
   760  
   761  		nn := newFloat64Node(value, level)
   762  		for layer := 0; layer < level; layer++ {
   763  			nn.storeNext(layer, succs[layer])
   764  			preds[layer].atomicStoreNext(layer, nn)
   765  		}
   766  		nn.flags.SetTrue(fullyLinked)
   767  		unlockFloat64(preds, highestLocked)
   768  		atomic.AddInt64(&s.length, 1)
   769  		return true
   770  	}
   771  }
   772  
   773  func (s *Float64Set) randomlevel() int {
   774  	// Generate random level.
   775  	level := randomLevel()
   776  	// Update highest level if possible.
   777  	for {
   778  		hl := atomic.LoadInt64(&s.highestLevel)
   779  		if int64(level) <= hl {
   780  			break
   781  		}
   782  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
   783  			break
   784  		}
   785  	}
   786  	return level
   787  }
   788  
   789  // Contains check if the value is in the skip set.
   790  func (s *Float64Set) Contains(value float64) bool {
   791  	x := s.header
   792  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   793  		nex := x.atomicLoadNext(i)
   794  		for nex != nil && nex.lessthan(value) {
   795  			x = nex
   796  			nex = x.atomicLoadNext(i)
   797  		}
   798  
   799  		// Check if the value already in the skip list.
   800  		if nex != nil && nex.equal(value) {
   801  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
   802  		}
   803  	}
   804  	return false
   805  }
   806  
   807  // Remove a node from the skip set.
   808  func (s *Float64Set) Remove(value float64) bool {
   809  	var (
   810  		nodeToRemove *float64Node
   811  		isMarked     bool // represents if this operation mark the node
   812  		topLayer     = -1
   813  		preds, succs [maxLevel]*float64Node
   814  	)
   815  	for {
   816  		lFound := s.findNodeRemove(value, &preds, &succs)
   817  		if isMarked || // this process mark this node or we can find this node in the skip list
   818  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
   819  			if !isMarked { // we don't mark this node for now
   820  				nodeToRemove = succs[lFound]
   821  				topLayer = lFound
   822  				nodeToRemove.mu.Lock()
   823  				if nodeToRemove.flags.Get(marked) {
   824  					// The node is marked by another process,
   825  					// the physical deletion will be accomplished by another process.
   826  					nodeToRemove.mu.Unlock()
   827  					return false
   828  				}
   829  				nodeToRemove.flags.SetTrue(marked)
   830  				isMarked = true
   831  			}
   832  			// Accomplish the physical deletion.
   833  			var (
   834  				highestLocked        = -1 // the highest level being locked by this process
   835  				valid                = true
   836  				pred, succ, prevPred *float64Node
   837  			)
   838  			for layer := 0; valid && (layer <= topLayer); layer++ {
   839  				pred, succ = preds[layer], succs[layer]
   840  				if pred != prevPred { // the node in this layer could be locked by previous loop
   841  					pred.mu.Lock()
   842  					highestLocked = layer
   843  					prevPred = pred
   844  				}
   845  				// valid check if there is another node has inserted into the skip list in this layer
   846  				// during this process, or the previous is removed by another process.
   847  				// It is valid if:
   848  				// 1. the previous node exists.
   849  				// 2. no another node has inserted into the skip list in this layer.
   850  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
   851  			}
   852  			if !valid {
   853  				unlockFloat64(preds, highestLocked)
   854  				continue
   855  			}
   856  			for i := topLayer; i >= 0; i-- {
   857  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
   858  				// So we don't need `nodeToRemove.loadNext`
   859  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
   860  			}
   861  			nodeToRemove.mu.Unlock()
   862  			unlockFloat64(preds, highestLocked)
   863  			atomic.AddInt64(&s.length, -1)
   864  			return true
   865  		}
   866  		return false
   867  	}
   868  }
   869  
   870  // Range calls f sequentially for each value present in the skip set.
   871  // If f returns false, range stops the iteration.
   872  func (s *Float64Set) Range(f func(value float64) bool) {
   873  	x := s.header.atomicLoadNext(0)
   874  	for x != nil {
   875  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
   876  			x = x.atomicLoadNext(0)
   877  			continue
   878  		}
   879  		if !f(x.value) {
   880  			break
   881  		}
   882  		x = x.atomicLoadNext(0)
   883  	}
   884  }
   885  
   886  // Len return the length of this skip set.
   887  func (s *Float64Set) Len() int {
   888  	return int(atomic.LoadInt64(&s.length))
   889  }
   890  
   891  // Float64SetDesc represents a set based on skip list in descending order.
   892  type Float64SetDesc struct {
   893  	header       *float64NodeDesc
   894  	length       int64
   895  	highestLevel int64 // highest level for now
   896  }
   897  
   898  type float64NodeDesc struct {
   899  	value float64
   900  	next  optionalArray // [level]*float64NodeDesc
   901  	mu    sync.Mutex
   902  	flags bitflag
   903  	level uint32
   904  }
   905  
   906  func newFloat64NodeDesc(value float64, level int) *float64NodeDesc {
   907  	node := &float64NodeDesc{
   908  		value: value,
   909  		level: uint32(level),
   910  	}
   911  	if level > op1 {
   912  		node.next.extra = new([op2]unsafe.Pointer)
   913  	}
   914  	return node
   915  }
   916  
   917  func (n *float64NodeDesc) loadNext(i int) *float64NodeDesc {
   918  	return (*float64NodeDesc)(n.next.load(i))
   919  }
   920  
   921  func (n *float64NodeDesc) storeNext(i int, node *float64NodeDesc) {
   922  	n.next.store(i, unsafe.Pointer(node))
   923  }
   924  
   925  func (n *float64NodeDesc) atomicLoadNext(i int) *float64NodeDesc {
   926  	return (*float64NodeDesc)(n.next.atomicLoad(i))
   927  }
   928  
   929  func (n *float64NodeDesc) atomicStoreNext(i int, node *float64NodeDesc) {
   930  	n.next.atomicStore(i, unsafe.Pointer(node))
   931  }
   932  
   933  func (n *float64NodeDesc) lessthan(value float64) bool {
   934  	return n.value > value
   935  }
   936  
   937  func (n *float64NodeDesc) equal(value float64) bool {
   938  	return n.value == value
   939  }
   940  
   941  // NewFloat64Desc return an empty float64 skip set in descending order.
   942  func NewFloat64Desc() *Float64SetDesc {
   943  	h := newFloat64NodeDesc(0, maxLevel)
   944  	h.flags.SetTrue(fullyLinked)
   945  	return &Float64SetDesc{
   946  		header:       h,
   947  		highestLevel: defaultHighestLevel,
   948  	}
   949  }
   950  
   951  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
   952  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   953  func (s *Float64SetDesc) findNodeRemove(value float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) int {
   954  	// lFound represents the index of the first layer at which it found a node.
   955  	lFound, x := -1, s.header
   956  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   957  		succ := x.atomicLoadNext(i)
   958  		for succ != nil && succ.lessthan(value) {
   959  			x = succ
   960  			succ = x.atomicLoadNext(i)
   961  		}
   962  		preds[i] = x
   963  		succs[i] = succ
   964  
   965  		// Check if the value already in the skip list.
   966  		if lFound == -1 && succ != nil && succ.equal(value) {
   967  			lFound = i
   968  		}
   969  	}
   970  	return lFound
   971  }
   972  
   973  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
   974  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
   975  func (s *Float64SetDesc) findNodeAdd(value float64, preds *[maxLevel]*float64NodeDesc, succs *[maxLevel]*float64NodeDesc) int {
   976  	x := s.header
   977  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
   978  		succ := x.atomicLoadNext(i)
   979  		for succ != nil && succ.lessthan(value) {
   980  			x = succ
   981  			succ = x.atomicLoadNext(i)
   982  		}
   983  		preds[i] = x
   984  		succs[i] = succ
   985  
   986  		// Check if the value already in the skip list.
   987  		if succ != nil && succ.equal(value) {
   988  			return i
   989  		}
   990  	}
   991  	return -1
   992  }
   993  
   994  func unlockFloat64Desc(preds [maxLevel]*float64NodeDesc, highestLevel int) {
   995  	var prevPred *float64NodeDesc
   996  	for i := highestLevel; i >= 0; i-- {
   997  		if preds[i] != prevPred { // the node could be unlocked by previous loop
   998  			preds[i].mu.Unlock()
   999  			prevPred = preds[i]
  1000  		}
  1001  	}
  1002  }
  1003  
  1004  // Add add the value into skip set, return true if this process insert the value into skip set,
  1005  // return false if this process can't insert this value, because another process has insert the same value.
  1006  //
  1007  // If the value is in the skip set but not fully linked, this process will wait until it is.
  1008  func (s *Float64SetDesc) Add(value float64) bool {
  1009  	level := s.randomlevel()
  1010  	var preds, succs [maxLevel]*float64NodeDesc
  1011  	for {
  1012  		lFound := s.findNodeAdd(value, &preds, &succs)
  1013  		if lFound != -1 { // indicating the value is already in the skip-list
  1014  			nodeFound := succs[lFound]
  1015  			if !nodeFound.flags.Get(marked) {
  1016  				for !nodeFound.flags.Get(fullyLinked) {
  1017  					// The node is not yet fully linked, just waits until it is.
  1018  				}
  1019  				return false
  1020  			}
  1021  			// If the node is marked, represents some other thread is in the process of deleting this node,
  1022  			// we need to add this node in next loop.
  1023  			continue
  1024  		}
  1025  		// Add this node into skip list.
  1026  		var (
  1027  			highestLocked        = -1 // the highest level being locked by this process
  1028  			valid                = true
  1029  			pred, succ, prevPred *float64NodeDesc
  1030  		)
  1031  		for layer := 0; valid && layer < level; layer++ {
  1032  			pred = preds[layer]   // target node's previous node
  1033  			succ = succs[layer]   // target node's next node
  1034  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1035  				pred.mu.Lock()
  1036  				highestLocked = layer
  1037  				prevPred = pred
  1038  			}
  1039  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  1040  			// It is valid if:
  1041  			// 1. The previous node and next node both are not marked.
  1042  			// 2. The previous node's next node is succ in this layer.
  1043  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1044  		}
  1045  		if !valid {
  1046  			unlockFloat64Desc(preds, highestLocked)
  1047  			continue
  1048  		}
  1049  
  1050  		nn := newFloat64NodeDesc(value, level)
  1051  		for layer := 0; layer < level; layer++ {
  1052  			nn.storeNext(layer, succs[layer])
  1053  			preds[layer].atomicStoreNext(layer, nn)
  1054  		}
  1055  		nn.flags.SetTrue(fullyLinked)
  1056  		unlockFloat64Desc(preds, highestLocked)
  1057  		atomic.AddInt64(&s.length, 1)
  1058  		return true
  1059  	}
  1060  }
  1061  
  1062  func (s *Float64SetDesc) randomlevel() int {
  1063  	// Generate random level.
  1064  	level := randomLevel()
  1065  	// Update highest level if possible.
  1066  	for {
  1067  		hl := atomic.LoadInt64(&s.highestLevel)
  1068  		if int64(level) <= hl {
  1069  			break
  1070  		}
  1071  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1072  			break
  1073  		}
  1074  	}
  1075  	return level
  1076  }
  1077  
  1078  // Contains check if the value is in the skip set.
  1079  func (s *Float64SetDesc) Contains(value float64) bool {
  1080  	x := s.header
  1081  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1082  		nex := x.atomicLoadNext(i)
  1083  		for nex != nil && nex.lessthan(value) {
  1084  			x = nex
  1085  			nex = x.atomicLoadNext(i)
  1086  		}
  1087  
  1088  		// Check if the value already in the skip list.
  1089  		if nex != nil && nex.equal(value) {
  1090  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  1091  		}
  1092  	}
  1093  	return false
  1094  }
  1095  
  1096  // Remove a node from the skip set.
  1097  func (s *Float64SetDesc) Remove(value float64) bool {
  1098  	var (
  1099  		nodeToRemove *float64NodeDesc
  1100  		isMarked     bool // represents if this operation mark the node
  1101  		topLayer     = -1
  1102  		preds, succs [maxLevel]*float64NodeDesc
  1103  	)
  1104  	for {
  1105  		lFound := s.findNodeRemove(value, &preds, &succs)
  1106  		if isMarked || // this process mark this node or we can find this node in the skip list
  1107  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1108  			if !isMarked { // we don't mark this node for now
  1109  				nodeToRemove = succs[lFound]
  1110  				topLayer = lFound
  1111  				nodeToRemove.mu.Lock()
  1112  				if nodeToRemove.flags.Get(marked) {
  1113  					// The node is marked by another process,
  1114  					// the physical deletion will be accomplished by another process.
  1115  					nodeToRemove.mu.Unlock()
  1116  					return false
  1117  				}
  1118  				nodeToRemove.flags.SetTrue(marked)
  1119  				isMarked = true
  1120  			}
  1121  			// Accomplish the physical deletion.
  1122  			var (
  1123  				highestLocked        = -1 // the highest level being locked by this process
  1124  				valid                = true
  1125  				pred, succ, prevPred *float64NodeDesc
  1126  			)
  1127  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1128  				pred, succ = preds[layer], succs[layer]
  1129  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1130  					pred.mu.Lock()
  1131  					highestLocked = layer
  1132  					prevPred = pred
  1133  				}
  1134  				// valid check if there is another node has inserted into the skip list in this layer
  1135  				// during this process, or the previous is removed by another process.
  1136  				// It is valid if:
  1137  				// 1. the previous node exists.
  1138  				// 2. no another node has inserted into the skip list in this layer.
  1139  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  1140  			}
  1141  			if !valid {
  1142  				unlockFloat64Desc(preds, highestLocked)
  1143  				continue
  1144  			}
  1145  			for i := topLayer; i >= 0; i-- {
  1146  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  1147  				// So we don't need `nodeToRemove.loadNext`
  1148  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  1149  			}
  1150  			nodeToRemove.mu.Unlock()
  1151  			unlockFloat64Desc(preds, highestLocked)
  1152  			atomic.AddInt64(&s.length, -1)
  1153  			return true
  1154  		}
  1155  		return false
  1156  	}
  1157  }
  1158  
  1159  // Range calls f sequentially for each value present in the skip set.
  1160  // If f returns false, range stops the iteration.
  1161  func (s *Float64SetDesc) Range(f func(value float64) bool) {
  1162  	x := s.header.atomicLoadNext(0)
  1163  	for x != nil {
  1164  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  1165  			x = x.atomicLoadNext(0)
  1166  			continue
  1167  		}
  1168  		if !f(x.value) {
  1169  			break
  1170  		}
  1171  		x = x.atomicLoadNext(0)
  1172  	}
  1173  }
  1174  
  1175  // Len return the length of this skip set.
  1176  func (s *Float64SetDesc) Len() int {
  1177  	return int(atomic.LoadInt64(&s.length))
  1178  }
  1179  
  1180  // Int32Set represents a set based on skip list in ascending order.
  1181  type Int32Set struct {
  1182  	header       *int32Node
  1183  	length       int64
  1184  	highestLevel int64 // highest level for now
  1185  }
  1186  
  1187  type int32Node struct {
  1188  	value int32
  1189  	next  optionalArray // [level]*int32Node
  1190  	mu    sync.Mutex
  1191  	flags bitflag
  1192  	level uint32
  1193  }
  1194  
  1195  func newInt32Node(value int32, level int) *int32Node {
  1196  	node := &int32Node{
  1197  		value: value,
  1198  		level: uint32(level),
  1199  	}
  1200  	if level > op1 {
  1201  		node.next.extra = new([op2]unsafe.Pointer)
  1202  	}
  1203  	return node
  1204  }
  1205  
  1206  func (n *int32Node) loadNext(i int) *int32Node {
  1207  	return (*int32Node)(n.next.load(i))
  1208  }
  1209  
  1210  func (n *int32Node) storeNext(i int, node *int32Node) {
  1211  	n.next.store(i, unsafe.Pointer(node))
  1212  }
  1213  
  1214  func (n *int32Node) atomicLoadNext(i int) *int32Node {
  1215  	return (*int32Node)(n.next.atomicLoad(i))
  1216  }
  1217  
  1218  func (n *int32Node) atomicStoreNext(i int, node *int32Node) {
  1219  	n.next.atomicStore(i, unsafe.Pointer(node))
  1220  }
  1221  
  1222  func (n *int32Node) lessthan(value int32) bool {
  1223  	return n.value < value
  1224  }
  1225  
  1226  func (n *int32Node) equal(value int32) bool {
  1227  	return n.value == value
  1228  }
  1229  
  1230  // NewInt32 return an empty int32 skip set in ascending order.
  1231  func NewInt32() *Int32Set {
  1232  	h := newInt32Node(0, maxLevel)
  1233  	h.flags.SetTrue(fullyLinked)
  1234  	return &Int32Set{
  1235  		header:       h,
  1236  		highestLevel: defaultHighestLevel,
  1237  	}
  1238  }
  1239  
  1240  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  1241  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1242  func (s *Int32Set) findNodeRemove(value int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) int {
  1243  	// lFound represents the index of the first layer at which it found a node.
  1244  	lFound, x := -1, s.header
  1245  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1246  		succ := x.atomicLoadNext(i)
  1247  		for succ != nil && succ.lessthan(value) {
  1248  			x = succ
  1249  			succ = x.atomicLoadNext(i)
  1250  		}
  1251  		preds[i] = x
  1252  		succs[i] = succ
  1253  
  1254  		// Check if the value already in the skip list.
  1255  		if lFound == -1 && succ != nil && succ.equal(value) {
  1256  			lFound = i
  1257  		}
  1258  	}
  1259  	return lFound
  1260  }
  1261  
  1262  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  1263  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1264  func (s *Int32Set) findNodeAdd(value int32, preds *[maxLevel]*int32Node, succs *[maxLevel]*int32Node) int {
  1265  	x := s.header
  1266  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1267  		succ := x.atomicLoadNext(i)
  1268  		for succ != nil && succ.lessthan(value) {
  1269  			x = succ
  1270  			succ = x.atomicLoadNext(i)
  1271  		}
  1272  		preds[i] = x
  1273  		succs[i] = succ
  1274  
  1275  		// Check if the value already in the skip list.
  1276  		if succ != nil && succ.equal(value) {
  1277  			return i
  1278  		}
  1279  	}
  1280  	return -1
  1281  }
  1282  
  1283  func unlockInt32(preds [maxLevel]*int32Node, highestLevel int) {
  1284  	var prevPred *int32Node
  1285  	for i := highestLevel; i >= 0; i-- {
  1286  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  1287  			preds[i].mu.Unlock()
  1288  			prevPred = preds[i]
  1289  		}
  1290  	}
  1291  }
  1292  
  1293  // Add add the value into skip set, return true if this process insert the value into skip set,
  1294  // return false if this process can't insert this value, because another process has insert the same value.
  1295  //
  1296  // If the value is in the skip set but not fully linked, this process will wait until it is.
  1297  func (s *Int32Set) Add(value int32) bool {
  1298  	level := s.randomlevel()
  1299  	var preds, succs [maxLevel]*int32Node
  1300  	for {
  1301  		lFound := s.findNodeAdd(value, &preds, &succs)
  1302  		if lFound != -1 { // indicating the value is already in the skip-list
  1303  			nodeFound := succs[lFound]
  1304  			if !nodeFound.flags.Get(marked) {
  1305  				for !nodeFound.flags.Get(fullyLinked) {
  1306  					// The node is not yet fully linked, just waits until it is.
  1307  				}
  1308  				return false
  1309  			}
  1310  			// If the node is marked, represents some other thread is in the process of deleting this node,
  1311  			// we need to add this node in next loop.
  1312  			continue
  1313  		}
  1314  		// Add this node into skip list.
  1315  		var (
  1316  			highestLocked        = -1 // the highest level being locked by this process
  1317  			valid                = true
  1318  			pred, succ, prevPred *int32Node
  1319  		)
  1320  		for layer := 0; valid && layer < level; layer++ {
  1321  			pred = preds[layer]   // target node's previous node
  1322  			succ = succs[layer]   // target node's next node
  1323  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1324  				pred.mu.Lock()
  1325  				highestLocked = layer
  1326  				prevPred = pred
  1327  			}
  1328  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  1329  			// It is valid if:
  1330  			// 1. The previous node and next node both are not marked.
  1331  			// 2. The previous node's next node is succ in this layer.
  1332  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1333  		}
  1334  		if !valid {
  1335  			unlockInt32(preds, highestLocked)
  1336  			continue
  1337  		}
  1338  
  1339  		nn := newInt32Node(value, level)
  1340  		for layer := 0; layer < level; layer++ {
  1341  			nn.storeNext(layer, succs[layer])
  1342  			preds[layer].atomicStoreNext(layer, nn)
  1343  		}
  1344  		nn.flags.SetTrue(fullyLinked)
  1345  		unlockInt32(preds, highestLocked)
  1346  		atomic.AddInt64(&s.length, 1)
  1347  		return true
  1348  	}
  1349  }
  1350  
  1351  func (s *Int32Set) randomlevel() int {
  1352  	// Generate random level.
  1353  	level := randomLevel()
  1354  	// Update highest level if possible.
  1355  	for {
  1356  		hl := atomic.LoadInt64(&s.highestLevel)
  1357  		if int64(level) <= hl {
  1358  			break
  1359  		}
  1360  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1361  			break
  1362  		}
  1363  	}
  1364  	return level
  1365  }
  1366  
  1367  // Contains check if the value is in the skip set.
  1368  func (s *Int32Set) Contains(value int32) bool {
  1369  	x := s.header
  1370  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1371  		nex := x.atomicLoadNext(i)
  1372  		for nex != nil && nex.lessthan(value) {
  1373  			x = nex
  1374  			nex = x.atomicLoadNext(i)
  1375  		}
  1376  
  1377  		// Check if the value already in the skip list.
  1378  		if nex != nil && nex.equal(value) {
  1379  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  1380  		}
  1381  	}
  1382  	return false
  1383  }
  1384  
  1385  // Remove a node from the skip set.
  1386  func (s *Int32Set) Remove(value int32) bool {
  1387  	var (
  1388  		nodeToRemove *int32Node
  1389  		isMarked     bool // represents if this operation mark the node
  1390  		topLayer     = -1
  1391  		preds, succs [maxLevel]*int32Node
  1392  	)
  1393  	for {
  1394  		lFound := s.findNodeRemove(value, &preds, &succs)
  1395  		if isMarked || // this process mark this node or we can find this node in the skip list
  1396  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1397  			if !isMarked { // we don't mark this node for now
  1398  				nodeToRemove = succs[lFound]
  1399  				topLayer = lFound
  1400  				nodeToRemove.mu.Lock()
  1401  				if nodeToRemove.flags.Get(marked) {
  1402  					// The node is marked by another process,
  1403  					// the physical deletion will be accomplished by another process.
  1404  					nodeToRemove.mu.Unlock()
  1405  					return false
  1406  				}
  1407  				nodeToRemove.flags.SetTrue(marked)
  1408  				isMarked = true
  1409  			}
  1410  			// Accomplish the physical deletion.
  1411  			var (
  1412  				highestLocked        = -1 // the highest level being locked by this process
  1413  				valid                = true
  1414  				pred, succ, prevPred *int32Node
  1415  			)
  1416  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1417  				pred, succ = preds[layer], succs[layer]
  1418  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1419  					pred.mu.Lock()
  1420  					highestLocked = layer
  1421  					prevPred = pred
  1422  				}
  1423  				// valid check if there is another node has inserted into the skip list in this layer
  1424  				// during this process, or the previous is removed by another process.
  1425  				// It is valid if:
  1426  				// 1. the previous node exists.
  1427  				// 2. no another node has inserted into the skip list in this layer.
  1428  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  1429  			}
  1430  			if !valid {
  1431  				unlockInt32(preds, highestLocked)
  1432  				continue
  1433  			}
  1434  			for i := topLayer; i >= 0; i-- {
  1435  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  1436  				// So we don't need `nodeToRemove.loadNext`
  1437  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  1438  			}
  1439  			nodeToRemove.mu.Unlock()
  1440  			unlockInt32(preds, highestLocked)
  1441  			atomic.AddInt64(&s.length, -1)
  1442  			return true
  1443  		}
  1444  		return false
  1445  	}
  1446  }
  1447  
  1448  // Range calls f sequentially for each value present in the skip set.
  1449  // If f returns false, range stops the iteration.
  1450  func (s *Int32Set) Range(f func(value int32) bool) {
  1451  	x := s.header.atomicLoadNext(0)
  1452  	for x != nil {
  1453  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  1454  			x = x.atomicLoadNext(0)
  1455  			continue
  1456  		}
  1457  		if !f(x.value) {
  1458  			break
  1459  		}
  1460  		x = x.atomicLoadNext(0)
  1461  	}
  1462  }
  1463  
  1464  // Len return the length of this skip set.
  1465  func (s *Int32Set) Len() int {
  1466  	return int(atomic.LoadInt64(&s.length))
  1467  }
  1468  
  1469  // Int32SetDesc represents a set based on skip list in descending order.
  1470  type Int32SetDesc struct {
  1471  	header       *int32NodeDesc
  1472  	length       int64
  1473  	highestLevel int64 // highest level for now
  1474  }
  1475  
  1476  type int32NodeDesc struct {
  1477  	value int32
  1478  	next  optionalArray // [level]*int32NodeDesc
  1479  	mu    sync.Mutex
  1480  	flags bitflag
  1481  	level uint32
  1482  }
  1483  
  1484  func newInt32NodeDesc(value int32, level int) *int32NodeDesc {
  1485  	node := &int32NodeDesc{
  1486  		value: value,
  1487  		level: uint32(level),
  1488  	}
  1489  	if level > op1 {
  1490  		node.next.extra = new([op2]unsafe.Pointer)
  1491  	}
  1492  	return node
  1493  }
  1494  
  1495  func (n *int32NodeDesc) loadNext(i int) *int32NodeDesc {
  1496  	return (*int32NodeDesc)(n.next.load(i))
  1497  }
  1498  
  1499  func (n *int32NodeDesc) storeNext(i int, node *int32NodeDesc) {
  1500  	n.next.store(i, unsafe.Pointer(node))
  1501  }
  1502  
  1503  func (n *int32NodeDesc) atomicLoadNext(i int) *int32NodeDesc {
  1504  	return (*int32NodeDesc)(n.next.atomicLoad(i))
  1505  }
  1506  
  1507  func (n *int32NodeDesc) atomicStoreNext(i int, node *int32NodeDesc) {
  1508  	n.next.atomicStore(i, unsafe.Pointer(node))
  1509  }
  1510  
  1511  func (n *int32NodeDesc) lessthan(value int32) bool {
  1512  	return n.value > value
  1513  }
  1514  
  1515  func (n *int32NodeDesc) equal(value int32) bool {
  1516  	return n.value == value
  1517  }
  1518  
  1519  // NewInt32Desc return an empty int32 skip set in descending order.
  1520  func NewInt32Desc() *Int32SetDesc {
  1521  	h := newInt32NodeDesc(0, maxLevel)
  1522  	h.flags.SetTrue(fullyLinked)
  1523  	return &Int32SetDesc{
  1524  		header:       h,
  1525  		highestLevel: defaultHighestLevel,
  1526  	}
  1527  }
  1528  
  1529  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  1530  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1531  func (s *Int32SetDesc) findNodeRemove(value int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) int {
  1532  	// lFound represents the index of the first layer at which it found a node.
  1533  	lFound, x := -1, s.header
  1534  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1535  		succ := x.atomicLoadNext(i)
  1536  		for succ != nil && succ.lessthan(value) {
  1537  			x = succ
  1538  			succ = x.atomicLoadNext(i)
  1539  		}
  1540  		preds[i] = x
  1541  		succs[i] = succ
  1542  
  1543  		// Check if the value already in the skip list.
  1544  		if lFound == -1 && succ != nil && succ.equal(value) {
  1545  			lFound = i
  1546  		}
  1547  	}
  1548  	return lFound
  1549  }
  1550  
  1551  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  1552  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1553  func (s *Int32SetDesc) findNodeAdd(value int32, preds *[maxLevel]*int32NodeDesc, succs *[maxLevel]*int32NodeDesc) int {
  1554  	x := s.header
  1555  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1556  		succ := x.atomicLoadNext(i)
  1557  		for succ != nil && succ.lessthan(value) {
  1558  			x = succ
  1559  			succ = x.atomicLoadNext(i)
  1560  		}
  1561  		preds[i] = x
  1562  		succs[i] = succ
  1563  
  1564  		// Check if the value already in the skip list.
  1565  		if succ != nil && succ.equal(value) {
  1566  			return i
  1567  		}
  1568  	}
  1569  	return -1
  1570  }
  1571  
  1572  func unlockInt32Desc(preds [maxLevel]*int32NodeDesc, highestLevel int) {
  1573  	var prevPred *int32NodeDesc
  1574  	for i := highestLevel; i >= 0; i-- {
  1575  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  1576  			preds[i].mu.Unlock()
  1577  			prevPred = preds[i]
  1578  		}
  1579  	}
  1580  }
  1581  
  1582  // Add add the value into skip set, return true if this process insert the value into skip set,
  1583  // return false if this process can't insert this value, because another process has insert the same value.
  1584  //
  1585  // If the value is in the skip set but not fully linked, this process will wait until it is.
  1586  func (s *Int32SetDesc) Add(value int32) bool {
  1587  	level := s.randomlevel()
  1588  	var preds, succs [maxLevel]*int32NodeDesc
  1589  	for {
  1590  		lFound := s.findNodeAdd(value, &preds, &succs)
  1591  		if lFound != -1 { // indicating the value is already in the skip-list
  1592  			nodeFound := succs[lFound]
  1593  			if !nodeFound.flags.Get(marked) {
  1594  				for !nodeFound.flags.Get(fullyLinked) {
  1595  					// The node is not yet fully linked, just waits until it is.
  1596  				}
  1597  				return false
  1598  			}
  1599  			// If the node is marked, represents some other thread is in the process of deleting this node,
  1600  			// we need to add this node in next loop.
  1601  			continue
  1602  		}
  1603  		// Add this node into skip list.
  1604  		var (
  1605  			highestLocked        = -1 // the highest level being locked by this process
  1606  			valid                = true
  1607  			pred, succ, prevPred *int32NodeDesc
  1608  		)
  1609  		for layer := 0; valid && layer < level; layer++ {
  1610  			pred = preds[layer]   // target node's previous node
  1611  			succ = succs[layer]   // target node's next node
  1612  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1613  				pred.mu.Lock()
  1614  				highestLocked = layer
  1615  				prevPred = pred
  1616  			}
  1617  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  1618  			// It is valid if:
  1619  			// 1. The previous node and next node both are not marked.
  1620  			// 2. The previous node's next node is succ in this layer.
  1621  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1622  		}
  1623  		if !valid {
  1624  			unlockInt32Desc(preds, highestLocked)
  1625  			continue
  1626  		}
  1627  
  1628  		nn := newInt32NodeDesc(value, level)
  1629  		for layer := 0; layer < level; layer++ {
  1630  			nn.storeNext(layer, succs[layer])
  1631  			preds[layer].atomicStoreNext(layer, nn)
  1632  		}
  1633  		nn.flags.SetTrue(fullyLinked)
  1634  		unlockInt32Desc(preds, highestLocked)
  1635  		atomic.AddInt64(&s.length, 1)
  1636  		return true
  1637  	}
  1638  }
  1639  
  1640  func (s *Int32SetDesc) randomlevel() int {
  1641  	// Generate random level.
  1642  	level := randomLevel()
  1643  	// Update highest level if possible.
  1644  	for {
  1645  		hl := atomic.LoadInt64(&s.highestLevel)
  1646  		if int64(level) <= hl {
  1647  			break
  1648  		}
  1649  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1650  			break
  1651  		}
  1652  	}
  1653  	return level
  1654  }
  1655  
  1656  // Contains check if the value is in the skip set.
  1657  func (s *Int32SetDesc) Contains(value int32) bool {
  1658  	x := s.header
  1659  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1660  		nex := x.atomicLoadNext(i)
  1661  		for nex != nil && nex.lessthan(value) {
  1662  			x = nex
  1663  			nex = x.atomicLoadNext(i)
  1664  		}
  1665  
  1666  		// Check if the value already in the skip list.
  1667  		if nex != nil && nex.equal(value) {
  1668  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  1669  		}
  1670  	}
  1671  	return false
  1672  }
  1673  
  1674  // Remove a node from the skip set.
  1675  func (s *Int32SetDesc) Remove(value int32) bool {
  1676  	var (
  1677  		nodeToRemove *int32NodeDesc
  1678  		isMarked     bool // represents if this operation mark the node
  1679  		topLayer     = -1
  1680  		preds, succs [maxLevel]*int32NodeDesc
  1681  	)
  1682  	for {
  1683  		lFound := s.findNodeRemove(value, &preds, &succs)
  1684  		if isMarked || // this process mark this node or we can find this node in the skip list
  1685  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1686  			if !isMarked { // we don't mark this node for now
  1687  				nodeToRemove = succs[lFound]
  1688  				topLayer = lFound
  1689  				nodeToRemove.mu.Lock()
  1690  				if nodeToRemove.flags.Get(marked) {
  1691  					// The node is marked by another process,
  1692  					// the physical deletion will be accomplished by another process.
  1693  					nodeToRemove.mu.Unlock()
  1694  					return false
  1695  				}
  1696  				nodeToRemove.flags.SetTrue(marked)
  1697  				isMarked = true
  1698  			}
  1699  			// Accomplish the physical deletion.
  1700  			var (
  1701  				highestLocked        = -1 // the highest level being locked by this process
  1702  				valid                = true
  1703  				pred, succ, prevPred *int32NodeDesc
  1704  			)
  1705  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1706  				pred, succ = preds[layer], succs[layer]
  1707  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1708  					pred.mu.Lock()
  1709  					highestLocked = layer
  1710  					prevPred = pred
  1711  				}
  1712  				// valid check if there is another node has inserted into the skip list in this layer
  1713  				// during this process, or the previous is removed by another process.
  1714  				// It is valid if:
  1715  				// 1. the previous node exists.
  1716  				// 2. no another node has inserted into the skip list in this layer.
  1717  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  1718  			}
  1719  			if !valid {
  1720  				unlockInt32Desc(preds, highestLocked)
  1721  				continue
  1722  			}
  1723  			for i := topLayer; i >= 0; i-- {
  1724  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  1725  				// So we don't need `nodeToRemove.loadNext`
  1726  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  1727  			}
  1728  			nodeToRemove.mu.Unlock()
  1729  			unlockInt32Desc(preds, highestLocked)
  1730  			atomic.AddInt64(&s.length, -1)
  1731  			return true
  1732  		}
  1733  		return false
  1734  	}
  1735  }
  1736  
  1737  // Range calls f sequentially for each value present in the skip set.
  1738  // If f returns false, range stops the iteration.
  1739  func (s *Int32SetDesc) Range(f func(value int32) bool) {
  1740  	x := s.header.atomicLoadNext(0)
  1741  	for x != nil {
  1742  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  1743  			x = x.atomicLoadNext(0)
  1744  			continue
  1745  		}
  1746  		if !f(x.value) {
  1747  			break
  1748  		}
  1749  		x = x.atomicLoadNext(0)
  1750  	}
  1751  }
  1752  
  1753  // Len return the length of this skip set.
  1754  func (s *Int32SetDesc) Len() int {
  1755  	return int(atomic.LoadInt64(&s.length))
  1756  }
  1757  
  1758  // Int16Set represents a set based on skip list in ascending order.
  1759  type Int16Set struct {
  1760  	header       *int16Node
  1761  	length       int64
  1762  	highestLevel int64 // highest level for now
  1763  }
  1764  
  1765  type int16Node struct {
  1766  	value int16
  1767  	next  optionalArray // [level]*int16Node
  1768  	mu    sync.Mutex
  1769  	flags bitflag
  1770  	level uint32
  1771  }
  1772  
  1773  func newInt16Node(value int16, level int) *int16Node {
  1774  	node := &int16Node{
  1775  		value: value,
  1776  		level: uint32(level),
  1777  	}
  1778  	if level > op1 {
  1779  		node.next.extra = new([op2]unsafe.Pointer)
  1780  	}
  1781  	return node
  1782  }
  1783  
  1784  func (n *int16Node) loadNext(i int) *int16Node {
  1785  	return (*int16Node)(n.next.load(i))
  1786  }
  1787  
  1788  func (n *int16Node) storeNext(i int, node *int16Node) {
  1789  	n.next.store(i, unsafe.Pointer(node))
  1790  }
  1791  
  1792  func (n *int16Node) atomicLoadNext(i int) *int16Node {
  1793  	return (*int16Node)(n.next.atomicLoad(i))
  1794  }
  1795  
  1796  func (n *int16Node) atomicStoreNext(i int, node *int16Node) {
  1797  	n.next.atomicStore(i, unsafe.Pointer(node))
  1798  }
  1799  
  1800  func (n *int16Node) lessthan(value int16) bool {
  1801  	return n.value < value
  1802  }
  1803  
  1804  func (n *int16Node) equal(value int16) bool {
  1805  	return n.value == value
  1806  }
  1807  
  1808  // NewInt16 return an empty int16 skip set in ascending order.
  1809  func NewInt16() *Int16Set {
  1810  	h := newInt16Node(0, maxLevel)
  1811  	h.flags.SetTrue(fullyLinked)
  1812  	return &Int16Set{
  1813  		header:       h,
  1814  		highestLevel: defaultHighestLevel,
  1815  	}
  1816  }
  1817  
  1818  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  1819  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1820  func (s *Int16Set) findNodeRemove(value int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) int {
  1821  	// lFound represents the index of the first layer at which it found a node.
  1822  	lFound, x := -1, s.header
  1823  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1824  		succ := x.atomicLoadNext(i)
  1825  		for succ != nil && succ.lessthan(value) {
  1826  			x = succ
  1827  			succ = x.atomicLoadNext(i)
  1828  		}
  1829  		preds[i] = x
  1830  		succs[i] = succ
  1831  
  1832  		// Check if the value already in the skip list.
  1833  		if lFound == -1 && succ != nil && succ.equal(value) {
  1834  			lFound = i
  1835  		}
  1836  	}
  1837  	return lFound
  1838  }
  1839  
  1840  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  1841  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  1842  func (s *Int16Set) findNodeAdd(value int16, preds *[maxLevel]*int16Node, succs *[maxLevel]*int16Node) int {
  1843  	x := s.header
  1844  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1845  		succ := x.atomicLoadNext(i)
  1846  		for succ != nil && succ.lessthan(value) {
  1847  			x = succ
  1848  			succ = x.atomicLoadNext(i)
  1849  		}
  1850  		preds[i] = x
  1851  		succs[i] = succ
  1852  
  1853  		// Check if the value already in the skip list.
  1854  		if succ != nil && succ.equal(value) {
  1855  			return i
  1856  		}
  1857  	}
  1858  	return -1
  1859  }
  1860  
  1861  func unlockInt16(preds [maxLevel]*int16Node, highestLevel int) {
  1862  	var prevPred *int16Node
  1863  	for i := highestLevel; i >= 0; i-- {
  1864  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  1865  			preds[i].mu.Unlock()
  1866  			prevPred = preds[i]
  1867  		}
  1868  	}
  1869  }
  1870  
  1871  // Add add the value into skip set, return true if this process insert the value into skip set,
  1872  // return false if this process can't insert this value, because another process has insert the same value.
  1873  //
  1874  // If the value is in the skip set but not fully linked, this process will wait until it is.
  1875  func (s *Int16Set) Add(value int16) bool {
  1876  	level := s.randomlevel()
  1877  	var preds, succs [maxLevel]*int16Node
  1878  	for {
  1879  		lFound := s.findNodeAdd(value, &preds, &succs)
  1880  		if lFound != -1 { // indicating the value is already in the skip-list
  1881  			nodeFound := succs[lFound]
  1882  			if !nodeFound.flags.Get(marked) {
  1883  				for !nodeFound.flags.Get(fullyLinked) {
  1884  					// The node is not yet fully linked, just waits until it is.
  1885  				}
  1886  				return false
  1887  			}
  1888  			// If the node is marked, represents some other thread is in the process of deleting this node,
  1889  			// we need to add this node in next loop.
  1890  			continue
  1891  		}
  1892  		// Add this node into skip list.
  1893  		var (
  1894  			highestLocked        = -1 // the highest level being locked by this process
  1895  			valid                = true
  1896  			pred, succ, prevPred *int16Node
  1897  		)
  1898  		for layer := 0; valid && layer < level; layer++ {
  1899  			pred = preds[layer]   // target node's previous node
  1900  			succ = succs[layer]   // target node's next node
  1901  			if pred != prevPred { // the node in this layer could be locked by previous loop
  1902  				pred.mu.Lock()
  1903  				highestLocked = layer
  1904  				prevPred = pred
  1905  			}
  1906  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  1907  			// It is valid if:
  1908  			// 1. The previous node and next node both are not marked.
  1909  			// 2. The previous node's next node is succ in this layer.
  1910  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  1911  		}
  1912  		if !valid {
  1913  			unlockInt16(preds, highestLocked)
  1914  			continue
  1915  		}
  1916  
  1917  		nn := newInt16Node(value, level)
  1918  		for layer := 0; layer < level; layer++ {
  1919  			nn.storeNext(layer, succs[layer])
  1920  			preds[layer].atomicStoreNext(layer, nn)
  1921  		}
  1922  		nn.flags.SetTrue(fullyLinked)
  1923  		unlockInt16(preds, highestLocked)
  1924  		atomic.AddInt64(&s.length, 1)
  1925  		return true
  1926  	}
  1927  }
  1928  
  1929  func (s *Int16Set) randomlevel() int {
  1930  	// Generate random level.
  1931  	level := randomLevel()
  1932  	// Update highest level if possible.
  1933  	for {
  1934  		hl := atomic.LoadInt64(&s.highestLevel)
  1935  		if int64(level) <= hl {
  1936  			break
  1937  		}
  1938  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  1939  			break
  1940  		}
  1941  	}
  1942  	return level
  1943  }
  1944  
  1945  // Contains check if the value is in the skip set.
  1946  func (s *Int16Set) Contains(value int16) bool {
  1947  	x := s.header
  1948  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  1949  		nex := x.atomicLoadNext(i)
  1950  		for nex != nil && nex.lessthan(value) {
  1951  			x = nex
  1952  			nex = x.atomicLoadNext(i)
  1953  		}
  1954  
  1955  		// Check if the value already in the skip list.
  1956  		if nex != nil && nex.equal(value) {
  1957  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  1958  		}
  1959  	}
  1960  	return false
  1961  }
  1962  
  1963  // Remove a node from the skip set.
  1964  func (s *Int16Set) Remove(value int16) bool {
  1965  	var (
  1966  		nodeToRemove *int16Node
  1967  		isMarked     bool // represents if this operation mark the node
  1968  		topLayer     = -1
  1969  		preds, succs [maxLevel]*int16Node
  1970  	)
  1971  	for {
  1972  		lFound := s.findNodeRemove(value, &preds, &succs)
  1973  		if isMarked || // this process mark this node or we can find this node in the skip list
  1974  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  1975  			if !isMarked { // we don't mark this node for now
  1976  				nodeToRemove = succs[lFound]
  1977  				topLayer = lFound
  1978  				nodeToRemove.mu.Lock()
  1979  				if nodeToRemove.flags.Get(marked) {
  1980  					// The node is marked by another process,
  1981  					// the physical deletion will be accomplished by another process.
  1982  					nodeToRemove.mu.Unlock()
  1983  					return false
  1984  				}
  1985  				nodeToRemove.flags.SetTrue(marked)
  1986  				isMarked = true
  1987  			}
  1988  			// Accomplish the physical deletion.
  1989  			var (
  1990  				highestLocked        = -1 // the highest level being locked by this process
  1991  				valid                = true
  1992  				pred, succ, prevPred *int16Node
  1993  			)
  1994  			for layer := 0; valid && (layer <= topLayer); layer++ {
  1995  				pred, succ = preds[layer], succs[layer]
  1996  				if pred != prevPred { // the node in this layer could be locked by previous loop
  1997  					pred.mu.Lock()
  1998  					highestLocked = layer
  1999  					prevPred = pred
  2000  				}
  2001  				// valid check if there is another node has inserted into the skip list in this layer
  2002  				// during this process, or the previous is removed by another process.
  2003  				// It is valid if:
  2004  				// 1. the previous node exists.
  2005  				// 2. no another node has inserted into the skip list in this layer.
  2006  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2007  			}
  2008  			if !valid {
  2009  				unlockInt16(preds, highestLocked)
  2010  				continue
  2011  			}
  2012  			for i := topLayer; i >= 0; i-- {
  2013  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  2014  				// So we don't need `nodeToRemove.loadNext`
  2015  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  2016  			}
  2017  			nodeToRemove.mu.Unlock()
  2018  			unlockInt16(preds, highestLocked)
  2019  			atomic.AddInt64(&s.length, -1)
  2020  			return true
  2021  		}
  2022  		return false
  2023  	}
  2024  }
  2025  
  2026  // Range calls f sequentially for each value present in the skip set.
  2027  // If f returns false, range stops the iteration.
  2028  func (s *Int16Set) Range(f func(value int16) bool) {
  2029  	x := s.header.atomicLoadNext(0)
  2030  	for x != nil {
  2031  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2032  			x = x.atomicLoadNext(0)
  2033  			continue
  2034  		}
  2035  		if !f(x.value) {
  2036  			break
  2037  		}
  2038  		x = x.atomicLoadNext(0)
  2039  	}
  2040  }
  2041  
  2042  // Len return the length of this skip set.
  2043  func (s *Int16Set) Len() int {
  2044  	return int(atomic.LoadInt64(&s.length))
  2045  }
  2046  
  2047  // Int16SetDesc represents a set based on skip list in descending order.
  2048  type Int16SetDesc struct {
  2049  	header       *int16NodeDesc
  2050  	length       int64
  2051  	highestLevel int64 // highest level for now
  2052  }
  2053  
  2054  type int16NodeDesc struct {
  2055  	value int16
  2056  	next  optionalArray // [level]*int16NodeDesc
  2057  	mu    sync.Mutex
  2058  	flags bitflag
  2059  	level uint32
  2060  }
  2061  
  2062  func newInt16NodeDesc(value int16, level int) *int16NodeDesc {
  2063  	node := &int16NodeDesc{
  2064  		value: value,
  2065  		level: uint32(level),
  2066  	}
  2067  	if level > op1 {
  2068  		node.next.extra = new([op2]unsafe.Pointer)
  2069  	}
  2070  	return node
  2071  }
  2072  
  2073  func (n *int16NodeDesc) loadNext(i int) *int16NodeDesc {
  2074  	return (*int16NodeDesc)(n.next.load(i))
  2075  }
  2076  
  2077  func (n *int16NodeDesc) storeNext(i int, node *int16NodeDesc) {
  2078  	n.next.store(i, unsafe.Pointer(node))
  2079  }
  2080  
  2081  func (n *int16NodeDesc) atomicLoadNext(i int) *int16NodeDesc {
  2082  	return (*int16NodeDesc)(n.next.atomicLoad(i))
  2083  }
  2084  
  2085  func (n *int16NodeDesc) atomicStoreNext(i int, node *int16NodeDesc) {
  2086  	n.next.atomicStore(i, unsafe.Pointer(node))
  2087  }
  2088  
  2089  func (n *int16NodeDesc) lessthan(value int16) bool {
  2090  	return n.value > value
  2091  }
  2092  
  2093  func (n *int16NodeDesc) equal(value int16) bool {
  2094  	return n.value == value
  2095  }
  2096  
  2097  // NewInt16Desc return an empty int16 skip set in descending order.
  2098  func NewInt16Desc() *Int16SetDesc {
  2099  	h := newInt16NodeDesc(0, maxLevel)
  2100  	h.flags.SetTrue(fullyLinked)
  2101  	return &Int16SetDesc{
  2102  		header:       h,
  2103  		highestLevel: defaultHighestLevel,
  2104  	}
  2105  }
  2106  
  2107  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2108  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2109  func (s *Int16SetDesc) findNodeRemove(value int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) int {
  2110  	// lFound represents the index of the first layer at which it found a node.
  2111  	lFound, x := -1, s.header
  2112  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2113  		succ := x.atomicLoadNext(i)
  2114  		for succ != nil && succ.lessthan(value) {
  2115  			x = succ
  2116  			succ = x.atomicLoadNext(i)
  2117  		}
  2118  		preds[i] = x
  2119  		succs[i] = succ
  2120  
  2121  		// Check if the value already in the skip list.
  2122  		if lFound == -1 && succ != nil && succ.equal(value) {
  2123  			lFound = i
  2124  		}
  2125  	}
  2126  	return lFound
  2127  }
  2128  
  2129  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  2130  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2131  func (s *Int16SetDesc) findNodeAdd(value int16, preds *[maxLevel]*int16NodeDesc, succs *[maxLevel]*int16NodeDesc) int {
  2132  	x := s.header
  2133  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2134  		succ := x.atomicLoadNext(i)
  2135  		for succ != nil && succ.lessthan(value) {
  2136  			x = succ
  2137  			succ = x.atomicLoadNext(i)
  2138  		}
  2139  		preds[i] = x
  2140  		succs[i] = succ
  2141  
  2142  		// Check if the value already in the skip list.
  2143  		if succ != nil && succ.equal(value) {
  2144  			return i
  2145  		}
  2146  	}
  2147  	return -1
  2148  }
  2149  
  2150  func unlockInt16Desc(preds [maxLevel]*int16NodeDesc, highestLevel int) {
  2151  	var prevPred *int16NodeDesc
  2152  	for i := highestLevel; i >= 0; i-- {
  2153  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  2154  			preds[i].mu.Unlock()
  2155  			prevPred = preds[i]
  2156  		}
  2157  	}
  2158  }
  2159  
  2160  // Add add the value into skip set, return true if this process insert the value into skip set,
  2161  // return false if this process can't insert this value, because another process has insert the same value.
  2162  //
  2163  // If the value is in the skip set but not fully linked, this process will wait until it is.
  2164  func (s *Int16SetDesc) Add(value int16) bool {
  2165  	level := s.randomlevel()
  2166  	var preds, succs [maxLevel]*int16NodeDesc
  2167  	for {
  2168  		lFound := s.findNodeAdd(value, &preds, &succs)
  2169  		if lFound != -1 { // indicating the value is already in the skip-list
  2170  			nodeFound := succs[lFound]
  2171  			if !nodeFound.flags.Get(marked) {
  2172  				for !nodeFound.flags.Get(fullyLinked) {
  2173  					// The node is not yet fully linked, just waits until it is.
  2174  				}
  2175  				return false
  2176  			}
  2177  			// If the node is marked, represents some other thread is in the process of deleting this node,
  2178  			// we need to add this node in next loop.
  2179  			continue
  2180  		}
  2181  		// Add this node into skip list.
  2182  		var (
  2183  			highestLocked        = -1 // the highest level being locked by this process
  2184  			valid                = true
  2185  			pred, succ, prevPred *int16NodeDesc
  2186  		)
  2187  		for layer := 0; valid && layer < level; layer++ {
  2188  			pred = preds[layer]   // target node's previous node
  2189  			succ = succs[layer]   // target node's next node
  2190  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2191  				pred.mu.Lock()
  2192  				highestLocked = layer
  2193  				prevPred = pred
  2194  			}
  2195  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  2196  			// It is valid if:
  2197  			// 1. The previous node and next node both are not marked.
  2198  			// 2. The previous node's next node is succ in this layer.
  2199  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2200  		}
  2201  		if !valid {
  2202  			unlockInt16Desc(preds, highestLocked)
  2203  			continue
  2204  		}
  2205  
  2206  		nn := newInt16NodeDesc(value, level)
  2207  		for layer := 0; layer < level; layer++ {
  2208  			nn.storeNext(layer, succs[layer])
  2209  			preds[layer].atomicStoreNext(layer, nn)
  2210  		}
  2211  		nn.flags.SetTrue(fullyLinked)
  2212  		unlockInt16Desc(preds, highestLocked)
  2213  		atomic.AddInt64(&s.length, 1)
  2214  		return true
  2215  	}
  2216  }
  2217  
  2218  func (s *Int16SetDesc) randomlevel() int {
  2219  	// Generate random level.
  2220  	level := randomLevel()
  2221  	// Update highest level if possible.
  2222  	for {
  2223  		hl := atomic.LoadInt64(&s.highestLevel)
  2224  		if int64(level) <= hl {
  2225  			break
  2226  		}
  2227  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  2228  			break
  2229  		}
  2230  	}
  2231  	return level
  2232  }
  2233  
  2234  // Contains check if the value is in the skip set.
  2235  func (s *Int16SetDesc) Contains(value int16) bool {
  2236  	x := s.header
  2237  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2238  		nex := x.atomicLoadNext(i)
  2239  		for nex != nil && nex.lessthan(value) {
  2240  			x = nex
  2241  			nex = x.atomicLoadNext(i)
  2242  		}
  2243  
  2244  		// Check if the value already in the skip list.
  2245  		if nex != nil && nex.equal(value) {
  2246  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  2247  		}
  2248  	}
  2249  	return false
  2250  }
  2251  
  2252  // Remove a node from the skip set.
  2253  func (s *Int16SetDesc) Remove(value int16) bool {
  2254  	var (
  2255  		nodeToRemove *int16NodeDesc
  2256  		isMarked     bool // represents if this operation mark the node
  2257  		topLayer     = -1
  2258  		preds, succs [maxLevel]*int16NodeDesc
  2259  	)
  2260  	for {
  2261  		lFound := s.findNodeRemove(value, &preds, &succs)
  2262  		if isMarked || // this process mark this node or we can find this node in the skip list
  2263  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2264  			if !isMarked { // we don't mark this node for now
  2265  				nodeToRemove = succs[lFound]
  2266  				topLayer = lFound
  2267  				nodeToRemove.mu.Lock()
  2268  				if nodeToRemove.flags.Get(marked) {
  2269  					// The node is marked by another process,
  2270  					// the physical deletion will be accomplished by another process.
  2271  					nodeToRemove.mu.Unlock()
  2272  					return false
  2273  				}
  2274  				nodeToRemove.flags.SetTrue(marked)
  2275  				isMarked = true
  2276  			}
  2277  			// Accomplish the physical deletion.
  2278  			var (
  2279  				highestLocked        = -1 // the highest level being locked by this process
  2280  				valid                = true
  2281  				pred, succ, prevPred *int16NodeDesc
  2282  			)
  2283  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2284  				pred, succ = preds[layer], succs[layer]
  2285  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2286  					pred.mu.Lock()
  2287  					highestLocked = layer
  2288  					prevPred = pred
  2289  				}
  2290  				// valid check if there is another node has inserted into the skip list in this layer
  2291  				// during this process, or the previous is removed by another process.
  2292  				// It is valid if:
  2293  				// 1. the previous node exists.
  2294  				// 2. no another node has inserted into the skip list in this layer.
  2295  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2296  			}
  2297  			if !valid {
  2298  				unlockInt16Desc(preds, highestLocked)
  2299  				continue
  2300  			}
  2301  			for i := topLayer; i >= 0; i-- {
  2302  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  2303  				// So we don't need `nodeToRemove.loadNext`
  2304  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  2305  			}
  2306  			nodeToRemove.mu.Unlock()
  2307  			unlockInt16Desc(preds, highestLocked)
  2308  			atomic.AddInt64(&s.length, -1)
  2309  			return true
  2310  		}
  2311  		return false
  2312  	}
  2313  }
  2314  
  2315  // Range calls f sequentially for each value present in the skip set.
  2316  // If f returns false, range stops the iteration.
  2317  func (s *Int16SetDesc) Range(f func(value int16) bool) {
  2318  	x := s.header.atomicLoadNext(0)
  2319  	for x != nil {
  2320  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2321  			x = x.atomicLoadNext(0)
  2322  			continue
  2323  		}
  2324  		if !f(x.value) {
  2325  			break
  2326  		}
  2327  		x = x.atomicLoadNext(0)
  2328  	}
  2329  }
  2330  
  2331  // Len return the length of this skip set.
  2332  func (s *Int16SetDesc) Len() int {
  2333  	return int(atomic.LoadInt64(&s.length))
  2334  }
  2335  
  2336  // IntSet represents a set based on skip list in ascending order.
  2337  type IntSet struct {
  2338  	header       *intNode
  2339  	length       int64
  2340  	highestLevel int64 // highest level for now
  2341  }
  2342  
  2343  type intNode struct {
  2344  	value int
  2345  	next  optionalArray // [level]*intNode
  2346  	mu    sync.Mutex
  2347  	flags bitflag
  2348  	level uint32
  2349  }
  2350  
  2351  func newIntNode(value int, level int) *intNode {
  2352  	node := &intNode{
  2353  		value: value,
  2354  		level: uint32(level),
  2355  	}
  2356  	if level > op1 {
  2357  		node.next.extra = new([op2]unsafe.Pointer)
  2358  	}
  2359  	return node
  2360  }
  2361  
  2362  func (n *intNode) loadNext(i int) *intNode {
  2363  	return (*intNode)(n.next.load(i))
  2364  }
  2365  
  2366  func (n *intNode) storeNext(i int, node *intNode) {
  2367  	n.next.store(i, unsafe.Pointer(node))
  2368  }
  2369  
  2370  func (n *intNode) atomicLoadNext(i int) *intNode {
  2371  	return (*intNode)(n.next.atomicLoad(i))
  2372  }
  2373  
  2374  func (n *intNode) atomicStoreNext(i int, node *intNode) {
  2375  	n.next.atomicStore(i, unsafe.Pointer(node))
  2376  }
  2377  
  2378  func (n *intNode) lessthan(value int) bool {
  2379  	return n.value < value
  2380  }
  2381  
  2382  func (n *intNode) equal(value int) bool {
  2383  	return n.value == value
  2384  }
  2385  
  2386  // NewInt return an empty int skip set in ascending order.
  2387  func NewInt() *IntSet {
  2388  	h := newIntNode(0, maxLevel)
  2389  	h.flags.SetTrue(fullyLinked)
  2390  	return &IntSet{
  2391  		header:       h,
  2392  		highestLevel: defaultHighestLevel,
  2393  	}
  2394  }
  2395  
  2396  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2397  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2398  func (s *IntSet) findNodeRemove(value int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) int {
  2399  	// lFound represents the index of the first layer at which it found a node.
  2400  	lFound, x := -1, s.header
  2401  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2402  		succ := x.atomicLoadNext(i)
  2403  		for succ != nil && succ.lessthan(value) {
  2404  			x = succ
  2405  			succ = x.atomicLoadNext(i)
  2406  		}
  2407  		preds[i] = x
  2408  		succs[i] = succ
  2409  
  2410  		// Check if the value already in the skip list.
  2411  		if lFound == -1 && succ != nil && succ.equal(value) {
  2412  			lFound = i
  2413  		}
  2414  	}
  2415  	return lFound
  2416  }
  2417  
  2418  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  2419  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2420  func (s *IntSet) findNodeAdd(value int, preds *[maxLevel]*intNode, succs *[maxLevel]*intNode) int {
  2421  	x := s.header
  2422  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2423  		succ := x.atomicLoadNext(i)
  2424  		for succ != nil && succ.lessthan(value) {
  2425  			x = succ
  2426  			succ = x.atomicLoadNext(i)
  2427  		}
  2428  		preds[i] = x
  2429  		succs[i] = succ
  2430  
  2431  		// Check if the value already in the skip list.
  2432  		if succ != nil && succ.equal(value) {
  2433  			return i
  2434  		}
  2435  	}
  2436  	return -1
  2437  }
  2438  
  2439  func unlockInt(preds [maxLevel]*intNode, highestLevel int) {
  2440  	var prevPred *intNode
  2441  	for i := highestLevel; i >= 0; i-- {
  2442  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  2443  			preds[i].mu.Unlock()
  2444  			prevPred = preds[i]
  2445  		}
  2446  	}
  2447  }
  2448  
  2449  // Add add the value into skip set, return true if this process insert the value into skip set,
  2450  // return false if this process can't insert this value, because another process has insert the same value.
  2451  //
  2452  // If the value is in the skip set but not fully linked, this process will wait until it is.
  2453  func (s *IntSet) Add(value int) bool {
  2454  	level := s.randomlevel()
  2455  	var preds, succs [maxLevel]*intNode
  2456  	for {
  2457  		lFound := s.findNodeAdd(value, &preds, &succs)
  2458  		if lFound != -1 { // indicating the value is already in the skip-list
  2459  			nodeFound := succs[lFound]
  2460  			if !nodeFound.flags.Get(marked) {
  2461  				for !nodeFound.flags.Get(fullyLinked) {
  2462  					// The node is not yet fully linked, just waits until it is.
  2463  				}
  2464  				return false
  2465  			}
  2466  			// If the node is marked, represents some other thread is in the process of deleting this node,
  2467  			// we need to add this node in next loop.
  2468  			continue
  2469  		}
  2470  		// Add this node into skip list.
  2471  		var (
  2472  			highestLocked        = -1 // the highest level being locked by this process
  2473  			valid                = true
  2474  			pred, succ, prevPred *intNode
  2475  		)
  2476  		for layer := 0; valid && layer < level; layer++ {
  2477  			pred = preds[layer]   // target node's previous node
  2478  			succ = succs[layer]   // target node's next node
  2479  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2480  				pred.mu.Lock()
  2481  				highestLocked = layer
  2482  				prevPred = pred
  2483  			}
  2484  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  2485  			// It is valid if:
  2486  			// 1. The previous node and next node both are not marked.
  2487  			// 2. The previous node's next node is succ in this layer.
  2488  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2489  		}
  2490  		if !valid {
  2491  			unlockInt(preds, highestLocked)
  2492  			continue
  2493  		}
  2494  
  2495  		nn := newIntNode(value, level)
  2496  		for layer := 0; layer < level; layer++ {
  2497  			nn.storeNext(layer, succs[layer])
  2498  			preds[layer].atomicStoreNext(layer, nn)
  2499  		}
  2500  		nn.flags.SetTrue(fullyLinked)
  2501  		unlockInt(preds, highestLocked)
  2502  		atomic.AddInt64(&s.length, 1)
  2503  		return true
  2504  	}
  2505  }
  2506  
  2507  func (s *IntSet) randomlevel() int {
  2508  	// Generate random level.
  2509  	level := randomLevel()
  2510  	// Update highest level if possible.
  2511  	for {
  2512  		hl := atomic.LoadInt64(&s.highestLevel)
  2513  		if int64(level) <= hl {
  2514  			break
  2515  		}
  2516  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  2517  			break
  2518  		}
  2519  	}
  2520  	return level
  2521  }
  2522  
  2523  // Contains check if the value is in the skip set.
  2524  func (s *IntSet) Contains(value int) bool {
  2525  	x := s.header
  2526  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2527  		nex := x.atomicLoadNext(i)
  2528  		for nex != nil && nex.lessthan(value) {
  2529  			x = nex
  2530  			nex = x.atomicLoadNext(i)
  2531  		}
  2532  
  2533  		// Check if the value already in the skip list.
  2534  		if nex != nil && nex.equal(value) {
  2535  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  2536  		}
  2537  	}
  2538  	return false
  2539  }
  2540  
  2541  // Remove a node from the skip set.
  2542  func (s *IntSet) Remove(value int) bool {
  2543  	var (
  2544  		nodeToRemove *intNode
  2545  		isMarked     bool // represents if this operation mark the node
  2546  		topLayer     = -1
  2547  		preds, succs [maxLevel]*intNode
  2548  	)
  2549  	for {
  2550  		lFound := s.findNodeRemove(value, &preds, &succs)
  2551  		if isMarked || // this process mark this node or we can find this node in the skip list
  2552  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2553  			if !isMarked { // we don't mark this node for now
  2554  				nodeToRemove = succs[lFound]
  2555  				topLayer = lFound
  2556  				nodeToRemove.mu.Lock()
  2557  				if nodeToRemove.flags.Get(marked) {
  2558  					// The node is marked by another process,
  2559  					// the physical deletion will be accomplished by another process.
  2560  					nodeToRemove.mu.Unlock()
  2561  					return false
  2562  				}
  2563  				nodeToRemove.flags.SetTrue(marked)
  2564  				isMarked = true
  2565  			}
  2566  			// Accomplish the physical deletion.
  2567  			var (
  2568  				highestLocked        = -1 // the highest level being locked by this process
  2569  				valid                = true
  2570  				pred, succ, prevPred *intNode
  2571  			)
  2572  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2573  				pred, succ = preds[layer], succs[layer]
  2574  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2575  					pred.mu.Lock()
  2576  					highestLocked = layer
  2577  					prevPred = pred
  2578  				}
  2579  				// valid check if there is another node has inserted into the skip list in this layer
  2580  				// during this process, or the previous is removed by another process.
  2581  				// It is valid if:
  2582  				// 1. the previous node exists.
  2583  				// 2. no another node has inserted into the skip list in this layer.
  2584  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2585  			}
  2586  			if !valid {
  2587  				unlockInt(preds, highestLocked)
  2588  				continue
  2589  			}
  2590  			for i := topLayer; i >= 0; i-- {
  2591  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  2592  				// So we don't need `nodeToRemove.loadNext`
  2593  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  2594  			}
  2595  			nodeToRemove.mu.Unlock()
  2596  			unlockInt(preds, highestLocked)
  2597  			atomic.AddInt64(&s.length, -1)
  2598  			return true
  2599  		}
  2600  		return false
  2601  	}
  2602  }
  2603  
  2604  // Range calls f sequentially for each value present in the skip set.
  2605  // If f returns false, range stops the iteration.
  2606  func (s *IntSet) Range(f func(value int) bool) {
  2607  	x := s.header.atomicLoadNext(0)
  2608  	for x != nil {
  2609  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2610  			x = x.atomicLoadNext(0)
  2611  			continue
  2612  		}
  2613  		if !f(x.value) {
  2614  			break
  2615  		}
  2616  		x = x.atomicLoadNext(0)
  2617  	}
  2618  }
  2619  
  2620  // Len return the length of this skip set.
  2621  func (s *IntSet) Len() int {
  2622  	return int(atomic.LoadInt64(&s.length))
  2623  }
  2624  
  2625  // IntSetDesc represents a set based on skip list in descending order.
  2626  type IntSetDesc struct {
  2627  	header       *intNodeDesc
  2628  	length       int64
  2629  	highestLevel int64 // highest level for now
  2630  }
  2631  
  2632  type intNodeDesc struct {
  2633  	value int
  2634  	next  optionalArray // [level]*intNodeDesc
  2635  	mu    sync.Mutex
  2636  	flags bitflag
  2637  	level uint32
  2638  }
  2639  
  2640  func newIntNodeDesc(value int, level int) *intNodeDesc {
  2641  	node := &intNodeDesc{
  2642  		value: value,
  2643  		level: uint32(level),
  2644  	}
  2645  	if level > op1 {
  2646  		node.next.extra = new([op2]unsafe.Pointer)
  2647  	}
  2648  	return node
  2649  }
  2650  
  2651  func (n *intNodeDesc) loadNext(i int) *intNodeDesc {
  2652  	return (*intNodeDesc)(n.next.load(i))
  2653  }
  2654  
  2655  func (n *intNodeDesc) storeNext(i int, node *intNodeDesc) {
  2656  	n.next.store(i, unsafe.Pointer(node))
  2657  }
  2658  
  2659  func (n *intNodeDesc) atomicLoadNext(i int) *intNodeDesc {
  2660  	return (*intNodeDesc)(n.next.atomicLoad(i))
  2661  }
  2662  
  2663  func (n *intNodeDesc) atomicStoreNext(i int, node *intNodeDesc) {
  2664  	n.next.atomicStore(i, unsafe.Pointer(node))
  2665  }
  2666  
  2667  func (n *intNodeDesc) lessthan(value int) bool {
  2668  	return n.value > value
  2669  }
  2670  
  2671  func (n *intNodeDesc) equal(value int) bool {
  2672  	return n.value == value
  2673  }
  2674  
  2675  // NewIntDesc return an empty int skip set in descending order.
  2676  func NewIntDesc() *IntSetDesc {
  2677  	h := newIntNodeDesc(0, maxLevel)
  2678  	h.flags.SetTrue(fullyLinked)
  2679  	return &IntSetDesc{
  2680  		header:       h,
  2681  		highestLevel: defaultHighestLevel,
  2682  	}
  2683  }
  2684  
  2685  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2686  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2687  func (s *IntSetDesc) findNodeRemove(value int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) int {
  2688  	// lFound represents the index of the first layer at which it found a node.
  2689  	lFound, x := -1, s.header
  2690  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2691  		succ := x.atomicLoadNext(i)
  2692  		for succ != nil && succ.lessthan(value) {
  2693  			x = succ
  2694  			succ = x.atomicLoadNext(i)
  2695  		}
  2696  		preds[i] = x
  2697  		succs[i] = succ
  2698  
  2699  		// Check if the value already in the skip list.
  2700  		if lFound == -1 && succ != nil && succ.equal(value) {
  2701  			lFound = i
  2702  		}
  2703  	}
  2704  	return lFound
  2705  }
  2706  
  2707  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  2708  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2709  func (s *IntSetDesc) findNodeAdd(value int, preds *[maxLevel]*intNodeDesc, succs *[maxLevel]*intNodeDesc) int {
  2710  	x := s.header
  2711  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2712  		succ := x.atomicLoadNext(i)
  2713  		for succ != nil && succ.lessthan(value) {
  2714  			x = succ
  2715  			succ = x.atomicLoadNext(i)
  2716  		}
  2717  		preds[i] = x
  2718  		succs[i] = succ
  2719  
  2720  		// Check if the value already in the skip list.
  2721  		if succ != nil && succ.equal(value) {
  2722  			return i
  2723  		}
  2724  	}
  2725  	return -1
  2726  }
  2727  
  2728  func unlockIntDesc(preds [maxLevel]*intNodeDesc, highestLevel int) {
  2729  	var prevPred *intNodeDesc
  2730  	for i := highestLevel; i >= 0; i-- {
  2731  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  2732  			preds[i].mu.Unlock()
  2733  			prevPred = preds[i]
  2734  		}
  2735  	}
  2736  }
  2737  
  2738  // Add add the value into skip set, return true if this process insert the value into skip set,
  2739  // return false if this process can't insert this value, because another process has insert the same value.
  2740  //
  2741  // If the value is in the skip set but not fully linked, this process will wait until it is.
  2742  func (s *IntSetDesc) Add(value int) bool {
  2743  	level := s.randomlevel()
  2744  	var preds, succs [maxLevel]*intNodeDesc
  2745  	for {
  2746  		lFound := s.findNodeAdd(value, &preds, &succs)
  2747  		if lFound != -1 { // indicating the value is already in the skip-list
  2748  			nodeFound := succs[lFound]
  2749  			if !nodeFound.flags.Get(marked) {
  2750  				for !nodeFound.flags.Get(fullyLinked) {
  2751  					// The node is not yet fully linked, just waits until it is.
  2752  				}
  2753  				return false
  2754  			}
  2755  			// If the node is marked, represents some other thread is in the process of deleting this node,
  2756  			// we need to add this node in next loop.
  2757  			continue
  2758  		}
  2759  		// Add this node into skip list.
  2760  		var (
  2761  			highestLocked        = -1 // the highest level being locked by this process
  2762  			valid                = true
  2763  			pred, succ, prevPred *intNodeDesc
  2764  		)
  2765  		for layer := 0; valid && layer < level; layer++ {
  2766  			pred = preds[layer]   // target node's previous node
  2767  			succ = succs[layer]   // target node's next node
  2768  			if pred != prevPred { // the node in this layer could be locked by previous loop
  2769  				pred.mu.Lock()
  2770  				highestLocked = layer
  2771  				prevPred = pred
  2772  			}
  2773  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  2774  			// It is valid if:
  2775  			// 1. The previous node and next node both are not marked.
  2776  			// 2. The previous node's next node is succ in this layer.
  2777  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  2778  		}
  2779  		if !valid {
  2780  			unlockIntDesc(preds, highestLocked)
  2781  			continue
  2782  		}
  2783  
  2784  		nn := newIntNodeDesc(value, level)
  2785  		for layer := 0; layer < level; layer++ {
  2786  			nn.storeNext(layer, succs[layer])
  2787  			preds[layer].atomicStoreNext(layer, nn)
  2788  		}
  2789  		nn.flags.SetTrue(fullyLinked)
  2790  		unlockIntDesc(preds, highestLocked)
  2791  		atomic.AddInt64(&s.length, 1)
  2792  		return true
  2793  	}
  2794  }
  2795  
  2796  func (s *IntSetDesc) randomlevel() int {
  2797  	// Generate random level.
  2798  	level := randomLevel()
  2799  	// Update highest level if possible.
  2800  	for {
  2801  		hl := atomic.LoadInt64(&s.highestLevel)
  2802  		if int64(level) <= hl {
  2803  			break
  2804  		}
  2805  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  2806  			break
  2807  		}
  2808  	}
  2809  	return level
  2810  }
  2811  
  2812  // Contains check if the value is in the skip set.
  2813  func (s *IntSetDesc) Contains(value int) bool {
  2814  	x := s.header
  2815  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2816  		nex := x.atomicLoadNext(i)
  2817  		for nex != nil && nex.lessthan(value) {
  2818  			x = nex
  2819  			nex = x.atomicLoadNext(i)
  2820  		}
  2821  
  2822  		// Check if the value already in the skip list.
  2823  		if nex != nil && nex.equal(value) {
  2824  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  2825  		}
  2826  	}
  2827  	return false
  2828  }
  2829  
  2830  // Remove a node from the skip set.
  2831  func (s *IntSetDesc) Remove(value int) bool {
  2832  	var (
  2833  		nodeToRemove *intNodeDesc
  2834  		isMarked     bool // represents if this operation mark the node
  2835  		topLayer     = -1
  2836  		preds, succs [maxLevel]*intNodeDesc
  2837  	)
  2838  	for {
  2839  		lFound := s.findNodeRemove(value, &preds, &succs)
  2840  		if isMarked || // this process mark this node or we can find this node in the skip list
  2841  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  2842  			if !isMarked { // we don't mark this node for now
  2843  				nodeToRemove = succs[lFound]
  2844  				topLayer = lFound
  2845  				nodeToRemove.mu.Lock()
  2846  				if nodeToRemove.flags.Get(marked) {
  2847  					// The node is marked by another process,
  2848  					// the physical deletion will be accomplished by another process.
  2849  					nodeToRemove.mu.Unlock()
  2850  					return false
  2851  				}
  2852  				nodeToRemove.flags.SetTrue(marked)
  2853  				isMarked = true
  2854  			}
  2855  			// Accomplish the physical deletion.
  2856  			var (
  2857  				highestLocked        = -1 // the highest level being locked by this process
  2858  				valid                = true
  2859  				pred, succ, prevPred *intNodeDesc
  2860  			)
  2861  			for layer := 0; valid && (layer <= topLayer); layer++ {
  2862  				pred, succ = preds[layer], succs[layer]
  2863  				if pred != prevPred { // the node in this layer could be locked by previous loop
  2864  					pred.mu.Lock()
  2865  					highestLocked = layer
  2866  					prevPred = pred
  2867  				}
  2868  				// valid check if there is another node has inserted into the skip list in this layer
  2869  				// during this process, or the previous is removed by another process.
  2870  				// It is valid if:
  2871  				// 1. the previous node exists.
  2872  				// 2. no another node has inserted into the skip list in this layer.
  2873  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  2874  			}
  2875  			if !valid {
  2876  				unlockIntDesc(preds, highestLocked)
  2877  				continue
  2878  			}
  2879  			for i := topLayer; i >= 0; i-- {
  2880  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  2881  				// So we don't need `nodeToRemove.loadNext`
  2882  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  2883  			}
  2884  			nodeToRemove.mu.Unlock()
  2885  			unlockIntDesc(preds, highestLocked)
  2886  			atomic.AddInt64(&s.length, -1)
  2887  			return true
  2888  		}
  2889  		return false
  2890  	}
  2891  }
  2892  
  2893  // Range calls f sequentially for each value present in the skip set.
  2894  // If f returns false, range stops the iteration.
  2895  func (s *IntSetDesc) Range(f func(value int) bool) {
  2896  	x := s.header.atomicLoadNext(0)
  2897  	for x != nil {
  2898  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  2899  			x = x.atomicLoadNext(0)
  2900  			continue
  2901  		}
  2902  		if !f(x.value) {
  2903  			break
  2904  		}
  2905  		x = x.atomicLoadNext(0)
  2906  	}
  2907  }
  2908  
  2909  // Len return the length of this skip set.
  2910  func (s *IntSetDesc) Len() int {
  2911  	return int(atomic.LoadInt64(&s.length))
  2912  }
  2913  
  2914  // Uint64Set represents a set based on skip list in ascending order.
  2915  type Uint64Set struct {
  2916  	header       *uint64Node
  2917  	length       int64
  2918  	highestLevel int64 // highest level for now
  2919  }
  2920  
  2921  type uint64Node struct {
  2922  	value uint64
  2923  	next  optionalArray // [level]*uint64Node
  2924  	mu    sync.Mutex
  2925  	flags bitflag
  2926  	level uint32
  2927  }
  2928  
  2929  func newUuint64Node(value uint64, level int) *uint64Node {
  2930  	node := &uint64Node{
  2931  		value: value,
  2932  		level: uint32(level),
  2933  	}
  2934  	if level > op1 {
  2935  		node.next.extra = new([op2]unsafe.Pointer)
  2936  	}
  2937  	return node
  2938  }
  2939  
  2940  func (n *uint64Node) loadNext(i int) *uint64Node {
  2941  	return (*uint64Node)(n.next.load(i))
  2942  }
  2943  
  2944  func (n *uint64Node) storeNext(i int, node *uint64Node) {
  2945  	n.next.store(i, unsafe.Pointer(node))
  2946  }
  2947  
  2948  func (n *uint64Node) atomicLoadNext(i int) *uint64Node {
  2949  	return (*uint64Node)(n.next.atomicLoad(i))
  2950  }
  2951  
  2952  func (n *uint64Node) atomicStoreNext(i int, node *uint64Node) {
  2953  	n.next.atomicStore(i, unsafe.Pointer(node))
  2954  }
  2955  
  2956  func (n *uint64Node) lessthan(value uint64) bool {
  2957  	return n.value < value
  2958  }
  2959  
  2960  func (n *uint64Node) equal(value uint64) bool {
  2961  	return n.value == value
  2962  }
  2963  
  2964  // NewUint64 return an empty uint64 skip set in ascending order.
  2965  func NewUint64() *Uint64Set {
  2966  	h := newUuint64Node(0, maxLevel)
  2967  	h.flags.SetTrue(fullyLinked)
  2968  	return &Uint64Set{
  2969  		header:       h,
  2970  		highestLevel: defaultHighestLevel,
  2971  	}
  2972  }
  2973  
  2974  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  2975  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2976  func (s *Uint64Set) findNodeRemove(value uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) int {
  2977  	// lFound represents the index of the first layer at which it found a node.
  2978  	lFound, x := -1, s.header
  2979  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  2980  		succ := x.atomicLoadNext(i)
  2981  		for succ != nil && succ.lessthan(value) {
  2982  			x = succ
  2983  			succ = x.atomicLoadNext(i)
  2984  		}
  2985  		preds[i] = x
  2986  		succs[i] = succ
  2987  
  2988  		// Check if the value already in the skip list.
  2989  		if lFound == -1 && succ != nil && succ.equal(value) {
  2990  			lFound = i
  2991  		}
  2992  	}
  2993  	return lFound
  2994  }
  2995  
  2996  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  2997  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  2998  func (s *Uint64Set) findNodeAdd(value uint64, preds *[maxLevel]*uint64Node, succs *[maxLevel]*uint64Node) int {
  2999  	x := s.header
  3000  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3001  		succ := x.atomicLoadNext(i)
  3002  		for succ != nil && succ.lessthan(value) {
  3003  			x = succ
  3004  			succ = x.atomicLoadNext(i)
  3005  		}
  3006  		preds[i] = x
  3007  		succs[i] = succ
  3008  
  3009  		// Check if the value already in the skip list.
  3010  		if succ != nil && succ.equal(value) {
  3011  			return i
  3012  		}
  3013  	}
  3014  	return -1
  3015  }
  3016  
  3017  func unlockUint64(preds [maxLevel]*uint64Node, highestLevel int) {
  3018  	var prevPred *uint64Node
  3019  	for i := highestLevel; i >= 0; i-- {
  3020  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  3021  			preds[i].mu.Unlock()
  3022  			prevPred = preds[i]
  3023  		}
  3024  	}
  3025  }
  3026  
  3027  // Add add the value into skip set, return true if this process insert the value into skip set,
  3028  // return false if this process can't insert this value, because another process has insert the same value.
  3029  //
  3030  // If the value is in the skip set but not fully linked, this process will wait until it is.
  3031  func (s *Uint64Set) Add(value uint64) bool {
  3032  	level := s.randomlevel()
  3033  	var preds, succs [maxLevel]*uint64Node
  3034  	for {
  3035  		lFound := s.findNodeAdd(value, &preds, &succs)
  3036  		if lFound != -1 { // indicating the value is already in the skip-list
  3037  			nodeFound := succs[lFound]
  3038  			if !nodeFound.flags.Get(marked) {
  3039  				for !nodeFound.flags.Get(fullyLinked) {
  3040  					// The node is not yet fully linked, just waits until it is.
  3041  				}
  3042  				return false
  3043  			}
  3044  			// If the node is marked, represents some other thread is in the process of deleting this node,
  3045  			// we need to add this node in next loop.
  3046  			continue
  3047  		}
  3048  		// Add this node into skip list.
  3049  		var (
  3050  			highestLocked        = -1 // the highest level being locked by this process
  3051  			valid                = true
  3052  			pred, succ, prevPred *uint64Node
  3053  		)
  3054  		for layer := 0; valid && layer < level; layer++ {
  3055  			pred = preds[layer]   // target node's previous node
  3056  			succ = succs[layer]   // target node's next node
  3057  			if pred != prevPred { // the node in this layer could be locked by previous loop
  3058  				pred.mu.Lock()
  3059  				highestLocked = layer
  3060  				prevPred = pred
  3061  			}
  3062  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  3063  			// It is valid if:
  3064  			// 1. The previous node and next node both are not marked.
  3065  			// 2. The previous node's next node is succ in this layer.
  3066  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3067  		}
  3068  		if !valid {
  3069  			unlockUint64(preds, highestLocked)
  3070  			continue
  3071  		}
  3072  
  3073  		nn := newUuint64Node(value, level)
  3074  		for layer := 0; layer < level; layer++ {
  3075  			nn.storeNext(layer, succs[layer])
  3076  			preds[layer].atomicStoreNext(layer, nn)
  3077  		}
  3078  		nn.flags.SetTrue(fullyLinked)
  3079  		unlockUint64(preds, highestLocked)
  3080  		atomic.AddInt64(&s.length, 1)
  3081  		return true
  3082  	}
  3083  }
  3084  
  3085  func (s *Uint64Set) randomlevel() int {
  3086  	// Generate random level.
  3087  	level := randomLevel()
  3088  	// Update highest level if possible.
  3089  	for {
  3090  		hl := atomic.LoadInt64(&s.highestLevel)
  3091  		if int64(level) <= hl {
  3092  			break
  3093  		}
  3094  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3095  			break
  3096  		}
  3097  	}
  3098  	return level
  3099  }
  3100  
  3101  // Contains check if the value is in the skip set.
  3102  func (s *Uint64Set) Contains(value uint64) bool {
  3103  	x := s.header
  3104  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3105  		nex := x.atomicLoadNext(i)
  3106  		for nex != nil && nex.lessthan(value) {
  3107  			x = nex
  3108  			nex = x.atomicLoadNext(i)
  3109  		}
  3110  
  3111  		// Check if the value already in the skip list.
  3112  		if nex != nil && nex.equal(value) {
  3113  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  3114  		}
  3115  	}
  3116  	return false
  3117  }
  3118  
  3119  // Remove a node from the skip set.
  3120  func (s *Uint64Set) Remove(value uint64) bool {
  3121  	var (
  3122  		nodeToRemove *uint64Node
  3123  		isMarked     bool // represents if this operation mark the node
  3124  		topLayer     = -1
  3125  		preds, succs [maxLevel]*uint64Node
  3126  	)
  3127  	for {
  3128  		lFound := s.findNodeRemove(value, &preds, &succs)
  3129  		if isMarked || // this process mark this node or we can find this node in the skip list
  3130  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3131  			if !isMarked { // we don't mark this node for now
  3132  				nodeToRemove = succs[lFound]
  3133  				topLayer = lFound
  3134  				nodeToRemove.mu.Lock()
  3135  				if nodeToRemove.flags.Get(marked) {
  3136  					// The node is marked by another process,
  3137  					// the physical deletion will be accomplished by another process.
  3138  					nodeToRemove.mu.Unlock()
  3139  					return false
  3140  				}
  3141  				nodeToRemove.flags.SetTrue(marked)
  3142  				isMarked = true
  3143  			}
  3144  			// Accomplish the physical deletion.
  3145  			var (
  3146  				highestLocked        = -1 // the highest level being locked by this process
  3147  				valid                = true
  3148  				pred, succ, prevPred *uint64Node
  3149  			)
  3150  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3151  				pred, succ = preds[layer], succs[layer]
  3152  				if pred != prevPred { // the node in this layer could be locked by previous loop
  3153  					pred.mu.Lock()
  3154  					highestLocked = layer
  3155  					prevPred = pred
  3156  				}
  3157  				// valid check if there is another node has inserted into the skip list in this layer
  3158  				// during this process, or the previous is removed by another process.
  3159  				// It is valid if:
  3160  				// 1. the previous node exists.
  3161  				// 2. no another node has inserted into the skip list in this layer.
  3162  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  3163  			}
  3164  			if !valid {
  3165  				unlockUint64(preds, highestLocked)
  3166  				continue
  3167  			}
  3168  			for i := topLayer; i >= 0; i-- {
  3169  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  3170  				// So we don't need `nodeToRemove.loadNext`
  3171  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  3172  			}
  3173  			nodeToRemove.mu.Unlock()
  3174  			unlockUint64(preds, highestLocked)
  3175  			atomic.AddInt64(&s.length, -1)
  3176  			return true
  3177  		}
  3178  		return false
  3179  	}
  3180  }
  3181  
  3182  // Range calls f sequentially for each value present in the skip set.
  3183  // If f returns false, range stops the iteration.
  3184  func (s *Uint64Set) Range(f func(value uint64) bool) {
  3185  	x := s.header.atomicLoadNext(0)
  3186  	for x != nil {
  3187  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  3188  			x = x.atomicLoadNext(0)
  3189  			continue
  3190  		}
  3191  		if !f(x.value) {
  3192  			break
  3193  		}
  3194  		x = x.atomicLoadNext(0)
  3195  	}
  3196  }
  3197  
  3198  // Len return the length of this skip set.
  3199  func (s *Uint64Set) Len() int {
  3200  	return int(atomic.LoadInt64(&s.length))
  3201  }
  3202  
  3203  // Uint64SetDesc represents a set based on skip list in descending order.
  3204  type Uint64SetDesc struct {
  3205  	header       *uint64NodeDesc
  3206  	length       int64
  3207  	highestLevel int64 // highest level for now
  3208  }
  3209  
  3210  type uint64NodeDesc struct {
  3211  	value uint64
  3212  	next  optionalArray // [level]*uint64NodeDesc
  3213  	mu    sync.Mutex
  3214  	flags bitflag
  3215  	level uint32
  3216  }
  3217  
  3218  func newUuint64NodeDescDesc(value uint64, level int) *uint64NodeDesc {
  3219  	node := &uint64NodeDesc{
  3220  		value: value,
  3221  		level: uint32(level),
  3222  	}
  3223  	if level > op1 {
  3224  		node.next.extra = new([op2]unsafe.Pointer)
  3225  	}
  3226  	return node
  3227  }
  3228  
  3229  func (n *uint64NodeDesc) loadNext(i int) *uint64NodeDesc {
  3230  	return (*uint64NodeDesc)(n.next.load(i))
  3231  }
  3232  
  3233  func (n *uint64NodeDesc) storeNext(i int, node *uint64NodeDesc) {
  3234  	n.next.store(i, unsafe.Pointer(node))
  3235  }
  3236  
  3237  func (n *uint64NodeDesc) atomicLoadNext(i int) *uint64NodeDesc {
  3238  	return (*uint64NodeDesc)(n.next.atomicLoad(i))
  3239  }
  3240  
  3241  func (n *uint64NodeDesc) atomicStoreNext(i int, node *uint64NodeDesc) {
  3242  	n.next.atomicStore(i, unsafe.Pointer(node))
  3243  }
  3244  
  3245  func (n *uint64NodeDesc) lessthan(value uint64) bool {
  3246  	return n.value > value
  3247  }
  3248  
  3249  func (n *uint64NodeDesc) equal(value uint64) bool {
  3250  	return n.value == value
  3251  }
  3252  
  3253  // NewUint64Desc return an empty uint64 skip set in descending order.
  3254  func NewUint64Desc() *Uint64SetDesc {
  3255  	h := newUuint64NodeDescDesc(0, maxLevel)
  3256  	h.flags.SetTrue(fullyLinked)
  3257  	return &Uint64SetDesc{
  3258  		header:       h,
  3259  		highestLevel: defaultHighestLevel,
  3260  	}
  3261  }
  3262  
  3263  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  3264  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3265  func (s *Uint64SetDesc) findNodeRemove(value uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) int {
  3266  	// lFound represents the index of the first layer at which it found a node.
  3267  	lFound, x := -1, s.header
  3268  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3269  		succ := x.atomicLoadNext(i)
  3270  		for succ != nil && succ.lessthan(value) {
  3271  			x = succ
  3272  			succ = x.atomicLoadNext(i)
  3273  		}
  3274  		preds[i] = x
  3275  		succs[i] = succ
  3276  
  3277  		// Check if the value already in the skip list.
  3278  		if lFound == -1 && succ != nil && succ.equal(value) {
  3279  			lFound = i
  3280  		}
  3281  	}
  3282  	return lFound
  3283  }
  3284  
  3285  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  3286  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3287  func (s *Uint64SetDesc) findNodeAdd(value uint64, preds *[maxLevel]*uint64NodeDesc, succs *[maxLevel]*uint64NodeDesc) int {
  3288  	x := s.header
  3289  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3290  		succ := x.atomicLoadNext(i)
  3291  		for succ != nil && succ.lessthan(value) {
  3292  			x = succ
  3293  			succ = x.atomicLoadNext(i)
  3294  		}
  3295  		preds[i] = x
  3296  		succs[i] = succ
  3297  
  3298  		// Check if the value already in the skip list.
  3299  		if succ != nil && succ.equal(value) {
  3300  			return i
  3301  		}
  3302  	}
  3303  	return -1
  3304  }
  3305  
  3306  func unlockUint64Desc(preds [maxLevel]*uint64NodeDesc, highestLevel int) {
  3307  	var prevPred *uint64NodeDesc
  3308  	for i := highestLevel; i >= 0; i-- {
  3309  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  3310  			preds[i].mu.Unlock()
  3311  			prevPred = preds[i]
  3312  		}
  3313  	}
  3314  }
  3315  
  3316  // Add add the value into skip set, return true if this process insert the value into skip set,
  3317  // return false if this process can't insert this value, because another process has insert the same value.
  3318  //
  3319  // If the value is in the skip set but not fully linked, this process will wait until it is.
  3320  func (s *Uint64SetDesc) Add(value uint64) bool {
  3321  	level := s.randomlevel()
  3322  	var preds, succs [maxLevel]*uint64NodeDesc
  3323  	for {
  3324  		lFound := s.findNodeAdd(value, &preds, &succs)
  3325  		if lFound != -1 { // indicating the value is already in the skip-list
  3326  			nodeFound := succs[lFound]
  3327  			if !nodeFound.flags.Get(marked) {
  3328  				for !nodeFound.flags.Get(fullyLinked) {
  3329  					// The node is not yet fully linked, just waits until it is.
  3330  				}
  3331  				return false
  3332  			}
  3333  			// If the node is marked, represents some other thread is in the process of deleting this node,
  3334  			// we need to add this node in next loop.
  3335  			continue
  3336  		}
  3337  		// Add this node into skip list.
  3338  		var (
  3339  			highestLocked        = -1 // the highest level being locked by this process
  3340  			valid                = true
  3341  			pred, succ, prevPred *uint64NodeDesc
  3342  		)
  3343  		for layer := 0; valid && layer < level; layer++ {
  3344  			pred = preds[layer]   // target node's previous node
  3345  			succ = succs[layer]   // target node's next node
  3346  			if pred != prevPred { // the node in this layer could be locked by previous loop
  3347  				pred.mu.Lock()
  3348  				highestLocked = layer
  3349  				prevPred = pred
  3350  			}
  3351  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  3352  			// It is valid if:
  3353  			// 1. The previous node and next node both are not marked.
  3354  			// 2. The previous node's next node is succ in this layer.
  3355  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3356  		}
  3357  		if !valid {
  3358  			unlockUint64Desc(preds, highestLocked)
  3359  			continue
  3360  		}
  3361  
  3362  		nn := newUuint64NodeDescDesc(value, level)
  3363  		for layer := 0; layer < level; layer++ {
  3364  			nn.storeNext(layer, succs[layer])
  3365  			preds[layer].atomicStoreNext(layer, nn)
  3366  		}
  3367  		nn.flags.SetTrue(fullyLinked)
  3368  		unlockUint64Desc(preds, highestLocked)
  3369  		atomic.AddInt64(&s.length, 1)
  3370  		return true
  3371  	}
  3372  }
  3373  
  3374  func (s *Uint64SetDesc) randomlevel() int {
  3375  	// Generate random level.
  3376  	level := randomLevel()
  3377  	// Update highest level if possible.
  3378  	for {
  3379  		hl := atomic.LoadInt64(&s.highestLevel)
  3380  		if int64(level) <= hl {
  3381  			break
  3382  		}
  3383  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3384  			break
  3385  		}
  3386  	}
  3387  	return level
  3388  }
  3389  
  3390  // Contains check if the value is in the skip set.
  3391  func (s *Uint64SetDesc) Contains(value uint64) bool {
  3392  	x := s.header
  3393  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3394  		nex := x.atomicLoadNext(i)
  3395  		for nex != nil && nex.lessthan(value) {
  3396  			x = nex
  3397  			nex = x.atomicLoadNext(i)
  3398  		}
  3399  
  3400  		// Check if the value already in the skip list.
  3401  		if nex != nil && nex.equal(value) {
  3402  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  3403  		}
  3404  	}
  3405  	return false
  3406  }
  3407  
  3408  // Remove a node from the skip set.
  3409  func (s *Uint64SetDesc) Remove(value uint64) bool {
  3410  	var (
  3411  		nodeToRemove *uint64NodeDesc
  3412  		isMarked     bool // represents if this operation mark the node
  3413  		topLayer     = -1
  3414  		preds, succs [maxLevel]*uint64NodeDesc
  3415  	)
  3416  	for {
  3417  		lFound := s.findNodeRemove(value, &preds, &succs)
  3418  		if isMarked || // this process mark this node or we can find this node in the skip list
  3419  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3420  			if !isMarked { // we don't mark this node for now
  3421  				nodeToRemove = succs[lFound]
  3422  				topLayer = lFound
  3423  				nodeToRemove.mu.Lock()
  3424  				if nodeToRemove.flags.Get(marked) {
  3425  					// The node is marked by another process,
  3426  					// the physical deletion will be accomplished by another process.
  3427  					nodeToRemove.mu.Unlock()
  3428  					return false
  3429  				}
  3430  				nodeToRemove.flags.SetTrue(marked)
  3431  				isMarked = true
  3432  			}
  3433  			// Accomplish the physical deletion.
  3434  			var (
  3435  				highestLocked        = -1 // the highest level being locked by this process
  3436  				valid                = true
  3437  				pred, succ, prevPred *uint64NodeDesc
  3438  			)
  3439  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3440  				pred, succ = preds[layer], succs[layer]
  3441  				if pred != prevPred { // the node in this layer could be locked by previous loop
  3442  					pred.mu.Lock()
  3443  					highestLocked = layer
  3444  					prevPred = pred
  3445  				}
  3446  				// valid check if there is another node has inserted into the skip list in this layer
  3447  				// during this process, or the previous is removed by another process.
  3448  				// It is valid if:
  3449  				// 1. the previous node exists.
  3450  				// 2. no another node has inserted into the skip list in this layer.
  3451  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  3452  			}
  3453  			if !valid {
  3454  				unlockUint64Desc(preds, highestLocked)
  3455  				continue
  3456  			}
  3457  			for i := topLayer; i >= 0; i-- {
  3458  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  3459  				// So we don't need `nodeToRemove.loadNext`
  3460  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  3461  			}
  3462  			nodeToRemove.mu.Unlock()
  3463  			unlockUint64Desc(preds, highestLocked)
  3464  			atomic.AddInt64(&s.length, -1)
  3465  			return true
  3466  		}
  3467  		return false
  3468  	}
  3469  }
  3470  
  3471  // Range calls f sequentially for each value present in the skip set.
  3472  // If f returns false, range stops the iteration.
  3473  func (s *Uint64SetDesc) Range(f func(value uint64) bool) {
  3474  	x := s.header.atomicLoadNext(0)
  3475  	for x != nil {
  3476  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  3477  			x = x.atomicLoadNext(0)
  3478  			continue
  3479  		}
  3480  		if !f(x.value) {
  3481  			break
  3482  		}
  3483  		x = x.atomicLoadNext(0)
  3484  	}
  3485  }
  3486  
  3487  // Len return the length of this skip set.
  3488  func (s *Uint64SetDesc) Len() int {
  3489  	return int(atomic.LoadInt64(&s.length))
  3490  }
  3491  
  3492  // Uint32Set represents a set based on skip list in ascending order.
  3493  type Uint32Set struct {
  3494  	header       *uint32Node
  3495  	length       int64
  3496  	highestLevel int64 // highest level for now
  3497  }
  3498  
  3499  type uint32Node struct {
  3500  	value uint32
  3501  	next  optionalArray // [level]*uint32Node
  3502  	mu    sync.Mutex
  3503  	flags bitflag
  3504  	level uint32
  3505  }
  3506  
  3507  func newUint32Node(value uint32, level int) *uint32Node {
  3508  	node := &uint32Node{
  3509  		value: value,
  3510  		level: uint32(level),
  3511  	}
  3512  	if level > op1 {
  3513  		node.next.extra = new([op2]unsafe.Pointer)
  3514  	}
  3515  	return node
  3516  }
  3517  
  3518  func (n *uint32Node) loadNext(i int) *uint32Node {
  3519  	return (*uint32Node)(n.next.load(i))
  3520  }
  3521  
  3522  func (n *uint32Node) storeNext(i int, node *uint32Node) {
  3523  	n.next.store(i, unsafe.Pointer(node))
  3524  }
  3525  
  3526  func (n *uint32Node) atomicLoadNext(i int) *uint32Node {
  3527  	return (*uint32Node)(n.next.atomicLoad(i))
  3528  }
  3529  
  3530  func (n *uint32Node) atomicStoreNext(i int, node *uint32Node) {
  3531  	n.next.atomicStore(i, unsafe.Pointer(node))
  3532  }
  3533  
  3534  func (n *uint32Node) lessthan(value uint32) bool {
  3535  	return n.value < value
  3536  }
  3537  
  3538  func (n *uint32Node) equal(value uint32) bool {
  3539  	return n.value == value
  3540  }
  3541  
  3542  // NewUint32 return an empty uint32 skip set in ascending order.
  3543  func NewUint32() *Uint32Set {
  3544  	h := newUint32Node(0, maxLevel)
  3545  	h.flags.SetTrue(fullyLinked)
  3546  	return &Uint32Set{
  3547  		header:       h,
  3548  		highestLevel: defaultHighestLevel,
  3549  	}
  3550  }
  3551  
  3552  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  3553  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3554  func (s *Uint32Set) findNodeRemove(value uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) int {
  3555  	// lFound represents the index of the first layer at which it found a node.
  3556  	lFound, x := -1, s.header
  3557  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3558  		succ := x.atomicLoadNext(i)
  3559  		for succ != nil && succ.lessthan(value) {
  3560  			x = succ
  3561  			succ = x.atomicLoadNext(i)
  3562  		}
  3563  		preds[i] = x
  3564  		succs[i] = succ
  3565  
  3566  		// Check if the value already in the skip list.
  3567  		if lFound == -1 && succ != nil && succ.equal(value) {
  3568  			lFound = i
  3569  		}
  3570  	}
  3571  	return lFound
  3572  }
  3573  
  3574  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  3575  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3576  func (s *Uint32Set) findNodeAdd(value uint32, preds *[maxLevel]*uint32Node, succs *[maxLevel]*uint32Node) int {
  3577  	x := s.header
  3578  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3579  		succ := x.atomicLoadNext(i)
  3580  		for succ != nil && succ.lessthan(value) {
  3581  			x = succ
  3582  			succ = x.atomicLoadNext(i)
  3583  		}
  3584  		preds[i] = x
  3585  		succs[i] = succ
  3586  
  3587  		// Check if the value already in the skip list.
  3588  		if succ != nil && succ.equal(value) {
  3589  			return i
  3590  		}
  3591  	}
  3592  	return -1
  3593  }
  3594  
  3595  func unlockUint32(preds [maxLevel]*uint32Node, highestLevel int) {
  3596  	var prevPred *uint32Node
  3597  	for i := highestLevel; i >= 0; i-- {
  3598  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  3599  			preds[i].mu.Unlock()
  3600  			prevPred = preds[i]
  3601  		}
  3602  	}
  3603  }
  3604  
  3605  // Add add the value into skip set, return true if this process insert the value into skip set,
  3606  // return false if this process can't insert this value, because another process has insert the same value.
  3607  //
  3608  // If the value is in the skip set but not fully linked, this process will wait until it is.
  3609  func (s *Uint32Set) Add(value uint32) bool {
  3610  	level := s.randomlevel()
  3611  	var preds, succs [maxLevel]*uint32Node
  3612  	for {
  3613  		lFound := s.findNodeAdd(value, &preds, &succs)
  3614  		if lFound != -1 { // indicating the value is already in the skip-list
  3615  			nodeFound := succs[lFound]
  3616  			if !nodeFound.flags.Get(marked) {
  3617  				for !nodeFound.flags.Get(fullyLinked) {
  3618  					// The node is not yet fully linked, just waits until it is.
  3619  				}
  3620  				return false
  3621  			}
  3622  			// If the node is marked, represents some other thread is in the process of deleting this node,
  3623  			// we need to add this node in next loop.
  3624  			continue
  3625  		}
  3626  		// Add this node into skip list.
  3627  		var (
  3628  			highestLocked        = -1 // the highest level being locked by this process
  3629  			valid                = true
  3630  			pred, succ, prevPred *uint32Node
  3631  		)
  3632  		for layer := 0; valid && layer < level; layer++ {
  3633  			pred = preds[layer]   // target node's previous node
  3634  			succ = succs[layer]   // target node's next node
  3635  			if pred != prevPred { // the node in this layer could be locked by previous loop
  3636  				pred.mu.Lock()
  3637  				highestLocked = layer
  3638  				prevPred = pred
  3639  			}
  3640  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  3641  			// It is valid if:
  3642  			// 1. The previous node and next node both are not marked.
  3643  			// 2. The previous node's next node is succ in this layer.
  3644  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3645  		}
  3646  		if !valid {
  3647  			unlockUint32(preds, highestLocked)
  3648  			continue
  3649  		}
  3650  
  3651  		nn := newUint32Node(value, level)
  3652  		for layer := 0; layer < level; layer++ {
  3653  			nn.storeNext(layer, succs[layer])
  3654  			preds[layer].atomicStoreNext(layer, nn)
  3655  		}
  3656  		nn.flags.SetTrue(fullyLinked)
  3657  		unlockUint32(preds, highestLocked)
  3658  		atomic.AddInt64(&s.length, 1)
  3659  		return true
  3660  	}
  3661  }
  3662  
  3663  func (s *Uint32Set) randomlevel() int {
  3664  	// Generate random level.
  3665  	level := randomLevel()
  3666  	// Update highest level if possible.
  3667  	for {
  3668  		hl := atomic.LoadInt64(&s.highestLevel)
  3669  		if int64(level) <= hl {
  3670  			break
  3671  		}
  3672  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3673  			break
  3674  		}
  3675  	}
  3676  	return level
  3677  }
  3678  
  3679  // Contains check if the value is in the skip set.
  3680  func (s *Uint32Set) Contains(value uint32) bool {
  3681  	x := s.header
  3682  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3683  		nex := x.atomicLoadNext(i)
  3684  		for nex != nil && nex.lessthan(value) {
  3685  			x = nex
  3686  			nex = x.atomicLoadNext(i)
  3687  		}
  3688  
  3689  		// Check if the value already in the skip list.
  3690  		if nex != nil && nex.equal(value) {
  3691  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  3692  		}
  3693  	}
  3694  	return false
  3695  }
  3696  
  3697  // Remove a node from the skip set.
  3698  func (s *Uint32Set) Remove(value uint32) bool {
  3699  	var (
  3700  		nodeToRemove *uint32Node
  3701  		isMarked     bool // represents if this operation mark the node
  3702  		topLayer     = -1
  3703  		preds, succs [maxLevel]*uint32Node
  3704  	)
  3705  	for {
  3706  		lFound := s.findNodeRemove(value, &preds, &succs)
  3707  		if isMarked || // this process mark this node or we can find this node in the skip list
  3708  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3709  			if !isMarked { // we don't mark this node for now
  3710  				nodeToRemove = succs[lFound]
  3711  				topLayer = lFound
  3712  				nodeToRemove.mu.Lock()
  3713  				if nodeToRemove.flags.Get(marked) {
  3714  					// The node is marked by another process,
  3715  					// the physical deletion will be accomplished by another process.
  3716  					nodeToRemove.mu.Unlock()
  3717  					return false
  3718  				}
  3719  				nodeToRemove.flags.SetTrue(marked)
  3720  				isMarked = true
  3721  			}
  3722  			// Accomplish the physical deletion.
  3723  			var (
  3724  				highestLocked        = -1 // the highest level being locked by this process
  3725  				valid                = true
  3726  				pred, succ, prevPred *uint32Node
  3727  			)
  3728  			for layer := 0; valid && (layer <= topLayer); layer++ {
  3729  				pred, succ = preds[layer], succs[layer]
  3730  				if pred != prevPred { // the node in this layer could be locked by previous loop
  3731  					pred.mu.Lock()
  3732  					highestLocked = layer
  3733  					prevPred = pred
  3734  				}
  3735  				// valid check if there is another node has inserted into the skip list in this layer
  3736  				// during this process, or the previous is removed by another process.
  3737  				// It is valid if:
  3738  				// 1. the previous node exists.
  3739  				// 2. no another node has inserted into the skip list in this layer.
  3740  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  3741  			}
  3742  			if !valid {
  3743  				unlockUint32(preds, highestLocked)
  3744  				continue
  3745  			}
  3746  			for i := topLayer; i >= 0; i-- {
  3747  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  3748  				// So we don't need `nodeToRemove.loadNext`
  3749  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  3750  			}
  3751  			nodeToRemove.mu.Unlock()
  3752  			unlockUint32(preds, highestLocked)
  3753  			atomic.AddInt64(&s.length, -1)
  3754  			return true
  3755  		}
  3756  		return false
  3757  	}
  3758  }
  3759  
  3760  // Range calls f sequentially for each value present in the skip set.
  3761  // If f returns false, range stops the iteration.
  3762  func (s *Uint32Set) Range(f func(value uint32) bool) {
  3763  	x := s.header.atomicLoadNext(0)
  3764  	for x != nil {
  3765  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  3766  			x = x.atomicLoadNext(0)
  3767  			continue
  3768  		}
  3769  		if !f(x.value) {
  3770  			break
  3771  		}
  3772  		x = x.atomicLoadNext(0)
  3773  	}
  3774  }
  3775  
  3776  // Len return the length of this skip set.
  3777  func (s *Uint32Set) Len() int {
  3778  	return int(atomic.LoadInt64(&s.length))
  3779  }
  3780  
  3781  // Uint32SetDesc represents a set based on skip list in descending order.
  3782  type Uint32SetDesc struct {
  3783  	header       *uint32NodeDesc
  3784  	length       int64
  3785  	highestLevel int64 // highest level for now
  3786  }
  3787  
  3788  type uint32NodeDesc struct {
  3789  	value uint32
  3790  	next  optionalArray // [level]*uint32NodeDesc
  3791  	mu    sync.Mutex
  3792  	flags bitflag
  3793  	level uint32
  3794  }
  3795  
  3796  func newUint32NodeDesc(value uint32, level int) *uint32NodeDesc {
  3797  	node := &uint32NodeDesc{
  3798  		value: value,
  3799  		level: uint32(level),
  3800  	}
  3801  	if level > op1 {
  3802  		node.next.extra = new([op2]unsafe.Pointer)
  3803  	}
  3804  	return node
  3805  }
  3806  
  3807  func (n *uint32NodeDesc) loadNext(i int) *uint32NodeDesc {
  3808  	return (*uint32NodeDesc)(n.next.load(i))
  3809  }
  3810  
  3811  func (n *uint32NodeDesc) storeNext(i int, node *uint32NodeDesc) {
  3812  	n.next.store(i, unsafe.Pointer(node))
  3813  }
  3814  
  3815  func (n *uint32NodeDesc) atomicLoadNext(i int) *uint32NodeDesc {
  3816  	return (*uint32NodeDesc)(n.next.atomicLoad(i))
  3817  }
  3818  
  3819  func (n *uint32NodeDesc) atomicStoreNext(i int, node *uint32NodeDesc) {
  3820  	n.next.atomicStore(i, unsafe.Pointer(node))
  3821  }
  3822  
  3823  func (n *uint32NodeDesc) lessthan(value uint32) bool {
  3824  	return n.value > value
  3825  }
  3826  
  3827  func (n *uint32NodeDesc) equal(value uint32) bool {
  3828  	return n.value == value
  3829  }
  3830  
  3831  // NewUint32Desc return an empty uint32 skip set in descending order.
  3832  func NewUint32Desc() *Uint32SetDesc {
  3833  	h := newUint32NodeDesc(0, maxLevel)
  3834  	h.flags.SetTrue(fullyLinked)
  3835  	return &Uint32SetDesc{
  3836  		header:       h,
  3837  		highestLevel: defaultHighestLevel,
  3838  	}
  3839  }
  3840  
  3841  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  3842  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3843  func (s *Uint32SetDesc) findNodeRemove(value uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) int {
  3844  	// lFound represents the index of the first layer at which it found a node.
  3845  	lFound, x := -1, s.header
  3846  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3847  		succ := x.atomicLoadNext(i)
  3848  		for succ != nil && succ.lessthan(value) {
  3849  			x = succ
  3850  			succ = x.atomicLoadNext(i)
  3851  		}
  3852  		preds[i] = x
  3853  		succs[i] = succ
  3854  
  3855  		// Check if the value already in the skip list.
  3856  		if lFound == -1 && succ != nil && succ.equal(value) {
  3857  			lFound = i
  3858  		}
  3859  	}
  3860  	return lFound
  3861  }
  3862  
  3863  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  3864  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  3865  func (s *Uint32SetDesc) findNodeAdd(value uint32, preds *[maxLevel]*uint32NodeDesc, succs *[maxLevel]*uint32NodeDesc) int {
  3866  	x := s.header
  3867  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3868  		succ := x.atomicLoadNext(i)
  3869  		for succ != nil && succ.lessthan(value) {
  3870  			x = succ
  3871  			succ = x.atomicLoadNext(i)
  3872  		}
  3873  		preds[i] = x
  3874  		succs[i] = succ
  3875  
  3876  		// Check if the value already in the skip list.
  3877  		if succ != nil && succ.equal(value) {
  3878  			return i
  3879  		}
  3880  	}
  3881  	return -1
  3882  }
  3883  
  3884  func unlockUint32Desc(preds [maxLevel]*uint32NodeDesc, highestLevel int) {
  3885  	var prevPred *uint32NodeDesc
  3886  	for i := highestLevel; i >= 0; i-- {
  3887  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  3888  			preds[i].mu.Unlock()
  3889  			prevPred = preds[i]
  3890  		}
  3891  	}
  3892  }
  3893  
  3894  // Add add the value into skip set, return true if this process insert the value into skip set,
  3895  // return false if this process can't insert this value, because another process has insert the same value.
  3896  //
  3897  // If the value is in the skip set but not fully linked, this process will wait until it is.
  3898  func (s *Uint32SetDesc) Add(value uint32) bool {
  3899  	level := s.randomlevel()
  3900  	var preds, succs [maxLevel]*uint32NodeDesc
  3901  	for {
  3902  		lFound := s.findNodeAdd(value, &preds, &succs)
  3903  		if lFound != -1 { // indicating the value is already in the skip-list
  3904  			nodeFound := succs[lFound]
  3905  			if !nodeFound.flags.Get(marked) {
  3906  				for !nodeFound.flags.Get(fullyLinked) {
  3907  					// The node is not yet fully linked, just waits until it is.
  3908  				}
  3909  				return false
  3910  			}
  3911  			// If the node is marked, represents some other thread is in the process of deleting this node,
  3912  			// we need to add this node in next loop.
  3913  			continue
  3914  		}
  3915  		// Add this node into skip list.
  3916  		var (
  3917  			highestLocked        = -1 // the highest level being locked by this process
  3918  			valid                = true
  3919  			pred, succ, prevPred *uint32NodeDesc
  3920  		)
  3921  		for layer := 0; valid && layer < level; layer++ {
  3922  			pred = preds[layer]   // target node's previous node
  3923  			succ = succs[layer]   // target node's next node
  3924  			if pred != prevPred { // the node in this layer could be locked by previous loop
  3925  				pred.mu.Lock()
  3926  				highestLocked = layer
  3927  				prevPred = pred
  3928  			}
  3929  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  3930  			// It is valid if:
  3931  			// 1. The previous node and next node both are not marked.
  3932  			// 2. The previous node's next node is succ in this layer.
  3933  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  3934  		}
  3935  		if !valid {
  3936  			unlockUint32Desc(preds, highestLocked)
  3937  			continue
  3938  		}
  3939  
  3940  		nn := newUint32NodeDesc(value, level)
  3941  		for layer := 0; layer < level; layer++ {
  3942  			nn.storeNext(layer, succs[layer])
  3943  			preds[layer].atomicStoreNext(layer, nn)
  3944  		}
  3945  		nn.flags.SetTrue(fullyLinked)
  3946  		unlockUint32Desc(preds, highestLocked)
  3947  		atomic.AddInt64(&s.length, 1)
  3948  		return true
  3949  	}
  3950  }
  3951  
  3952  func (s *Uint32SetDesc) randomlevel() int {
  3953  	// Generate random level.
  3954  	level := randomLevel()
  3955  	// Update highest level if possible.
  3956  	for {
  3957  		hl := atomic.LoadInt64(&s.highestLevel)
  3958  		if int64(level) <= hl {
  3959  			break
  3960  		}
  3961  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  3962  			break
  3963  		}
  3964  	}
  3965  	return level
  3966  }
  3967  
  3968  // Contains check if the value is in the skip set.
  3969  func (s *Uint32SetDesc) Contains(value uint32) bool {
  3970  	x := s.header
  3971  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  3972  		nex := x.atomicLoadNext(i)
  3973  		for nex != nil && nex.lessthan(value) {
  3974  			x = nex
  3975  			nex = x.atomicLoadNext(i)
  3976  		}
  3977  
  3978  		// Check if the value already in the skip list.
  3979  		if nex != nil && nex.equal(value) {
  3980  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  3981  		}
  3982  	}
  3983  	return false
  3984  }
  3985  
  3986  // Remove a node from the skip set.
  3987  func (s *Uint32SetDesc) Remove(value uint32) bool {
  3988  	var (
  3989  		nodeToRemove *uint32NodeDesc
  3990  		isMarked     bool // represents if this operation mark the node
  3991  		topLayer     = -1
  3992  		preds, succs [maxLevel]*uint32NodeDesc
  3993  	)
  3994  	for {
  3995  		lFound := s.findNodeRemove(value, &preds, &succs)
  3996  		if isMarked || // this process mark this node or we can find this node in the skip list
  3997  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  3998  			if !isMarked { // we don't mark this node for now
  3999  				nodeToRemove = succs[lFound]
  4000  				topLayer = lFound
  4001  				nodeToRemove.mu.Lock()
  4002  				if nodeToRemove.flags.Get(marked) {
  4003  					// The node is marked by another process,
  4004  					// the physical deletion will be accomplished by another process.
  4005  					nodeToRemove.mu.Unlock()
  4006  					return false
  4007  				}
  4008  				nodeToRemove.flags.SetTrue(marked)
  4009  				isMarked = true
  4010  			}
  4011  			// Accomplish the physical deletion.
  4012  			var (
  4013  				highestLocked        = -1 // the highest level being locked by this process
  4014  				valid                = true
  4015  				pred, succ, prevPred *uint32NodeDesc
  4016  			)
  4017  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4018  				pred, succ = preds[layer], succs[layer]
  4019  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4020  					pred.mu.Lock()
  4021  					highestLocked = layer
  4022  					prevPred = pred
  4023  				}
  4024  				// valid check if there is another node has inserted into the skip list in this layer
  4025  				// during this process, or the previous is removed by another process.
  4026  				// It is valid if:
  4027  				// 1. the previous node exists.
  4028  				// 2. no another node has inserted into the skip list in this layer.
  4029  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4030  			}
  4031  			if !valid {
  4032  				unlockUint32Desc(preds, highestLocked)
  4033  				continue
  4034  			}
  4035  			for i := topLayer; i >= 0; i-- {
  4036  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  4037  				// So we don't need `nodeToRemove.loadNext`
  4038  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  4039  			}
  4040  			nodeToRemove.mu.Unlock()
  4041  			unlockUint32Desc(preds, highestLocked)
  4042  			atomic.AddInt64(&s.length, -1)
  4043  			return true
  4044  		}
  4045  		return false
  4046  	}
  4047  }
  4048  
  4049  // Range calls f sequentially for each value present in the skip set.
  4050  // If f returns false, range stops the iteration.
  4051  func (s *Uint32SetDesc) Range(f func(value uint32) bool) {
  4052  	x := s.header.atomicLoadNext(0)
  4053  	for x != nil {
  4054  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4055  			x = x.atomicLoadNext(0)
  4056  			continue
  4057  		}
  4058  		if !f(x.value) {
  4059  			break
  4060  		}
  4061  		x = x.atomicLoadNext(0)
  4062  	}
  4063  }
  4064  
  4065  // Len return the length of this skip set.
  4066  func (s *Uint32SetDesc) Len() int {
  4067  	return int(atomic.LoadInt64(&s.length))
  4068  }
  4069  
  4070  // Uint16Set represents a set based on skip list in ascending order.
  4071  type Uint16Set struct {
  4072  	header       *uint16Node
  4073  	length       int64
  4074  	highestLevel int64 // highest level for now
  4075  }
  4076  
  4077  type uint16Node struct {
  4078  	value uint16
  4079  	next  optionalArray // [level]*uint16Node
  4080  	mu    sync.Mutex
  4081  	flags bitflag
  4082  	level uint32
  4083  }
  4084  
  4085  func newUint16Node(value uint16, level int) *uint16Node {
  4086  	node := &uint16Node{
  4087  		value: value,
  4088  		level: uint32(level),
  4089  	}
  4090  	if level > op1 {
  4091  		node.next.extra = new([op2]unsafe.Pointer)
  4092  	}
  4093  	return node
  4094  }
  4095  
  4096  func (n *uint16Node) loadNext(i int) *uint16Node {
  4097  	return (*uint16Node)(n.next.load(i))
  4098  }
  4099  
  4100  func (n *uint16Node) storeNext(i int, node *uint16Node) {
  4101  	n.next.store(i, unsafe.Pointer(node))
  4102  }
  4103  
  4104  func (n *uint16Node) atomicLoadNext(i int) *uint16Node {
  4105  	return (*uint16Node)(n.next.atomicLoad(i))
  4106  }
  4107  
  4108  func (n *uint16Node) atomicStoreNext(i int, node *uint16Node) {
  4109  	n.next.atomicStore(i, unsafe.Pointer(node))
  4110  }
  4111  
  4112  func (n *uint16Node) lessthan(value uint16) bool {
  4113  	return n.value < value
  4114  }
  4115  
  4116  func (n *uint16Node) equal(value uint16) bool {
  4117  	return n.value == value
  4118  }
  4119  
  4120  // NewUint16 return an empty uint16 skip set in ascending order.
  4121  func NewUint16() *Uint16Set {
  4122  	h := newUint16Node(0, maxLevel)
  4123  	h.flags.SetTrue(fullyLinked)
  4124  	return &Uint16Set{
  4125  		header:       h,
  4126  		highestLevel: defaultHighestLevel,
  4127  	}
  4128  }
  4129  
  4130  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  4131  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4132  func (s *Uint16Set) findNodeRemove(value uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) int {
  4133  	// lFound represents the index of the first layer at which it found a node.
  4134  	lFound, x := -1, s.header
  4135  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4136  		succ := x.atomicLoadNext(i)
  4137  		for succ != nil && succ.lessthan(value) {
  4138  			x = succ
  4139  			succ = x.atomicLoadNext(i)
  4140  		}
  4141  		preds[i] = x
  4142  		succs[i] = succ
  4143  
  4144  		// Check if the value already in the skip list.
  4145  		if lFound == -1 && succ != nil && succ.equal(value) {
  4146  			lFound = i
  4147  		}
  4148  	}
  4149  	return lFound
  4150  }
  4151  
  4152  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  4153  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4154  func (s *Uint16Set) findNodeAdd(value uint16, preds *[maxLevel]*uint16Node, succs *[maxLevel]*uint16Node) int {
  4155  	x := s.header
  4156  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4157  		succ := x.atomicLoadNext(i)
  4158  		for succ != nil && succ.lessthan(value) {
  4159  			x = succ
  4160  			succ = x.atomicLoadNext(i)
  4161  		}
  4162  		preds[i] = x
  4163  		succs[i] = succ
  4164  
  4165  		// Check if the value already in the skip list.
  4166  		if succ != nil && succ.equal(value) {
  4167  			return i
  4168  		}
  4169  	}
  4170  	return -1
  4171  }
  4172  
  4173  func unlockUint16(preds [maxLevel]*uint16Node, highestLevel int) {
  4174  	var prevPred *uint16Node
  4175  	for i := highestLevel; i >= 0; i-- {
  4176  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  4177  			preds[i].mu.Unlock()
  4178  			prevPred = preds[i]
  4179  		}
  4180  	}
  4181  }
  4182  
  4183  // Add add the value into skip set, return true if this process insert the value into skip set,
  4184  // return false if this process can't insert this value, because another process has insert the same value.
  4185  //
  4186  // If the value is in the skip set but not fully linked, this process will wait until it is.
  4187  func (s *Uint16Set) Add(value uint16) bool {
  4188  	level := s.randomlevel()
  4189  	var preds, succs [maxLevel]*uint16Node
  4190  	for {
  4191  		lFound := s.findNodeAdd(value, &preds, &succs)
  4192  		if lFound != -1 { // indicating the value is already in the skip-list
  4193  			nodeFound := succs[lFound]
  4194  			if !nodeFound.flags.Get(marked) {
  4195  				for !nodeFound.flags.Get(fullyLinked) {
  4196  					// The node is not yet fully linked, just waits until it is.
  4197  				}
  4198  				return false
  4199  			}
  4200  			// If the node is marked, represents some other thread is in the process of deleting this node,
  4201  			// we need to add this node in next loop.
  4202  			continue
  4203  		}
  4204  		// Add this node into skip list.
  4205  		var (
  4206  			highestLocked        = -1 // the highest level being locked by this process
  4207  			valid                = true
  4208  			pred, succ, prevPred *uint16Node
  4209  		)
  4210  		for layer := 0; valid && layer < level; layer++ {
  4211  			pred = preds[layer]   // target node's previous node
  4212  			succ = succs[layer]   // target node's next node
  4213  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4214  				pred.mu.Lock()
  4215  				highestLocked = layer
  4216  				prevPred = pred
  4217  			}
  4218  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  4219  			// It is valid if:
  4220  			// 1. The previous node and next node both are not marked.
  4221  			// 2. The previous node's next node is succ in this layer.
  4222  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4223  		}
  4224  		if !valid {
  4225  			unlockUint16(preds, highestLocked)
  4226  			continue
  4227  		}
  4228  
  4229  		nn := newUint16Node(value, level)
  4230  		for layer := 0; layer < level; layer++ {
  4231  			nn.storeNext(layer, succs[layer])
  4232  			preds[layer].atomicStoreNext(layer, nn)
  4233  		}
  4234  		nn.flags.SetTrue(fullyLinked)
  4235  		unlockUint16(preds, highestLocked)
  4236  		atomic.AddInt64(&s.length, 1)
  4237  		return true
  4238  	}
  4239  }
  4240  
  4241  func (s *Uint16Set) randomlevel() int {
  4242  	// Generate random level.
  4243  	level := randomLevel()
  4244  	// Update highest level if possible.
  4245  	for {
  4246  		hl := atomic.LoadInt64(&s.highestLevel)
  4247  		if int64(level) <= hl {
  4248  			break
  4249  		}
  4250  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  4251  			break
  4252  		}
  4253  	}
  4254  	return level
  4255  }
  4256  
  4257  // Contains check if the value is in the skip set.
  4258  func (s *Uint16Set) Contains(value uint16) bool {
  4259  	x := s.header
  4260  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4261  		nex := x.atomicLoadNext(i)
  4262  		for nex != nil && nex.lessthan(value) {
  4263  			x = nex
  4264  			nex = x.atomicLoadNext(i)
  4265  		}
  4266  
  4267  		// Check if the value already in the skip list.
  4268  		if nex != nil && nex.equal(value) {
  4269  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  4270  		}
  4271  	}
  4272  	return false
  4273  }
  4274  
  4275  // Remove a node from the skip set.
  4276  func (s *Uint16Set) Remove(value uint16) bool {
  4277  	var (
  4278  		nodeToRemove *uint16Node
  4279  		isMarked     bool // represents if this operation mark the node
  4280  		topLayer     = -1
  4281  		preds, succs [maxLevel]*uint16Node
  4282  	)
  4283  	for {
  4284  		lFound := s.findNodeRemove(value, &preds, &succs)
  4285  		if isMarked || // this process mark this node or we can find this node in the skip list
  4286  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4287  			if !isMarked { // we don't mark this node for now
  4288  				nodeToRemove = succs[lFound]
  4289  				topLayer = lFound
  4290  				nodeToRemove.mu.Lock()
  4291  				if nodeToRemove.flags.Get(marked) {
  4292  					// The node is marked by another process,
  4293  					// the physical deletion will be accomplished by another process.
  4294  					nodeToRemove.mu.Unlock()
  4295  					return false
  4296  				}
  4297  				nodeToRemove.flags.SetTrue(marked)
  4298  				isMarked = true
  4299  			}
  4300  			// Accomplish the physical deletion.
  4301  			var (
  4302  				highestLocked        = -1 // the highest level being locked by this process
  4303  				valid                = true
  4304  				pred, succ, prevPred *uint16Node
  4305  			)
  4306  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4307  				pred, succ = preds[layer], succs[layer]
  4308  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4309  					pred.mu.Lock()
  4310  					highestLocked = layer
  4311  					prevPred = pred
  4312  				}
  4313  				// valid check if there is another node has inserted into the skip list in this layer
  4314  				// during this process, or the previous is removed by another process.
  4315  				// It is valid if:
  4316  				// 1. the previous node exists.
  4317  				// 2. no another node has inserted into the skip list in this layer.
  4318  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4319  			}
  4320  			if !valid {
  4321  				unlockUint16(preds, highestLocked)
  4322  				continue
  4323  			}
  4324  			for i := topLayer; i >= 0; i-- {
  4325  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  4326  				// So we don't need `nodeToRemove.loadNext`
  4327  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  4328  			}
  4329  			nodeToRemove.mu.Unlock()
  4330  			unlockUint16(preds, highestLocked)
  4331  			atomic.AddInt64(&s.length, -1)
  4332  			return true
  4333  		}
  4334  		return false
  4335  	}
  4336  }
  4337  
  4338  // Range calls f sequentially for each value present in the skip set.
  4339  // If f returns false, range stops the iteration.
  4340  func (s *Uint16Set) Range(f func(value uint16) bool) {
  4341  	x := s.header.atomicLoadNext(0)
  4342  	for x != nil {
  4343  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4344  			x = x.atomicLoadNext(0)
  4345  			continue
  4346  		}
  4347  		if !f(x.value) {
  4348  			break
  4349  		}
  4350  		x = x.atomicLoadNext(0)
  4351  	}
  4352  }
  4353  
  4354  // Len return the length of this skip set.
  4355  func (s *Uint16Set) Len() int {
  4356  	return int(atomic.LoadInt64(&s.length))
  4357  }
  4358  
  4359  // Uint16SetDesc represents a set based on skip list in descending order.
  4360  type Uint16SetDesc struct {
  4361  	header       *uint16NodeDesc
  4362  	length       int64
  4363  	highestLevel int64 // highest level for now
  4364  }
  4365  
  4366  type uint16NodeDesc struct {
  4367  	value uint16
  4368  	next  optionalArray // [level]*uint16NodeDesc
  4369  	mu    sync.Mutex
  4370  	flags bitflag
  4371  	level uint32
  4372  }
  4373  
  4374  func newUint16NodeDesc(value uint16, level int) *uint16NodeDesc {
  4375  	node := &uint16NodeDesc{
  4376  		value: value,
  4377  		level: uint32(level),
  4378  	}
  4379  	if level > op1 {
  4380  		node.next.extra = new([op2]unsafe.Pointer)
  4381  	}
  4382  	return node
  4383  }
  4384  
  4385  func (n *uint16NodeDesc) loadNext(i int) *uint16NodeDesc {
  4386  	return (*uint16NodeDesc)(n.next.load(i))
  4387  }
  4388  
  4389  func (n *uint16NodeDesc) storeNext(i int, node *uint16NodeDesc) {
  4390  	n.next.store(i, unsafe.Pointer(node))
  4391  }
  4392  
  4393  func (n *uint16NodeDesc) atomicLoadNext(i int) *uint16NodeDesc {
  4394  	return (*uint16NodeDesc)(n.next.atomicLoad(i))
  4395  }
  4396  
  4397  func (n *uint16NodeDesc) atomicStoreNext(i int, node *uint16NodeDesc) {
  4398  	n.next.atomicStore(i, unsafe.Pointer(node))
  4399  }
  4400  
  4401  func (n *uint16NodeDesc) lessthan(value uint16) bool {
  4402  	return n.value > value
  4403  }
  4404  
  4405  func (n *uint16NodeDesc) equal(value uint16) bool {
  4406  	return n.value == value
  4407  }
  4408  
  4409  // NewUint16Desc return an empty uint16 skip set in descending order.
  4410  func NewUint16Desc() *Uint16SetDesc {
  4411  	h := newUint16NodeDesc(0, maxLevel)
  4412  	h.flags.SetTrue(fullyLinked)
  4413  	return &Uint16SetDesc{
  4414  		header:       h,
  4415  		highestLevel: defaultHighestLevel,
  4416  	}
  4417  }
  4418  
  4419  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  4420  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4421  func (s *Uint16SetDesc) findNodeRemove(value uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) int {
  4422  	// lFound represents the index of the first layer at which it found a node.
  4423  	lFound, x := -1, s.header
  4424  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4425  		succ := x.atomicLoadNext(i)
  4426  		for succ != nil && succ.lessthan(value) {
  4427  			x = succ
  4428  			succ = x.atomicLoadNext(i)
  4429  		}
  4430  		preds[i] = x
  4431  		succs[i] = succ
  4432  
  4433  		// Check if the value already in the skip list.
  4434  		if lFound == -1 && succ != nil && succ.equal(value) {
  4435  			lFound = i
  4436  		}
  4437  	}
  4438  	return lFound
  4439  }
  4440  
  4441  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  4442  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4443  func (s *Uint16SetDesc) findNodeAdd(value uint16, preds *[maxLevel]*uint16NodeDesc, succs *[maxLevel]*uint16NodeDesc) int {
  4444  	x := s.header
  4445  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4446  		succ := x.atomicLoadNext(i)
  4447  		for succ != nil && succ.lessthan(value) {
  4448  			x = succ
  4449  			succ = x.atomicLoadNext(i)
  4450  		}
  4451  		preds[i] = x
  4452  		succs[i] = succ
  4453  
  4454  		// Check if the value already in the skip list.
  4455  		if succ != nil && succ.equal(value) {
  4456  			return i
  4457  		}
  4458  	}
  4459  	return -1
  4460  }
  4461  
  4462  func unlockUint16Desc(preds [maxLevel]*uint16NodeDesc, highestLevel int) {
  4463  	var prevPred *uint16NodeDesc
  4464  	for i := highestLevel; i >= 0; i-- {
  4465  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  4466  			preds[i].mu.Unlock()
  4467  			prevPred = preds[i]
  4468  		}
  4469  	}
  4470  }
  4471  
  4472  // Add add the value into skip set, return true if this process insert the value into skip set,
  4473  // return false if this process can't insert this value, because another process has insert the same value.
  4474  //
  4475  // If the value is in the skip set but not fully linked, this process will wait until it is.
  4476  func (s *Uint16SetDesc) Add(value uint16) bool {
  4477  	level := s.randomlevel()
  4478  	var preds, succs [maxLevel]*uint16NodeDesc
  4479  	for {
  4480  		lFound := s.findNodeAdd(value, &preds, &succs)
  4481  		if lFound != -1 { // indicating the value is already in the skip-list
  4482  			nodeFound := succs[lFound]
  4483  			if !nodeFound.flags.Get(marked) {
  4484  				for !nodeFound.flags.Get(fullyLinked) {
  4485  					// The node is not yet fully linked, just waits until it is.
  4486  				}
  4487  				return false
  4488  			}
  4489  			// If the node is marked, represents some other thread is in the process of deleting this node,
  4490  			// we need to add this node in next loop.
  4491  			continue
  4492  		}
  4493  		// Add this node into skip list.
  4494  		var (
  4495  			highestLocked        = -1 // the highest level being locked by this process
  4496  			valid                = true
  4497  			pred, succ, prevPred *uint16NodeDesc
  4498  		)
  4499  		for layer := 0; valid && layer < level; layer++ {
  4500  			pred = preds[layer]   // target node's previous node
  4501  			succ = succs[layer]   // target node's next node
  4502  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4503  				pred.mu.Lock()
  4504  				highestLocked = layer
  4505  				prevPred = pred
  4506  			}
  4507  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  4508  			// It is valid if:
  4509  			// 1. The previous node and next node both are not marked.
  4510  			// 2. The previous node's next node is succ in this layer.
  4511  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4512  		}
  4513  		if !valid {
  4514  			unlockUint16Desc(preds, highestLocked)
  4515  			continue
  4516  		}
  4517  
  4518  		nn := newUint16NodeDesc(value, level)
  4519  		for layer := 0; layer < level; layer++ {
  4520  			nn.storeNext(layer, succs[layer])
  4521  			preds[layer].atomicStoreNext(layer, nn)
  4522  		}
  4523  		nn.flags.SetTrue(fullyLinked)
  4524  		unlockUint16Desc(preds, highestLocked)
  4525  		atomic.AddInt64(&s.length, 1)
  4526  		return true
  4527  	}
  4528  }
  4529  
  4530  func (s *Uint16SetDesc) randomlevel() int {
  4531  	// Generate random level.
  4532  	level := randomLevel()
  4533  	// Update highest level if possible.
  4534  	for {
  4535  		hl := atomic.LoadInt64(&s.highestLevel)
  4536  		if int64(level) <= hl {
  4537  			break
  4538  		}
  4539  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  4540  			break
  4541  		}
  4542  	}
  4543  	return level
  4544  }
  4545  
  4546  // Contains check if the value is in the skip set.
  4547  func (s *Uint16SetDesc) Contains(value uint16) bool {
  4548  	x := s.header
  4549  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4550  		nex := x.atomicLoadNext(i)
  4551  		for nex != nil && nex.lessthan(value) {
  4552  			x = nex
  4553  			nex = x.atomicLoadNext(i)
  4554  		}
  4555  
  4556  		// Check if the value already in the skip list.
  4557  		if nex != nil && nex.equal(value) {
  4558  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  4559  		}
  4560  	}
  4561  	return false
  4562  }
  4563  
  4564  // Remove a node from the skip set.
  4565  func (s *Uint16SetDesc) Remove(value uint16) bool {
  4566  	var (
  4567  		nodeToRemove *uint16NodeDesc
  4568  		isMarked     bool // represents if this operation mark the node
  4569  		topLayer     = -1
  4570  		preds, succs [maxLevel]*uint16NodeDesc
  4571  	)
  4572  	for {
  4573  		lFound := s.findNodeRemove(value, &preds, &succs)
  4574  		if isMarked || // this process mark this node or we can find this node in the skip list
  4575  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4576  			if !isMarked { // we don't mark this node for now
  4577  				nodeToRemove = succs[lFound]
  4578  				topLayer = lFound
  4579  				nodeToRemove.mu.Lock()
  4580  				if nodeToRemove.flags.Get(marked) {
  4581  					// The node is marked by another process,
  4582  					// the physical deletion will be accomplished by another process.
  4583  					nodeToRemove.mu.Unlock()
  4584  					return false
  4585  				}
  4586  				nodeToRemove.flags.SetTrue(marked)
  4587  				isMarked = true
  4588  			}
  4589  			// Accomplish the physical deletion.
  4590  			var (
  4591  				highestLocked        = -1 // the highest level being locked by this process
  4592  				valid                = true
  4593  				pred, succ, prevPred *uint16NodeDesc
  4594  			)
  4595  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4596  				pred, succ = preds[layer], succs[layer]
  4597  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4598  					pred.mu.Lock()
  4599  					highestLocked = layer
  4600  					prevPred = pred
  4601  				}
  4602  				// valid check if there is another node has inserted into the skip list in this layer
  4603  				// during this process, or the previous is removed by another process.
  4604  				// It is valid if:
  4605  				// 1. the previous node exists.
  4606  				// 2. no another node has inserted into the skip list in this layer.
  4607  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4608  			}
  4609  			if !valid {
  4610  				unlockUint16Desc(preds, highestLocked)
  4611  				continue
  4612  			}
  4613  			for i := topLayer; i >= 0; i-- {
  4614  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  4615  				// So we don't need `nodeToRemove.loadNext`
  4616  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  4617  			}
  4618  			nodeToRemove.mu.Unlock()
  4619  			unlockUint16Desc(preds, highestLocked)
  4620  			atomic.AddInt64(&s.length, -1)
  4621  			return true
  4622  		}
  4623  		return false
  4624  	}
  4625  }
  4626  
  4627  // Range calls f sequentially for each value present in the skip set.
  4628  // If f returns false, range stops the iteration.
  4629  func (s *Uint16SetDesc) Range(f func(value uint16) bool) {
  4630  	x := s.header.atomicLoadNext(0)
  4631  	for x != nil {
  4632  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4633  			x = x.atomicLoadNext(0)
  4634  			continue
  4635  		}
  4636  		if !f(x.value) {
  4637  			break
  4638  		}
  4639  		x = x.atomicLoadNext(0)
  4640  	}
  4641  }
  4642  
  4643  // Len return the length of this skip set.
  4644  func (s *Uint16SetDesc) Len() int {
  4645  	return int(atomic.LoadInt64(&s.length))
  4646  }
  4647  
  4648  // UintSet represents a set based on skip list in ascending order.
  4649  type UintSet struct {
  4650  	header       *uintNode
  4651  	length       int64
  4652  	highestLevel int64 // highest level for now
  4653  }
  4654  
  4655  type uintNode struct {
  4656  	value uint
  4657  	next  optionalArray // [level]*uintNode
  4658  	mu    sync.Mutex
  4659  	flags bitflag
  4660  	level uint32
  4661  }
  4662  
  4663  func newUintNode(value uint, level int) *uintNode {
  4664  	node := &uintNode{
  4665  		value: value,
  4666  		level: uint32(level),
  4667  	}
  4668  	if level > op1 {
  4669  		node.next.extra = new([op2]unsafe.Pointer)
  4670  	}
  4671  	return node
  4672  }
  4673  
  4674  func (n *uintNode) loadNext(i int) *uintNode {
  4675  	return (*uintNode)(n.next.load(i))
  4676  }
  4677  
  4678  func (n *uintNode) storeNext(i int, node *uintNode) {
  4679  	n.next.store(i, unsafe.Pointer(node))
  4680  }
  4681  
  4682  func (n *uintNode) atomicLoadNext(i int) *uintNode {
  4683  	return (*uintNode)(n.next.atomicLoad(i))
  4684  }
  4685  
  4686  func (n *uintNode) atomicStoreNext(i int, node *uintNode) {
  4687  	n.next.atomicStore(i, unsafe.Pointer(node))
  4688  }
  4689  
  4690  func (n *uintNode) lessthan(value uint) bool {
  4691  	return n.value < value
  4692  }
  4693  
  4694  func (n *uintNode) equal(value uint) bool {
  4695  	return n.value == value
  4696  }
  4697  
  4698  // NewUint return an empty uint skip set in ascending order.
  4699  func NewUint() *UintSet {
  4700  	h := newUintNode(0, maxLevel)
  4701  	h.flags.SetTrue(fullyLinked)
  4702  	return &UintSet{
  4703  		header:       h,
  4704  		highestLevel: defaultHighestLevel,
  4705  	}
  4706  }
  4707  
  4708  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  4709  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4710  func (s *UintSet) findNodeRemove(value uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) int {
  4711  	// lFound represents the index of the first layer at which it found a node.
  4712  	lFound, x := -1, s.header
  4713  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4714  		succ := x.atomicLoadNext(i)
  4715  		for succ != nil && succ.lessthan(value) {
  4716  			x = succ
  4717  			succ = x.atomicLoadNext(i)
  4718  		}
  4719  		preds[i] = x
  4720  		succs[i] = succ
  4721  
  4722  		// Check if the value already in the skip list.
  4723  		if lFound == -1 && succ != nil && succ.equal(value) {
  4724  			lFound = i
  4725  		}
  4726  	}
  4727  	return lFound
  4728  }
  4729  
  4730  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  4731  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4732  func (s *UintSet) findNodeAdd(value uint, preds *[maxLevel]*uintNode, succs *[maxLevel]*uintNode) int {
  4733  	x := s.header
  4734  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4735  		succ := x.atomicLoadNext(i)
  4736  		for succ != nil && succ.lessthan(value) {
  4737  			x = succ
  4738  			succ = x.atomicLoadNext(i)
  4739  		}
  4740  		preds[i] = x
  4741  		succs[i] = succ
  4742  
  4743  		// Check if the value already in the skip list.
  4744  		if succ != nil && succ.equal(value) {
  4745  			return i
  4746  		}
  4747  	}
  4748  	return -1
  4749  }
  4750  
  4751  func unlockUint(preds [maxLevel]*uintNode, highestLevel int) {
  4752  	var prevPred *uintNode
  4753  	for i := highestLevel; i >= 0; i-- {
  4754  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  4755  			preds[i].mu.Unlock()
  4756  			prevPred = preds[i]
  4757  		}
  4758  	}
  4759  }
  4760  
  4761  // Add add the value into skip set, return true if this process insert the value into skip set,
  4762  // return false if this process can't insert this value, because another process has insert the same value.
  4763  //
  4764  // If the value is in the skip set but not fully linked, this process will wait until it is.
  4765  func (s *UintSet) Add(value uint) bool {
  4766  	level := s.randomlevel()
  4767  	var preds, succs [maxLevel]*uintNode
  4768  	for {
  4769  		lFound := s.findNodeAdd(value, &preds, &succs)
  4770  		if lFound != -1 { // indicating the value is already in the skip-list
  4771  			nodeFound := succs[lFound]
  4772  			if !nodeFound.flags.Get(marked) {
  4773  				for !nodeFound.flags.Get(fullyLinked) {
  4774  					// The node is not yet fully linked, just waits until it is.
  4775  				}
  4776  				return false
  4777  			}
  4778  			// If the node is marked, represents some other thread is in the process of deleting this node,
  4779  			// we need to add this node in next loop.
  4780  			continue
  4781  		}
  4782  		// Add this node into skip list.
  4783  		var (
  4784  			highestLocked        = -1 // the highest level being locked by this process
  4785  			valid                = true
  4786  			pred, succ, prevPred *uintNode
  4787  		)
  4788  		for layer := 0; valid && layer < level; layer++ {
  4789  			pred = preds[layer]   // target node's previous node
  4790  			succ = succs[layer]   // target node's next node
  4791  			if pred != prevPred { // the node in this layer could be locked by previous loop
  4792  				pred.mu.Lock()
  4793  				highestLocked = layer
  4794  				prevPred = pred
  4795  			}
  4796  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  4797  			// It is valid if:
  4798  			// 1. The previous node and next node both are not marked.
  4799  			// 2. The previous node's next node is succ in this layer.
  4800  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  4801  		}
  4802  		if !valid {
  4803  			unlockUint(preds, highestLocked)
  4804  			continue
  4805  		}
  4806  
  4807  		nn := newUintNode(value, level)
  4808  		for layer := 0; layer < level; layer++ {
  4809  			nn.storeNext(layer, succs[layer])
  4810  			preds[layer].atomicStoreNext(layer, nn)
  4811  		}
  4812  		nn.flags.SetTrue(fullyLinked)
  4813  		unlockUint(preds, highestLocked)
  4814  		atomic.AddInt64(&s.length, 1)
  4815  		return true
  4816  	}
  4817  }
  4818  
  4819  func (s *UintSet) randomlevel() int {
  4820  	// Generate random level.
  4821  	level := randomLevel()
  4822  	// Update highest level if possible.
  4823  	for {
  4824  		hl := atomic.LoadInt64(&s.highestLevel)
  4825  		if int64(level) <= hl {
  4826  			break
  4827  		}
  4828  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  4829  			break
  4830  		}
  4831  	}
  4832  	return level
  4833  }
  4834  
  4835  // Contains check if the value is in the skip set.
  4836  func (s *UintSet) Contains(value uint) bool {
  4837  	x := s.header
  4838  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  4839  		nex := x.atomicLoadNext(i)
  4840  		for nex != nil && nex.lessthan(value) {
  4841  			x = nex
  4842  			nex = x.atomicLoadNext(i)
  4843  		}
  4844  
  4845  		// Check if the value already in the skip list.
  4846  		if nex != nil && nex.equal(value) {
  4847  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  4848  		}
  4849  	}
  4850  	return false
  4851  }
  4852  
  4853  // Remove a node from the skip set.
  4854  func (s *UintSet) Remove(value uint) bool {
  4855  	var (
  4856  		nodeToRemove *uintNode
  4857  		isMarked     bool // represents if this operation mark the node
  4858  		topLayer     = -1
  4859  		preds, succs [maxLevel]*uintNode
  4860  	)
  4861  	for {
  4862  		lFound := s.findNodeRemove(value, &preds, &succs)
  4863  		if isMarked || // this process mark this node or we can find this node in the skip list
  4864  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  4865  			if !isMarked { // we don't mark this node for now
  4866  				nodeToRemove = succs[lFound]
  4867  				topLayer = lFound
  4868  				nodeToRemove.mu.Lock()
  4869  				if nodeToRemove.flags.Get(marked) {
  4870  					// The node is marked by another process,
  4871  					// the physical deletion will be accomplished by another process.
  4872  					nodeToRemove.mu.Unlock()
  4873  					return false
  4874  				}
  4875  				nodeToRemove.flags.SetTrue(marked)
  4876  				isMarked = true
  4877  			}
  4878  			// Accomplish the physical deletion.
  4879  			var (
  4880  				highestLocked        = -1 // the highest level being locked by this process
  4881  				valid                = true
  4882  				pred, succ, prevPred *uintNode
  4883  			)
  4884  			for layer := 0; valid && (layer <= topLayer); layer++ {
  4885  				pred, succ = preds[layer], succs[layer]
  4886  				if pred != prevPred { // the node in this layer could be locked by previous loop
  4887  					pred.mu.Lock()
  4888  					highestLocked = layer
  4889  					prevPred = pred
  4890  				}
  4891  				// valid check if there is another node has inserted into the skip list in this layer
  4892  				// during this process, or the previous is removed by another process.
  4893  				// It is valid if:
  4894  				// 1. the previous node exists.
  4895  				// 2. no another node has inserted into the skip list in this layer.
  4896  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  4897  			}
  4898  			if !valid {
  4899  				unlockUint(preds, highestLocked)
  4900  				continue
  4901  			}
  4902  			for i := topLayer; i >= 0; i-- {
  4903  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  4904  				// So we don't need `nodeToRemove.loadNext`
  4905  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  4906  			}
  4907  			nodeToRemove.mu.Unlock()
  4908  			unlockUint(preds, highestLocked)
  4909  			atomic.AddInt64(&s.length, -1)
  4910  			return true
  4911  		}
  4912  		return false
  4913  	}
  4914  }
  4915  
  4916  // Range calls f sequentially for each value present in the skip set.
  4917  // If f returns false, range stops the iteration.
  4918  func (s *UintSet) Range(f func(value uint) bool) {
  4919  	x := s.header.atomicLoadNext(0)
  4920  	for x != nil {
  4921  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  4922  			x = x.atomicLoadNext(0)
  4923  			continue
  4924  		}
  4925  		if !f(x.value) {
  4926  			break
  4927  		}
  4928  		x = x.atomicLoadNext(0)
  4929  	}
  4930  }
  4931  
  4932  // Len return the length of this skip set.
  4933  func (s *UintSet) Len() int {
  4934  	return int(atomic.LoadInt64(&s.length))
  4935  }
  4936  
  4937  // UintSetDesc represents a set based on skip list in descending order.
  4938  type UintSetDesc struct {
  4939  	header       *uintNodeDesc
  4940  	length       int64
  4941  	highestLevel int64 // highest level for now
  4942  }
  4943  
  4944  type uintNodeDesc struct {
  4945  	value uint
  4946  	next  optionalArray // [level]*uintNodeDesc
  4947  	mu    sync.Mutex
  4948  	flags bitflag
  4949  	level uint32
  4950  }
  4951  
  4952  func newUintNodeDesc(value uint, level int) *uintNodeDesc {
  4953  	node := &uintNodeDesc{
  4954  		value: value,
  4955  		level: uint32(level),
  4956  	}
  4957  	if level > op1 {
  4958  		node.next.extra = new([op2]unsafe.Pointer)
  4959  	}
  4960  	return node
  4961  }
  4962  
  4963  func (n *uintNodeDesc) loadNext(i int) *uintNodeDesc {
  4964  	return (*uintNodeDesc)(n.next.load(i))
  4965  }
  4966  
  4967  func (n *uintNodeDesc) storeNext(i int, node *uintNodeDesc) {
  4968  	n.next.store(i, unsafe.Pointer(node))
  4969  }
  4970  
  4971  func (n *uintNodeDesc) atomicLoadNext(i int) *uintNodeDesc {
  4972  	return (*uintNodeDesc)(n.next.atomicLoad(i))
  4973  }
  4974  
  4975  func (n *uintNodeDesc) atomicStoreNext(i int, node *uintNodeDesc) {
  4976  	n.next.atomicStore(i, unsafe.Pointer(node))
  4977  }
  4978  
  4979  func (n *uintNodeDesc) lessthan(value uint) bool {
  4980  	return n.value > value
  4981  }
  4982  
  4983  func (n *uintNodeDesc) equal(value uint) bool {
  4984  	return n.value == value
  4985  }
  4986  
  4987  // NewUintDesc return an empty uint skip set in descending order.
  4988  func NewUintDesc() *UintSetDesc {
  4989  	h := newUintNodeDesc(0, maxLevel)
  4990  	h.flags.SetTrue(fullyLinked)
  4991  	return &UintSetDesc{
  4992  		header:       h,
  4993  		highestLevel: defaultHighestLevel,
  4994  	}
  4995  }
  4996  
  4997  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  4998  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  4999  func (s *UintSetDesc) findNodeRemove(value uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) int {
  5000  	// lFound represents the index of the first layer at which it found a node.
  5001  	lFound, x := -1, s.header
  5002  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5003  		succ := x.atomicLoadNext(i)
  5004  		for succ != nil && succ.lessthan(value) {
  5005  			x = succ
  5006  			succ = x.atomicLoadNext(i)
  5007  		}
  5008  		preds[i] = x
  5009  		succs[i] = succ
  5010  
  5011  		// Check if the value already in the skip list.
  5012  		if lFound == -1 && succ != nil && succ.equal(value) {
  5013  			lFound = i
  5014  		}
  5015  	}
  5016  	return lFound
  5017  }
  5018  
  5019  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  5020  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5021  func (s *UintSetDesc) findNodeAdd(value uint, preds *[maxLevel]*uintNodeDesc, succs *[maxLevel]*uintNodeDesc) int {
  5022  	x := s.header
  5023  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5024  		succ := x.atomicLoadNext(i)
  5025  		for succ != nil && succ.lessthan(value) {
  5026  			x = succ
  5027  			succ = x.atomicLoadNext(i)
  5028  		}
  5029  		preds[i] = x
  5030  		succs[i] = succ
  5031  
  5032  		// Check if the value already in the skip list.
  5033  		if succ != nil && succ.equal(value) {
  5034  			return i
  5035  		}
  5036  	}
  5037  	return -1
  5038  }
  5039  
  5040  func unlockUintDesc(preds [maxLevel]*uintNodeDesc, highestLevel int) {
  5041  	var prevPred *uintNodeDesc
  5042  	for i := highestLevel; i >= 0; i-- {
  5043  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  5044  			preds[i].mu.Unlock()
  5045  			prevPred = preds[i]
  5046  		}
  5047  	}
  5048  }
  5049  
  5050  // Add add the value into skip set, return true if this process insert the value into skip set,
  5051  // return false if this process can't insert this value, because another process has insert the same value.
  5052  //
  5053  // If the value is in the skip set but not fully linked, this process will wait until it is.
  5054  func (s *UintSetDesc) Add(value uint) bool {
  5055  	level := s.randomlevel()
  5056  	var preds, succs [maxLevel]*uintNodeDesc
  5057  	for {
  5058  		lFound := s.findNodeAdd(value, &preds, &succs)
  5059  		if lFound != -1 { // indicating the value is already in the skip-list
  5060  			nodeFound := succs[lFound]
  5061  			if !nodeFound.flags.Get(marked) {
  5062  				for !nodeFound.flags.Get(fullyLinked) {
  5063  					// The node is not yet fully linked, just waits until it is.
  5064  				}
  5065  				return false
  5066  			}
  5067  			// If the node is marked, represents some other thread is in the process of deleting this node,
  5068  			// we need to add this node in next loop.
  5069  			continue
  5070  		}
  5071  		// Add this node into skip list.
  5072  		var (
  5073  			highestLocked        = -1 // the highest level being locked by this process
  5074  			valid                = true
  5075  			pred, succ, prevPred *uintNodeDesc
  5076  		)
  5077  		for layer := 0; valid && layer < level; layer++ {
  5078  			pred = preds[layer]   // target node's previous node
  5079  			succ = succs[layer]   // target node's next node
  5080  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5081  				pred.mu.Lock()
  5082  				highestLocked = layer
  5083  				prevPred = pred
  5084  			}
  5085  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  5086  			// It is valid if:
  5087  			// 1. The previous node and next node both are not marked.
  5088  			// 2. The previous node's next node is succ in this layer.
  5089  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5090  		}
  5091  		if !valid {
  5092  			unlockUintDesc(preds, highestLocked)
  5093  			continue
  5094  		}
  5095  
  5096  		nn := newUintNodeDesc(value, level)
  5097  		for layer := 0; layer < level; layer++ {
  5098  			nn.storeNext(layer, succs[layer])
  5099  			preds[layer].atomicStoreNext(layer, nn)
  5100  		}
  5101  		nn.flags.SetTrue(fullyLinked)
  5102  		unlockUintDesc(preds, highestLocked)
  5103  		atomic.AddInt64(&s.length, 1)
  5104  		return true
  5105  	}
  5106  }
  5107  
  5108  func (s *UintSetDesc) randomlevel() int {
  5109  	// Generate random level.
  5110  	level := randomLevel()
  5111  	// Update highest level if possible.
  5112  	for {
  5113  		hl := atomic.LoadInt64(&s.highestLevel)
  5114  		if int64(level) <= hl {
  5115  			break
  5116  		}
  5117  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  5118  			break
  5119  		}
  5120  	}
  5121  	return level
  5122  }
  5123  
  5124  // Contains check if the value is in the skip set.
  5125  func (s *UintSetDesc) Contains(value uint) bool {
  5126  	x := s.header
  5127  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5128  		nex := x.atomicLoadNext(i)
  5129  		for nex != nil && nex.lessthan(value) {
  5130  			x = nex
  5131  			nex = x.atomicLoadNext(i)
  5132  		}
  5133  
  5134  		// Check if the value already in the skip list.
  5135  		if nex != nil && nex.equal(value) {
  5136  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  5137  		}
  5138  	}
  5139  	return false
  5140  }
  5141  
  5142  // Remove a node from the skip set.
  5143  func (s *UintSetDesc) Remove(value uint) bool {
  5144  	var (
  5145  		nodeToRemove *uintNodeDesc
  5146  		isMarked     bool // represents if this operation mark the node
  5147  		topLayer     = -1
  5148  		preds, succs [maxLevel]*uintNodeDesc
  5149  	)
  5150  	for {
  5151  		lFound := s.findNodeRemove(value, &preds, &succs)
  5152  		if isMarked || // this process mark this node or we can find this node in the skip list
  5153  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5154  			if !isMarked { // we don't mark this node for now
  5155  				nodeToRemove = succs[lFound]
  5156  				topLayer = lFound
  5157  				nodeToRemove.mu.Lock()
  5158  				if nodeToRemove.flags.Get(marked) {
  5159  					// The node is marked by another process,
  5160  					// the physical deletion will be accomplished by another process.
  5161  					nodeToRemove.mu.Unlock()
  5162  					return false
  5163  				}
  5164  				nodeToRemove.flags.SetTrue(marked)
  5165  				isMarked = true
  5166  			}
  5167  			// Accomplish the physical deletion.
  5168  			var (
  5169  				highestLocked        = -1 // the highest level being locked by this process
  5170  				valid                = true
  5171  				pred, succ, prevPred *uintNodeDesc
  5172  			)
  5173  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5174  				pred, succ = preds[layer], succs[layer]
  5175  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5176  					pred.mu.Lock()
  5177  					highestLocked = layer
  5178  					prevPred = pred
  5179  				}
  5180  				// valid check if there is another node has inserted into the skip list in this layer
  5181  				// during this process, or the previous is removed by another process.
  5182  				// It is valid if:
  5183  				// 1. the previous node exists.
  5184  				// 2. no another node has inserted into the skip list in this layer.
  5185  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  5186  			}
  5187  			if !valid {
  5188  				unlockUintDesc(preds, highestLocked)
  5189  				continue
  5190  			}
  5191  			for i := topLayer; i >= 0; i-- {
  5192  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  5193  				// So we don't need `nodeToRemove.loadNext`
  5194  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  5195  			}
  5196  			nodeToRemove.mu.Unlock()
  5197  			unlockUintDesc(preds, highestLocked)
  5198  			atomic.AddInt64(&s.length, -1)
  5199  			return true
  5200  		}
  5201  		return false
  5202  	}
  5203  }
  5204  
  5205  // Range calls f sequentially for each value present in the skip set.
  5206  // If f returns false, range stops the iteration.
  5207  func (s *UintSetDesc) Range(f func(value uint) bool) {
  5208  	x := s.header.atomicLoadNext(0)
  5209  	for x != nil {
  5210  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  5211  			x = x.atomicLoadNext(0)
  5212  			continue
  5213  		}
  5214  		if !f(x.value) {
  5215  			break
  5216  		}
  5217  		x = x.atomicLoadNext(0)
  5218  	}
  5219  }
  5220  
  5221  // Len return the length of this skip set.
  5222  func (s *UintSetDesc) Len() int {
  5223  	return int(atomic.LoadInt64(&s.length))
  5224  }
  5225  
  5226  // StringSet represents a set based on skip list.
  5227  type StringSet struct {
  5228  	header       *stringNode
  5229  	length       int64
  5230  	highestLevel int64 // highest level for now
  5231  }
  5232  
  5233  type stringNode struct {
  5234  	value string
  5235  	score uint64
  5236  	next  optionalArray // [level]*stringNode
  5237  	mu    sync.Mutex
  5238  	flags bitflag
  5239  	level uint32
  5240  }
  5241  
  5242  func newStringNode(value string, level int) *stringNode {
  5243  	node := &stringNode{
  5244  		score: hash(value),
  5245  		value: value,
  5246  		level: uint32(level),
  5247  	}
  5248  	if level > op1 {
  5249  		node.next.extra = new([op2]unsafe.Pointer)
  5250  	}
  5251  	return node
  5252  }
  5253  
  5254  func (n *stringNode) loadNext(i int) *stringNode {
  5255  	return (*stringNode)(n.next.load(i))
  5256  }
  5257  
  5258  func (n *stringNode) storeNext(i int, node *stringNode) {
  5259  	n.next.store(i, unsafe.Pointer(node))
  5260  }
  5261  
  5262  func (n *stringNode) atomicLoadNext(i int) *stringNode {
  5263  	return (*stringNode)(n.next.atomicLoad(i))
  5264  }
  5265  
  5266  func (n *stringNode) atomicStoreNext(i int, node *stringNode) {
  5267  	n.next.atomicStore(i, unsafe.Pointer(node))
  5268  }
  5269  
  5270  // NewString return an empty string skip set.
  5271  func NewString() *StringSet {
  5272  	h := newStringNode("", maxLevel)
  5273  	h.flags.SetTrue(fullyLinked)
  5274  	return &StringSet{
  5275  		header:       h,
  5276  		highestLevel: defaultHighestLevel,
  5277  	}
  5278  }
  5279  
  5280  // findNodeRemove takes a value and two maximal-height arrays then searches exactly as in a sequential skip-list.
  5281  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5282  func (s *StringSet) findNodeRemove(value string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) int {
  5283  	score := hash(value)
  5284  	// lFound represents the index of the first layer at which it found a node.
  5285  	lFound, x := -1, s.header
  5286  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5287  		succ := x.atomicLoadNext(i)
  5288  		for succ != nil && succ.cmp(score, value) < 0 {
  5289  			x = succ
  5290  			succ = x.atomicLoadNext(i)
  5291  		}
  5292  		preds[i] = x
  5293  		succs[i] = succ
  5294  
  5295  		// Check if the value already in the skip list.
  5296  		if lFound == -1 && succ != nil && succ.cmp(score, value) == 0 {
  5297  			lFound = i
  5298  		}
  5299  	}
  5300  	return lFound
  5301  }
  5302  
  5303  // findNodeAdd takes a value and two maximal-height arrays then searches exactly as in a sequential skip-set.
  5304  // The returned preds and succs always satisfy preds[i] > value >= succs[i].
  5305  func (s *StringSet) findNodeAdd(value string, preds *[maxLevel]*stringNode, succs *[maxLevel]*stringNode) int {
  5306  	score := hash(value)
  5307  	x := s.header
  5308  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5309  		succ := x.atomicLoadNext(i)
  5310  		for succ != nil && succ.cmp(score, value) < 0 {
  5311  			x = succ
  5312  			succ = x.atomicLoadNext(i)
  5313  		}
  5314  		preds[i] = x
  5315  		succs[i] = succ
  5316  
  5317  		// Check if the value already in the skip list.
  5318  		if succ != nil && succ.cmp(score, value) == 0 {
  5319  			return i
  5320  		}
  5321  	}
  5322  	return -1
  5323  }
  5324  
  5325  func unlockString(preds [maxLevel]*stringNode, highestLevel int) {
  5326  	var prevPred *stringNode
  5327  	for i := highestLevel; i >= 0; i-- {
  5328  		if preds[i] != prevPred { // the node could be unlocked by previous loop
  5329  			preds[i].mu.Unlock()
  5330  			prevPred = preds[i]
  5331  		}
  5332  	}
  5333  }
  5334  
  5335  // Add add the value into skip set, return true if this process insert the value into skip set,
  5336  // return false if this process can't insert this value, because another process has insert the same value.
  5337  //
  5338  // If the value is in the skip set but not fully linked, this process will wait until it is.
  5339  func (s *StringSet) Add(value string) bool {
  5340  	level := s.randomlevel()
  5341  	var preds, succs [maxLevel]*stringNode
  5342  	for {
  5343  		lFound := s.findNodeAdd(value, &preds, &succs)
  5344  		if lFound != -1 { // indicating the value is already in the skip-list
  5345  			nodeFound := succs[lFound]
  5346  			if !nodeFound.flags.Get(marked) {
  5347  				for !nodeFound.flags.Get(fullyLinked) {
  5348  					// The node is not yet fully linked, just waits until it is.
  5349  				}
  5350  				return false
  5351  			}
  5352  			// If the node is marked, represents some other thread is in the process of deleting this node,
  5353  			// we need to add this node in next loop.
  5354  			continue
  5355  		}
  5356  		// Add this node into skip list.
  5357  		var (
  5358  			highestLocked        = -1 // the highest level being locked by this process
  5359  			valid                = true
  5360  			pred, succ, prevPred *stringNode
  5361  		)
  5362  		for layer := 0; valid && layer < level; layer++ {
  5363  			pred = preds[layer]   // target node's previous node
  5364  			succ = succs[layer]   // target node's next node
  5365  			if pred != prevPred { // the node in this layer could be locked by previous loop
  5366  				pred.mu.Lock()
  5367  				highestLocked = layer
  5368  				prevPred = pred
  5369  			}
  5370  			// valid check if there is another node has inserted into the skip list in this layer during this process.
  5371  			// It is valid if:
  5372  			// 1. The previous node and next node both are not marked.
  5373  			// 2. The previous node's next node is succ in this layer.
  5374  			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
  5375  		}
  5376  		if !valid {
  5377  			unlockString(preds, highestLocked)
  5378  			continue
  5379  		}
  5380  
  5381  		nn := newStringNode(value, level)
  5382  		for layer := 0; layer < level; layer++ {
  5383  			nn.storeNext(layer, succs[layer])
  5384  			preds[layer].atomicStoreNext(layer, nn)
  5385  		}
  5386  		nn.flags.SetTrue(fullyLinked)
  5387  		unlockString(preds, highestLocked)
  5388  		atomic.AddInt64(&s.length, 1)
  5389  		return true
  5390  	}
  5391  }
  5392  
  5393  func (s *StringSet) randomlevel() int {
  5394  	// Generate random level.
  5395  	level := randomLevel()
  5396  	// Update highest level if possible.
  5397  	for {
  5398  		hl := atomic.LoadInt64(&s.highestLevel)
  5399  		if int64(level) <= hl {
  5400  			break
  5401  		}
  5402  		if atomic.CompareAndSwapInt64(&s.highestLevel, hl, int64(level)) {
  5403  			break
  5404  		}
  5405  	}
  5406  	return level
  5407  }
  5408  
  5409  // Contains check if the value is in the skip set.
  5410  func (s *StringSet) Contains(value string) bool {
  5411  	score := hash(value)
  5412  	x := s.header
  5413  	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
  5414  		nex := x.atomicLoadNext(i)
  5415  		for nex != nil && nex.cmp(score, value) < 0 {
  5416  			x = nex
  5417  			nex = x.atomicLoadNext(i)
  5418  		}
  5419  
  5420  		// Check if the value already in the skip list.
  5421  		if nex != nil && nex.cmp(score, value) == 0 {
  5422  			return nex.flags.MGet(fullyLinked|marked, fullyLinked)
  5423  		}
  5424  	}
  5425  	return false
  5426  }
  5427  
  5428  // Remove a node from the skip set.
  5429  func (s *StringSet) Remove(value string) bool {
  5430  	var (
  5431  		nodeToRemove *stringNode
  5432  		isMarked     bool // represents if this operation mark the node
  5433  		topLayer     = -1
  5434  		preds, succs [maxLevel]*stringNode
  5435  	)
  5436  	for {
  5437  		lFound := s.findNodeRemove(value, &preds, &succs)
  5438  		if isMarked || // this process mark this node or we can find this node in the skip list
  5439  			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
  5440  			if !isMarked { // we don't mark this node for now
  5441  				nodeToRemove = succs[lFound]
  5442  				topLayer = lFound
  5443  				nodeToRemove.mu.Lock()
  5444  				if nodeToRemove.flags.Get(marked) {
  5445  					// The node is marked by another process,
  5446  					// the physical deletion will be accomplished by another process.
  5447  					nodeToRemove.mu.Unlock()
  5448  					return false
  5449  				}
  5450  				nodeToRemove.flags.SetTrue(marked)
  5451  				isMarked = true
  5452  			}
  5453  			// Accomplish the physical deletion.
  5454  			var (
  5455  				highestLocked        = -1 // the highest level being locked by this process
  5456  				valid                = true
  5457  				pred, succ, prevPred *stringNode
  5458  			)
  5459  			for layer := 0; valid && (layer <= topLayer); layer++ {
  5460  				pred, succ = preds[layer], succs[layer]
  5461  				if pred != prevPred { // the node in this layer could be locked by previous loop
  5462  					pred.mu.Lock()
  5463  					highestLocked = layer
  5464  					prevPred = pred
  5465  				}
  5466  				// valid check if there is another node has inserted into the skip list in this layer
  5467  				// during this process, or the previous is removed by another process.
  5468  				// It is valid if:
  5469  				// 1. the previous node exists.
  5470  				// 2. no another node has inserted into the skip list in this layer.
  5471  				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
  5472  			}
  5473  			if !valid {
  5474  				unlockString(preds, highestLocked)
  5475  				continue
  5476  			}
  5477  			for i := topLayer; i >= 0; i-- {
  5478  				// Now we own the `nodeToRemove`, no other goroutine will modify it.
  5479  				// So we don't need `nodeToRemove.loadNext`
  5480  				preds[i].atomicStoreNext(i, nodeToRemove.loadNext(i))
  5481  			}
  5482  			nodeToRemove.mu.Unlock()
  5483  			unlockString(preds, highestLocked)
  5484  			atomic.AddInt64(&s.length, -1)
  5485  			return true
  5486  		}
  5487  		return false
  5488  	}
  5489  }
  5490  
  5491  // Range calls f sequentially for each value present in the skip set.
  5492  // If f returns false, range stops the iteration.
  5493  func (s *StringSet) Range(f func(value string) bool) {
  5494  	x := s.header.atomicLoadNext(0)
  5495  	for x != nil {
  5496  		if !x.flags.MGet(fullyLinked|marked, fullyLinked) {
  5497  			x = x.atomicLoadNext(0)
  5498  			continue
  5499  		}
  5500  		if !f(x.value) {
  5501  			break
  5502  		}
  5503  		x = x.atomicLoadNext(0)
  5504  	}
  5505  }
  5506  
  5507  // Len return the length of this skip set.
  5508  func (s *StringSet) Len() int {
  5509  	return int(atomic.LoadInt64(&s.length))
  5510  }
  5511  
  5512  // Return 1 if n is bigger, 0 if equal, else -1.
  5513  func (n *stringNode) cmp(score uint64, value string) int {
  5514  	if n.score > score {
  5515  		return 1
  5516  	} else if n.score == score {
  5517  		return cmpstring(n.value, value)
  5518  	}
  5519  	return -1
  5520  }