github.com/benz9527/xboot@v0.0.0-20240504061247-c23f15593274/lib/list/x_conc_skl.go

     1  package list
     2  
     3  import (
     4  	"cmp"
     5  	"sync/atomic"
     6  	"unsafe"
     7  
     8  	"github.com/benz9527/xboot/lib/id"
     9  	"github.com/benz9527/xboot/lib/infra"
    10  )
    11  
    12  const (
     13  	// Bit mask of the skip-list data node type: unique, linkedList or rbtree.
    14  	xConcSklXNodeModeFlagBits = 0x0003
     15  	// In rbtree mode, indicates whether a delete operation borrows the pred (0) or the succ node (1).
    16  	xConcSklRbtreeRmBorrowFlagBit = 0x0004
     17  	// Indicates the skip-list key sort direction.
    18  	xConcSklKeyCmpFlagBit = 0x0008 /* 0: asc; 1: desc */
    19  )
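
         // A rough sketch of how these masks pack into xConcSkl.flags, inferred only
         // from the mask values above:
         //
         //	bits 0-1: xNode mode (unique, linkedList, rbtree)
         //	bit  2:   rbtree removal borrows pred (0) or succ (1)
         //	bit  3:   key sort direction (0: asc, 1: desc)
         //
         // e.g. isSet(skl.flags, xConcSklKeyCmpFlagBit) reports a descending key order.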
    20  
    21  var _ XSkipList[uint8, uint8] = (*xConcSkl[uint8, uint8])(nil)
    22  
    23  type xConcSkl[K infra.OrderedKey, V any] struct {
    24  	head       *xConcSklNode[K, V]
    25  	vcmp       SklValComparator[V] // value comparator
    26  	rand       SklRand
    27  	optVer     id.UUIDGen // optimistic version generator
    28  	nodeLen    int64      // skip-list's node count.
    29  	indexCount uint64     // skip-list's index count.
    30  	flags      uint32
     31  	levels     int32 // skip-list's current max index height.
    32  }
    33  
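         // loadXNodeMode reads the xNode mode from the low bits (xConcSklXNodeModeFlagBits) of skl.flags.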
    34  func (skl *xConcSkl[K, V]) loadXNodeMode() xNodeMode {
    35  	return xNodeMode(atomicLoadBits(&skl.flags, xConcSklXNodeModeFlagBits))
    36  }
    37  
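         // atomicLoadHead atomically loads the head node pointer of the skip-list.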
    38  func (skl *xConcSkl[K, V]) atomicLoadHead() *xConcSklNode[K, V] {
    39  	return (*xConcSklNode[K, V])(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&skl.head))))
    40  }
    41  
     42  // traverse locates the target key and stores the nodes encountered during the index traversal.
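         // The aux slice is split into two halves: aux[l] holds the predecessor at
         // level l and aux[sklMaxLevel+l] holds the successor at level l.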
    43  func (skl *xConcSkl[K, V]) traverse(
    44  	lvl int32,
    45  	key K,
    46  	isDesc bool,
    47  	aux []*xConcSklNode[K, V],
    48  ) *xConcSklNode[K, V] {
    49  	for /* vertical */ forward, l := skl.atomicLoadHead(), lvl-1; l >= 0; l-- {
    50  		nIdx := forward.atomicLoadNextNode(l)
    51  		for /* horizontal */ nIdx != nil {
    52  			res := cmp.Compare[K](key, nIdx.key)
    53  			if /* horizontal next */ (!isDesc && res > 0) || (isDesc && res < 0) {
    54  				forward = nIdx
    55  				nIdx = forward.atomicLoadNextNode(l)
    56  			} else if /* found */ res == 0 {
    57  				aux[l] = forward          /* pred */
    58  				aux[sklMaxLevel+l] = nIdx /* succ */
    59  				return nIdx
    60  			} else /* not found, vertical next */ {
    61  				break
    62  			}
    63  		}
    64  
    65  		aux[l] = forward          /* pred */
    66  		aux[sklMaxLevel+l] = nIdx /* succ */
    67  	}
    68  	return nil
    69  }
    70  
     71  // rmTraverse locates the removal target key and stores the nodes encountered
     72  // during the index traversal.
     73  // It returns the highest level index at which the target key was found, or -1.
    74  func (skl *xConcSkl[K, V]) rmTraverse(
    75  	key K,
    76  	isDesc bool,
    77  	aux []*xConcSklNode[K, V],
    78  ) (foundAt int32) {
     79  	// foundAt records the highest level index at which the target node was found.
    80  	foundAt = -1
    81  	forward := skl.atomicLoadHead()
    82  	for /* vertical */ l := skl.Levels() - 1; l >= 0; l-- {
    83  		nIdx := forward.atomicLoadNextNode(l)
    84  		for /* horizontal */ nIdx != nil {
    85  			res := cmp.Compare[K](key, nIdx.key)
    86  			if (!isDesc && res > 0) || (isDesc && res < 0) {
    87  				forward = nIdx
    88  				nIdx = forward.atomicLoadNextNode(l)
    89  			} else {
    90  				break
    91  			}
    92  		}
    93  
    94  		aux[l] = forward
    95  		aux[sklMaxLevel+l] = nIdx
    96  
    97  		if foundAt == -1 && nIdx != nil && key == nIdx.key {
    98  			foundAt = l
    99  		}
    100  		// Move down to the next level.
   101  	}
   102  	return
   103  }
   104  
   105  // Classic Skip-List basic APIs
   106  
    107  // Len returns the skip-list's node count.
   108  func (skl *xConcSkl[K, V]) Len() int64 {
   109  	return atomic.LoadInt64(&skl.nodeLen)
   110  }
   111  
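         // IndexCount returns the skip-list's total index entry count across all levels.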
   112  func (skl *xConcSkl[K, V]) IndexCount() uint64 {
   113  	return atomic.LoadUint64(&skl.indexCount)
   114  }
   115  
    116  // Levels returns the skip-list's current max index height.
   117  func (skl *xConcSkl[K, V]) Levels() int32 {
   118  	return atomic.LoadInt32(&skl.levels)
   119  }
   120  
    121  // Insert adds the val by a key into the skip-list.
    122  // The ifNotPresent option only takes effect for the unique element skip-list.
   123  func (skl *xConcSkl[K, V]) Insert(key K, val V, ifNotPresent ...bool) error {
   124  	if skl.Len() >= sklMaxSize {
   125  		return infra.WrapErrorStack(ErrXSklIsFull)
   126  	}
   127  
   128  	var (
   129  		aux     = make([]*xConcSklNode[K, V], 2*sklMaxLevel)
   130  		oldLvls = skl.Levels()
    131  		newLvls = skl.rand(int(oldLvls), skl.Len()) // generated once to avoid repeated calls inside the retry loop
   132  		ver     = skl.optVer.Number()
   133  		isDesc  = isSet(skl.flags, xConcSklKeyCmpFlagBit)
   134  	)
   135  
   136  	if len(ifNotPresent) <= 0 {
   137  		ifNotPresent = insertReplaceDisabled
   138  	}
   139  
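         	// Optimistic concurrency control: retry the traverse-lock-validate-link sequence
         	// until every [pred -> succ] window is still intact while the pred nodes are locked.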
   140  	for {
   141  		if node := skl.traverse(max(oldLvls, newLvls), key, isDesc, aux); node != nil {
   142  			if /* conc rm */ atomicIsSet(&node.flags, nodeRemovingFlagBit) {
   143  				continue
   144  			} else if /* conc d-check */ skl.Len() >= sklMaxSize {
   145  				return infra.WrapErrorStack(ErrXSklIsFull)
   146  			}
   147  
   148  			if isAppend, err := node.storeVal(ver, val, skl.vcmp, ifNotPresent...); err != nil {
   149  				return infra.WrapErrorStack(err)
   150  			} else if isAppend {
   151  				atomic.AddInt64(&skl.nodeLen, 1)
   152  			}
   153  			return nil
   154  		}
    155  		// Node not present. Add a new node into the skip-list.
   156  		var (
   157  			pred, succ, prev *xConcSklNode[K, V]
   158  			isValid          = true
   159  			lockedLevels     = int32(-1)
   160  		)
   161  		for l := int32(0); isValid && l < newLvls; l++ {
   162  			pred, succ = aux[l], aux[sklMaxLevel+l]
   163  			if /* lock */ pred != prev {
   164  				pred.lock(ver)
   165  				lockedLevels = l
   166  				prev = pred
   167  			}
    168  			// Check the index and the data node:
   169  			//      +------+       +------+      +------+
   170  			// ...  | pred |------>|  new |----->| succ | ...
   171  			//      +------+       +------+      +------+
    172  			// 1. Neither pred nor succ is being removed.
    173  			// 2. pred's next node at this level is still succ.
   174  			isValid = !atomicIsSet(&pred.flags, nodeRemovingFlagBit) &&
   175  				(succ == nil || !atomicIsSet(&succ.flags, nodeRemovingFlagBit)) &&
   176  				pred.atomicLoadNextNode(l) == succ
   177  		}
   178  		if /* conc insert */ !isValid {
   179  			unlockNodes(ver, lockedLevels, aux[0:sklMaxLevel]...)
   180  			continue
   181  		} else if /* conc d-check */ skl.Len() >= sklMaxSize {
   182  			unlockNodes(ver, lockedLevels, aux[0:sklMaxLevel]...)
   183  			return infra.WrapErrorStack(ErrXSklIsFull)
   184  		}
   185  
   186  		node := newXConcSklNode(key, val, newLvls, skl.loadXNodeMode(), skl.vcmp)
   187  		for /* linking */ l := int32(0); l < newLvls; l++ {
   188  			//      +------+       +------+      +------+
   189  			// ...  | pred |------>|  new |----->| succ | ...
   190  			//      +------+       +------+      +------+
    191  			node.storeNextNode(l, aux[sklMaxLevel+l]) // Plain store is fine; the new node isn't linked yet.
   192  			aux[l].atomicStoreNextNode(l, node)       // Memory barrier, concurrency safety.
   193  		}
   194  		atomicSet(&node.flags, nodeInsertedFlagBit)
   195  		if oldLvls = skl.Levels(); oldLvls < newLvls {
   196  			atomic.StoreInt32(&skl.levels, newLvls)
   197  		}
   198  
   199  		unlockNodes(ver, lockedLevels, aux[0:sklMaxLevel]...)
   200  		atomic.AddInt64(&skl.nodeLen, 1)
   201  		atomic.AddUint64(&skl.indexCount, uint64(newLvls))
   202  		return nil
   203  	}
   204  }
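
         // A hedged usage sketch for Insert. NewXConcSkipList is an illustrative
         // assumption only; the real constructor lives elsewhere in this package and
         // may differ in name and options:
         //
         //	skl, _ := NewXConcSkipList[uint64, string]() // hypothetical constructor
         //	if err := skl.Insert(1, "a"); err != nil {
         //		// e.g. ErrXSklIsFull once the skip-list reaches sklMaxSize
         //	}
         //	_ = skl.Insert(1, "b", true) // only insert when the key is not present yet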
   205  
    206  // Foreach iterates over each node (and each xNode within the node) with the passed-in function.
    207  // Once the function returns false, the iteration stops.
   208  // This function doesn't guarantee correctness in the case of concurrent
   209  // reads and writes.
   210  func (skl *xConcSkl[K, V]) Foreach(action func(i int64, item SklIterationItem[K, V]) bool) {
   211  	i := int64(0)
   212  	item := &xSklIter[K, V]{}
   213  	switch forward, mode := skl.atomicLoadHead().atomicLoadNextNode(0), skl.loadXNodeMode(); mode {
   214  	case unique:
   215  		for forward != nil {
   216  			if !atomicAreEqual(&forward.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   217  				forward = forward.atomicLoadNextNode(0)
   218  				continue
   219  			}
   220  			item.nodeLevelFn = func() uint32 {
   221  				return atomic.LoadUint32(&forward.level)
   222  			}
   223  			item.nodeItemCountFn = func() int64 {
   224  				return atomic.LoadInt64(&forward.count)
   225  			}
   226  			item.keyFn = func() K {
   227  				return forward.key
   228  			}
   229  			item.valFn = func() V {
   230  				node := forward.atomicLoadRoot()
   231  				if node == nil {
   232  					return *new(V)
   233  				}
   234  				return *node.vptr
   235  			}
   236  			if res := action(i, item); !res {
   237  				break
   238  			}
   239  			forward = forward.atomicLoadNextNode(0)
   240  			i++
   241  		}
   242  	case linkedList:
   243  		for forward != nil {
   244  			if !atomicAreEqual(&forward.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   245  				forward = forward.atomicLoadNextNode(0)
   246  				continue
   247  			}
   248  			item.nodeLevelFn = func() uint32 {
   249  				return atomic.LoadUint32(&forward.level)
   250  			}
   251  			item.nodeItemCountFn = func() int64 {
   252  				return atomic.LoadInt64(&forward.count)
   253  			}
   254  			item.keyFn = func() K {
   255  				return forward.key
   256  			}
   257  			for x := forward.atomicLoadRoot().parent; x != nil; x = x.parent {
   258  				item.valFn = func() V {
   259  					return *x.vptr
   260  				}
   261  				var res bool
   262  				if res, i = action(i, item), i+1; !res {
   263  					break
   264  				}
   265  			}
   266  			forward = forward.atomicLoadNextNode(0)
   267  		}
   268  	case rbtree:
   269  		for forward != nil {
   270  			if !atomicAreEqual(&forward.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   271  				forward = forward.atomicLoadNextNode(0)
   272  				continue
   273  			}
   274  			item.nodeLevelFn = func() uint32 {
   275  				return atomic.LoadUint32(&forward.level)
   276  			}
   277  			item.nodeItemCountFn = func() int64 {
   278  				return atomic.LoadInt64(&forward.count)
   279  			}
   280  			item.keyFn = func() K {
   281  				return forward.key
   282  			}
   283  			forward.rbDFS(func(idx int64, color color, val V) bool {
   284  				item.valFn = func() V {
   285  					return val
   286  				}
   287  				var res bool
   288  				if res, i = action(i, item), i+1; !res {
   289  					return false
   290  				}
   291  				return true
   292  			})
   293  			forward = forward.atomicLoadNextNode(0)
   294  		}
   295  	default:
   296  		panic("[x-conc-skl] unknown node type")
   297  	}
   298  }
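
         // A minimal Foreach sketch. The Key()/Val() accessor names on SklIterationItem
         // are assumptions based on the keyFn/valFn fields above:
         //
         //	skl.Foreach(func(i int64, item SklIterationItem[uint64, string]) bool {
         //		_, _ = item.Key(), item.Val()
         //		return true // return false to stop the iteration early
         //	})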
   299  
    300  // LoadFirst returns the first value stored in the skip-list for a key,
    301  // or an error if no value is present.
   302  func (skl *xConcSkl[K, V]) LoadFirst(key K) (element SklElement[K, V], err error) {
   303  	if skl.Len() <= 0 {
   304  		return nil, infra.WrapErrorStack(ErrXSklIsEmpty)
   305  	}
   306  	isDesc := isSet(skl.flags, xConcSklKeyCmpFlagBit)
   307  
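         	// "First" depends on the xNode mode: the single value in unique mode, the list
         	// head in linkedList mode and the tree minimum in rbtree mode.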
   308  	forward := skl.atomicLoadHead()
   309  	mode := skl.loadXNodeMode()
   310  	for /* vertical */ l := skl.Levels() - 1; l >= 0; l-- {
   311  		nIdx := forward.atomicLoadNextNode(l)
   312  		for /* horizontal */ nIdx != nil && ((!isDesc && key > nIdx.key) || (isDesc && key < nIdx.key)) {
   313  			forward = nIdx
   314  			nIdx = forward.atomicLoadNextNode(l)
   315  		}
   316  
   317  		if /* found */ nIdx != nil && key == nIdx.key {
   318  			if atomicAreEqual(&nIdx.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   319  				if /* conc rw empty */ atomic.LoadInt64(&nIdx.count) <= 0 {
   320  					return nil, infra.WrapErrorStack(ErrXSklConcRWLoadEmpty)
   321  				}
   322  				switch mode {
   323  				case unique:
   324  					if x := nIdx.atomicLoadRoot(); x == nil {
   325  						return nil, ErrXSklConcRWLoadEmpty
   326  					} else {
   327  						return &xSklElement[K, V]{
   328  							key: key,
   329  							val: *x.vptr,
   330  						}, nil
   331  					}
   332  				case linkedList:
   333  					if x := nIdx.atomicLoadRoot(); x == nil {
   334  						return nil, ErrXSklConcRWLoadEmpty
   335  					} else {
   336  						return &xSklElement[K, V]{
   337  							key: key,
   338  							val: *x.parent.vptr,
   339  						}, nil
   340  					}
   341  				case rbtree:
   342  					if x := nIdx.root.minimum(); x == nil {
   343  						return nil, infra.WrapErrorStack(ErrXSklConcRWLoadEmpty)
   344  					} else {
   345  						return &xSklElement[K, V]{
   346  							key: key,
   347  							val: *x.vptr,
   348  						}, nil
   349  					}
   350  				default:
   351  					panic("[x-conc-skl] unknown x-node type")
   352  				}
   353  			}
   354  			return nil, infra.WrapErrorStack(ErrXSklConcRWLoadFailed)
   355  		}
   356  	}
   357  	return nil, infra.WrapErrorStack(ErrXSklNotFound)
   358  }
   359  
    360  // RemoveFirst deletes and returns only the first value stored for a key.
   361  func (skl *xConcSkl[K, V]) RemoveFirst(key K) (element SklElement[K, V], err error) {
   362  	if skl.Len() <= 0 {
   363  		return nil, infra.WrapErrorStack(ErrXSklIsEmpty)
   364  	}
   365  
   366  	var (
   367  		aux      = make([]*xConcSklNode[K, V], 2*sklMaxLevel)
   368  		rmNode   *xConcSklNode[K, V]
    369  		isMarked bool // whether this operation has marked the node for removal
   370  		topLevel = int32(-1)
   371  		ver      = skl.optVer.Number()
   372  		foundAt  = int32(-1)
   373  		isDesc   = isSet(skl.flags, xConcSklKeyCmpFlagBit)
   374  	)
   375  
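         	// Two-phase removal: mark the target node as removing under its own lock, then
         	// lock the predecessor at every level, validate the links and unlink the node
         	// once it holds no values.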
   376  	switch mode := skl.loadXNodeMode(); mode {
    377  	// FIXME: Merge the logic of these 2 deletion loops
   378  	case unique:
   379  		for {
   380  			foundAt = skl.rmTraverse(key, isDesc, aux)
   381  			if isMarked || foundAt != -1 &&
   382  				atomicAreEqual(&aux[sklMaxLevel+foundAt].flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) &&
   383  				(int32(aux[sklMaxLevel+foundAt].level)-1) == foundAt {
   384  				if !isMarked {
   385  					rmNode = aux[sklMaxLevel+foundAt]
   386  					topLevel = foundAt
   387  					if !rmNode.tryLock(ver) {
   388  						if /* d-check */ atomicIsSet(&rmNode.flags, nodeRemovingFlagBit) {
   389  							return nil, infra.WrapErrorStack(ErrXSklConcRemoveTryLock)
   390  						}
   391  						isMarked = false
   392  						continue
   393  					}
   394  
   395  					if /* node locked, d-check */ atomicIsSet(&rmNode.flags, nodeRemovingFlagBit) {
   396  						rmNode.unlock(ver)
   397  						return nil, infra.WrapErrorStack(ErrXSklConcRemoving)
   398  					}
   399  
   400  					atomicSet(&rmNode.flags, nodeRemovingFlagBit)
   401  					isMarked = true
   402  				}
   403  
   404  				var (
   405  					lockedLayers         = int32(-1)
   406  					isValid              = true
   407  					pred, succ, prevPred *xConcSklNode[K, V]
   408  				)
   409  				for /* node locked */ l := int32(0); isValid && (l <= topLevel); l++ {
   410  					pred, succ = aux[l], aux[sklMaxLevel+l]
    411  					if /* lock index */ pred != prevPred {
   412  						pred.lock(ver)
   413  						lockedLayers = l
   414  						prevPred = pred
   415  					}
   416  					// Check:
   417  					// 1. the previous node exists.
   418  					// 2. no other nodes are inserted into the skip list in this layer.
   419  					isValid = !atomicIsSet(&pred.flags, nodeRemovingFlagBit) && pred.atomicLoadNextNode(l) == succ
   420  				}
   421  				if /* conc rm */ !isValid {
   422  					unlockNodes(ver, lockedLayers, aux[0:sklMaxLevel]...)
   423  					continue
   424  				}
   425  
   426  				element = &xSklElement[K, V]{
   427  					key: key,
   428  					val: *rmNode.atomicLoadRoot().vptr,
   429  				}
   430  				atomic.AddInt64(&rmNode.count, -1)
   431  				atomic.AddInt64(&skl.nodeLen, -1)
   432  
   433  				if atomic.LoadInt64(&rmNode.count) <= 0 {
   434  					for /* re-linking, reduce levels */ l := topLevel; l >= 0; l-- {
   435  						aux[l].atomicStoreNextNode(l, rmNode.loadNextNode(l))
   436  					}
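         					// Adding ^uint64(n-1) subtracts n from an unsigned counter (two's complement).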
   437  					atomic.AddUint64(&skl.indexCount, ^uint64(rmNode.level-1))
   438  				}
   439  
   440  				rmNode.unlock(ver)
   441  				unlockNodes(ver, lockedLayers, aux[0:sklMaxLevel]...)
   442  				return element, nil
   443  			}
   444  			break
   445  		}
   446  	case linkedList, rbtree:
   447  		for {
   448  			foundAt = skl.rmTraverse(key, isDesc, aux)
   449  			if isMarked || foundAt != -1 {
   450  				fullyLinkedButNotRemove := atomicAreEqual(&aux[sklMaxLevel+foundAt].flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked)
   451  				succMatch := (int32(aux[sklMaxLevel+foundAt].level) - 1) == foundAt
   452  				if !succMatch {
   453  					break
   454  				} else if !fullyLinkedButNotRemove {
   455  					continue
   456  				}
   457  
   458  				if fullyLinkedButNotRemove && !isMarked {
   459  					rmNode = aux[sklMaxLevel+foundAt]
   460  					topLevel = foundAt
   461  					if !rmNode.tryLock(ver) {
   462  						continue
   463  					}
   464  
   465  					if /* node locked */ !atomicIsSet(&rmNode.flags, nodeRemovingFlagBit) {
   466  						atomicSet(&rmNode.flags, nodeRemovingFlagBit)
   467  					}
   468  					isMarked = true
   469  				}
   470  
   471  				var (
   472  					lockedLayers     = int32(-1)
   473  					isValid          = true
   474  					pred, succ, prev *xConcSklNode[K, V]
   475  				)
   476  				for /* node locked */ l := int32(0); isValid && (l <= topLevel); l++ {
   477  					pred, succ = aux[l], aux[sklMaxLevel+l]
    478  					if /* lock index */ pred != prev {
   479  						pred.lock(ver)
   480  						lockedLayers = l
   481  						prev = pred
   482  					}
   483  					// Check:
   484  					// 1. the previous node exists.
   485  					// 2. no other nodes are inserted into the skip list in this layer.
   486  					isValid = !atomicIsSet(&pred.flags, nodeRemovingFlagBit) && pred.atomicLoadNextNode(l) == succ
   487  				}
   488  				if /* conc rm */ !isValid {
   489  					unlockNodes(ver, lockedLayers, aux[0:sklMaxLevel]...)
   490  					continue
   491  				}
   492  
   493  				switch mode {
   494  				case linkedList:
   495  					if /* locked */ x := rmNode.root.linkedListNext(); x != nil {
   496  						element = &xSklElement[K, V]{
   497  							key: key,
   498  							val: *x.vptr,
   499  						}
   500  						atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&rmNode.root.parent)), unsafe.Pointer(x.parent))
   501  						atomic.AddInt64(&rmNode.count, -1)
   502  						atomic.AddInt64(&skl.nodeLen, -1)
   503  						atomicUnset(&rmNode.flags, nodeRemovingFlagBit)
   504  					} else {
   505  						atomic.StoreInt64(&rmNode.count, 0)
   506  					}
   507  				case rbtree:
   508  					if /* locked */ x, _err := rmNode.rbRemoveMin(); _err == nil && x != nil {
   509  						element = &xSklElement[K, V]{
   510  							key: key,
   511  							val: *x.vptr,
   512  						}
   513  						atomic.AddInt64(&skl.nodeLen, -1)
   514  					}
   515  					atomicUnset(&rmNode.flags, nodeRemovingFlagBit)
   516  				}
   517  
   518  				if atomic.LoadInt64(&rmNode.count) <= 0 {
   519  					for /* re-linking, reduce levels */ l := topLevel; l >= 0; l-- {
   520  						aux[l].atomicStoreNextNode(l, rmNode.loadNextNode(l))
   521  					}
   522  					atomic.AddUint64(&skl.indexCount, ^uint64(rmNode.level-1))
   523  				}
   524  
   525  				rmNode.unlock(ver)
   526  				unlockNodes(ver, lockedLayers, aux[0:sklMaxLevel]...)
   527  				return element, nil
   528  			}
   529  			break
   530  		}
   531  	default: /* impossible */
   532  		panic("[x-conc-skl] unknown x-node type")
   533  	}
   534  
   535  	if foundAt == -1 {
   536  		return nil, infra.WrapErrorStack(ErrXSklNotFound)
   537  	}
   538  	return nil, infra.WrapErrorStack(ErrXSklUnknownReason)
   539  }
   540  
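         // PeekHead returns the first element of the skip-list without removing it,
         // or nil if no fully linked element is found.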
   541  func (skl *xConcSkl[K, V]) PeekHead() (element SklElement[K, V]) {
   542  	switch forward, mode := skl.atomicLoadHead().atomicLoadNextNode(0), skl.loadXNodeMode(); mode {
   543  	case unique:
    544  		for forward != nil { // nil-guard: the list may be empty or concurrently drained
   545  			if !atomicAreEqual(&forward.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   546  				forward = forward.atomicLoadNextNode(0)
   547  				continue
   548  			}
   549  			node := forward.atomicLoadRoot()
   550  			if node == nil {
   551  				return nil
   552  			}
   553  			element = &xSklElement[K, V]{
   554  				key: forward.key,
   555  				val: *node.vptr,
   556  			}
   557  			break
   558  		}
   559  	case linkedList:
    560  		for forward != nil { // nil-guard: the list may be empty or concurrently drained
   561  			if !atomicAreEqual(&forward.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   562  				forward = forward.atomicLoadNextNode(0)
   563  				continue
   564  			}
   565  			x := forward.atomicLoadRoot().parent
   566  			if x == nil {
   567  				return nil
   568  			}
   569  			element = &xSklElement[K, V]{
   570  				key: forward.key,
   571  				val: *x.vptr,
   572  			}
   573  			break
   574  		}
   575  	case rbtree:
    576  		for forward != nil { // nil-guard: the list may be empty or concurrently drained
   577  			if !atomicAreEqual(&forward.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   578  				forward = forward.atomicLoadNextNode(0)
   579  				continue
   580  			}
   581  			x := forward.root.minimum()
   582  			if x == nil {
   583  				return nil
   584  			}
   585  			element = &xSklElement[K, V]{
   586  				key: forward.key,
   587  				val: *x.vptr,
   588  			}
   589  			break
   590  		}
   591  	default:
   592  		panic("[x-conc-skl] unknown node type")
   593  	}
   594  	return element
   595  }
   596  
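         // PopHead removes and returns the first element of the skip-list,
         // or an error if the skip-list is empty.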
   597  func (skl *xConcSkl[K, V]) PopHead() (element SklElement[K, V], err error) {
   598  	forward := skl.atomicLoadHead().atomicLoadNextNode(0)
   599  	if forward == nil {
   600  		return nil, infra.WrapErrorStack(ErrXSklIsEmpty)
   601  	}
   602  	return skl.RemoveFirst(forward.key)
   603  }
   604  
   605  // Duplicated element Skip-List basic APIs
   606  
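         // LoadIfMatch returns every value stored under key for which matcher returns true.
         // Only the duplicate element modes (linkedList, rbtree) support it; unique mode panics.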
   607  func (skl *xConcSkl[K, V]) LoadIfMatch(key K, matcher func(that V) bool) ([]SklElement[K, V], error) {
   608  	if skl.Len() <= 0 {
   609  		return nil, infra.WrapErrorStack(ErrXSklIsEmpty)
   610  	}
   611  
   612  	var (
   613  		forward  = skl.atomicLoadHead()
   614  		mode     = skl.loadXNodeMode()
   615  		elements = make([]SklElement[K, V], 0, 32)
   616  		isDesc   = isSet(skl.flags, xConcSklKeyCmpFlagBit)
   617  	)
   618  	for /* vertical */ l := skl.Levels() - 1; l >= 0; l-- {
   619  		nIdx := forward.atomicLoadNextNode(l)
   620  		for /* horizontal */ nIdx != nil && ((!isDesc && key > nIdx.key) || (isDesc && key < nIdx.key)) {
   621  			forward = nIdx
   622  			nIdx = forward.atomicLoadNextNode(l)
   623  		}
   624  
   625  		if /* found */ nIdx != nil && key == nIdx.key {
   626  			if atomicAreEqual(&nIdx.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   627  				if /* conc rw */ atomic.LoadInt64(&nIdx.count) <= 0 {
   628  					return nil, infra.WrapErrorStack(ErrXSklConcRWLoadEmpty)
   629  				}
   630  				switch mode {
   631  				case unique:
    632  					panic("[x-conc-skl] unique mode skip-list does not implement the load if match method")
   633  				case linkedList:
   634  					for x := nIdx.atomicLoadRoot().parent; x != nil; x = x.linkedListNext() {
   635  						v := *x.vptr
   636  						if matcher(v) {
   637  							elements = append(elements, &xSklElement[K, V]{
   638  								key: key,
   639  								val: v,
   640  							})
   641  						}
   642  					}
   643  					return elements, nil
   644  				case rbtree:
   645  					nIdx.rbDFS(func(idx int64, color color, v V) bool {
   646  						if matcher(v) {
   647  							elements = append(elements, &xSklElement[K, V]{
   648  								key: key,
   649  								val: v,
   650  							})
   651  						}
   652  						return true
   653  					})
   654  					return elements, nil
   655  				default:
   656  					panic("[x-conc-skl] unknown x-node type")
   657  				}
   658  			}
   659  			return nil, infra.WrapErrorStack(ErrXSklConcRWLoadFailed)
   660  		}
   661  	}
   662  	return nil, infra.WrapErrorStack(ErrXSklNotFound)
   663  }
   664  
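         // LoadAll returns every value stored under key.
         // Only the duplicate element modes (linkedList, rbtree) support it; unique mode panics.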
   665  func (skl *xConcSkl[K, V]) LoadAll(key K) ([]SklElement[K, V], error) {
   666  	if skl.Len() <= 0 {
   667  		return nil, infra.WrapErrorStack(ErrXSklIsEmpty)
   668  	}
   669  
   670  	var (
   671  		forward  = skl.atomicLoadHead()
   672  		mode     = skl.loadXNodeMode()
   673  		elements = make([]SklElement[K, V], 0, 32)
   674  		isDesc   = isSet(skl.flags, xConcSklKeyCmpFlagBit)
   675  	)
   676  	for /* vertical */ l := skl.Levels() - 1; l >= 0; l-- {
   677  		nIdx := forward.atomicLoadNextNode(l)
   678  		for /* horizontal */ nIdx != nil && ((!isDesc && key > nIdx.key) || (isDesc && key < nIdx.key)) {
   679  			forward = nIdx
   680  			nIdx = forward.atomicLoadNextNode(l)
   681  		}
   682  
   683  		if /* found */ nIdx != nil && key == nIdx.key {
   684  			if atomicAreEqual(&nIdx.flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked) {
   685  				if /* conc rw */ atomic.LoadInt64(&nIdx.count) <= 0 {
   686  					return nil, ErrXSklConcRWLoadEmpty
   687  				}
   688  				switch mode {
   689  				case unique:
    690  					panic("[x-conc-skl] unique mode skip-list does not implement the load all method")
   691  				case linkedList:
   692  					for x := nIdx.atomicLoadRoot().parent; x != nil; x = x.linkedListNext() {
   693  						elements = append(elements, &xSklElement[K, V]{
   694  							key: key,
   695  							val: *x.vptr,
   696  						})
   697  					}
   698  					return elements, nil
   699  				case rbtree:
   700  					nIdx.rbDFS(func(idx int64, color color, v V) bool {
   701  						elements = append(elements, &xSklElement[K, V]{
   702  							key: key,
   703  							val: v,
   704  						})
   705  						return true
   706  					})
   707  					return elements, nil
   708  				default:
   709  					panic("[x-conc-skl] unknown x-node type")
   710  				}
   711  			}
   712  			return nil, infra.WrapErrorStack(ErrXSklConcRWLoadFailed)
   713  		}
   714  	}
   715  	return nil, infra.WrapErrorStack(ErrXSklNotFound)
   716  }
   717  
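         // RemoveIfMatch removes every value stored under key for which matcher returns true
         // and returns the removed elements.
         // Only the duplicate element modes (linkedList, rbtree) support it; unique mode panics.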
   718  func (skl *xConcSkl[K, V]) RemoveIfMatch(key K, matcher func(that V) bool) ([]SklElement[K, V], error) {
   719  	if skl.Len() <= 0 {
   720  		return nil, infra.WrapErrorStack(ErrXSklIsEmpty)
   721  	}
   722  
   723  	var (
   724  		aux      = make([]*xConcSklNode[K, V], 2*sklMaxLevel)
   725  		rmNode   *xConcSklNode[K, V]
    726  		isMarked bool // whether this operation has marked the node for removal
   727  		topLevel = int32(-1)
   728  		ver      = skl.optVer.Number()
   729  		foundAt  = int32(-1)
   730  		elements = make([]SklElement[K, V], 0, 32)
   731  		isDesc   = isSet(skl.flags, xConcSklKeyCmpFlagBit)
   732  	)
   733  
   734  	switch mode := skl.loadXNodeMode(); mode {
    735  	// FIXME: Merge the logic of these 2 deletion loops
   736  	case unique:
    737  		panic("[x-conc-skl] unique mode skip-list does not implement the remove if match method")
   738  	case linkedList, rbtree:
   739  		for {
   740  			foundAt = skl.rmTraverse(key, isDesc, aux)
   741  			if isMarked || foundAt != -1 {
   742  				fullyLinkedButNotRemove := atomicAreEqual(&aux[sklMaxLevel+foundAt].flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked)
   743  				succMatch := (int32(aux[sklMaxLevel+foundAt].level) - 1) == foundAt
   744  				if !succMatch {
   745  					break
   746  				} else if !fullyLinkedButNotRemove {
   747  					continue
   748  				}
   749  
   750  				if fullyLinkedButNotRemove && !isMarked {
   751  					rmNode = aux[sklMaxLevel+foundAt]
   752  					topLevel = foundAt
   753  					if !rmNode.tryLock(ver) {
   754  						continue
   755  					}
   756  
   757  					if /* node locked */ !atomicIsSet(&rmNode.flags, nodeRemovingFlagBit) {
   758  						atomicSet(&rmNode.flags, nodeRemovingFlagBit)
   759  					}
   760  					isMarked = true
   761  				}
   762  
   763  				var (
   764  					lockedLayers     = int32(-1)
   765  					isValid          = true
   766  					pred, succ, prev *xConcSklNode[K, V]
   767  				)
   768  				for /* node locked */ l := int32(0); isValid && (l <= topLevel); l++ {
   769  					pred, succ = aux[l], aux[sklMaxLevel+l]
    770  					if /* lock index */ pred != prev {
   771  						pred.lock(ver)
   772  						lockedLayers = l
   773  						prev = pred
   774  					}
   775  					// Check:
   776  					// 1. the previous node exists.
   777  					// 2. no other nodes are inserted into the skip list in this layer.
   778  					isValid = !atomicIsSet(&pred.flags, nodeRemovingFlagBit) && pred.atomicLoadNextNode(l) == succ
   779  				}
   780  				if /* conc rm */ !isValid {
   781  					unlockNodes(ver, lockedLayers, aux[0:sklMaxLevel]...)
   782  					continue
   783  				}
   784  
   785  				switch mode {
   786  				case linkedList:
   787  					if x := rmNode.root.linkedListNext(); x == nil {
    788  						atomic.StoreInt64(&rmNode.count, 0) // no values left; mark the node empty so it gets unlinked below
   789  					} else {
   790  						first, prev := x, x
   791  						for ; /* locked */ x != nil; x = x.linkedListNext() {
   792  							if matcher(*x.vptr) {
   793  								if x == first {
   794  									first = x.linkedListNext()
   795  									atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&rmNode.root.parent)), unsafe.Pointer(first))
   796  								} else {
   797  									prev.parent = x.linkedListNext()
   798  								}
   799  								elements = append(elements, &xSklElement[K, V]{
   800  									key: key,
   801  									val: *x.vptr,
   802  								})
   803  								atomic.AddInt64(&rmNode.count, -1)
   804  								atomic.AddInt64(&skl.nodeLen, -1)
   805  							} else {
   806  								prev = x
   807  							}
   808  						}
   809  						atomicUnset(&rmNode.flags, nodeRemovingFlagBit)
   810  					}
   811  				case rbtree:
   812  					// TODO fix bad efficiency
   813  					rmNode.rbDFS( /* locked */ func(idx int64, color color, v V) bool {
   814  						if matcher(v) {
   815  							elements = append(elements, &xSklElement[K, V]{
   816  								key: key,
   817  								val: v,
   818  							})
   819  						}
   820  						return true
   821  					})
   822  					for _, e := range elements {
   823  						if _, err := rmNode.rbRemove(e.Val(), skl.vcmp); err == nil {
   824  							atomic.AddInt64(&rmNode.count, -1)
   825  							atomic.AddInt64(&skl.nodeLen, -1)
   826  						}
   827  					}
   828  					atomicUnset(&rmNode.flags, nodeRemovingFlagBit)
   829  				}
   830  
   831  				if atomic.LoadInt64(&rmNode.count) <= 0 {
   832  					for /* re-linking, reduce levels */ l := topLevel; l >= 0; l-- {
   833  						aux[l].atomicStoreNextNode(l, rmNode.loadNextNode(l))
   834  					}
   835  					atomic.AddUint64(&skl.indexCount, ^uint64(rmNode.level-1))
   836  				}
   837  
   838  				rmNode.unlock(ver)
   839  				unlockNodes(ver, lockedLayers, aux[0:sklMaxLevel]...)
   840  				return elements, nil
   841  			}
   842  			break
   843  		}
   844  	default:
   845  		panic("[x-conc-skl] unknown x-node type")
   846  	}
   847  
   848  	if foundAt == -1 {
   849  		return nil, infra.WrapErrorStack(ErrXSklNotFound)
   850  	}
   851  	return nil, infra.WrapErrorStack(ErrXSklUnknownReason)
   852  }
   853  
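         // RemoveAll removes every value stored under key and returns the removed elements.
         // Only the duplicate element modes (linkedList, rbtree) support it; unique mode panics.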
   854  func (skl *xConcSkl[K, V]) RemoveAll(key K) ([]SklElement[K, V], error) {
   855  	if skl.Len() <= 0 {
   856  		return nil, infra.WrapErrorStack(ErrXSklIsEmpty)
   857  	}
   858  
   859  	var (
   860  		aux      = make([]*xConcSklNode[K, V], 2*sklMaxLevel)
   861  		rmNode   *xConcSklNode[K, V]
    862  		isMarked bool // whether this operation has marked the node for removal
   863  		topLevel = int32(-1)
   864  		ver      = skl.optVer.Number()
   865  		foundAt  = int32(-1)
   866  		elements = make([]SklElement[K, V], 0, 32)
   867  		isDesc   = isSet(skl.flags, xConcSklKeyCmpFlagBit)
   868  	)
   869  
   870  	switch mode := skl.loadXNodeMode(); mode {
    871  	// FIXME: Merge the logic of these 2 deletion loops
   872  	case unique:
    873  		panic("[x-conc-skl] unique mode skip-list does not implement the remove all method")
   874  	case linkedList, rbtree:
   875  		for {
   876  			foundAt = skl.rmTraverse(key, isDesc, aux)
   877  			if isMarked || foundAt != -1 {
   878  				fullyLinkedButNotRemove := atomicAreEqual(&aux[sklMaxLevel+foundAt].flags, nodeInsertedFlagBit|nodeRemovingFlagBit, insertFullyLinked)
   879  				succMatch := (int32(aux[sklMaxLevel+foundAt].level) - 1) == foundAt
   880  				if !succMatch {
   881  					break
   882  				} else if !fullyLinkedButNotRemove {
   883  					continue
   884  				}
   885  
   886  				if fullyLinkedButNotRemove && !isMarked {
   887  					rmNode = aux[sklMaxLevel+foundAt]
   888  					topLevel = foundAt
   889  					if !rmNode.tryLock(ver) {
   890  						continue
   891  					}
   892  
   893  					if /* node locked */ !atomicIsSet(&rmNode.flags, nodeRemovingFlagBit) {
   894  						atomicSet(&rmNode.flags, nodeRemovingFlagBit)
   895  					}
   896  					isMarked = true
   897  				}
   898  
   899  				var (
   900  					lockedLayers     = int32(-1)
   901  					isValid          = true
   902  					pred, succ, prev *xConcSklNode[K, V]
   903  				)
   904  				for /* node locked */ l := int32(0); isValid && (l <= topLevel); l++ {
   905  					pred, succ = aux[l], aux[sklMaxLevel+l]
    906  					if /* lock index */ pred != prev {
   907  						pred.lock(ver)
   908  						lockedLayers = l
   909  						prev = pred
   910  					}
   911  					// Check:
   912  					// 1. the previous node exists.
   913  					// 2. no other nodes are inserted into the skip list in this layer.
   914  					isValid = !atomicIsSet(&pred.flags, nodeRemovingFlagBit) && pred.atomicLoadNextNode(l) == succ
   915  				}
   916  				if /* conc rm */ !isValid {
   917  					unlockNodes(ver, lockedLayers, aux[0:sklMaxLevel]...)
   918  					continue
   919  				}
   920  
   921  				switch mode {
   922  				case linkedList:
   923  					if x := rmNode.root.linkedListNext(); x == nil {
    924  						atomic.StoreInt64(&rmNode.count, 0) // no values left; mark the node empty so it gets unlinked below
   925  					} else {
   926  						atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&rmNode.root.parent)), unsafe.Pointer(nil))
   927  						for /* locked */ x != nil {
   928  							elements = append(elements, &xSklElement[K, V]{
   929  								key: key,
   930  								val: *x.vptr,
   931  							})
   932  							prev := x
   933  							x = x.linkedListNext()
   934  							prev.parent = nil
   935  						}
    936  						atomic.AddInt64(&skl.nodeLen, -atomic.SwapInt64(&rmNode.count, 0)) // decrement nodeLen by the removed value count before resetting it
   938  					}
   939  				case rbtree:
   940  					rmNode.rbDFS( /* locked */ func(idx int64, color color, v V) bool {
   941  						elements = append(elements, &xSklElement[K, V]{
   942  							key: key,
   943  							val: v,
   944  						})
   945  						return true
   946  					})
   947  					rmNode.rbRelease()
   948  					atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&rmNode.root)), unsafe.Pointer(nil))
    949  					atomic.AddInt64(&skl.nodeLen, -atomic.SwapInt64(&rmNode.count, 0)) // decrement nodeLen by the removed value count before resetting it
   951  				}
   952  
   953  				if atomic.LoadInt64(&rmNode.count) <= 0 {
   954  					for /* re-linking, reduce levels */ l := topLevel; l >= 0; l-- {
   955  						aux[l].atomicStoreNextNode(l, rmNode.loadNextNode(l))
   956  					}
   957  					atomic.AddUint64(&skl.indexCount, ^uint64(rmNode.level-1))
   958  				}
   959  
   960  				rmNode.unlock(ver)
   961  				unlockNodes(ver, lockedLayers, aux[0:sklMaxLevel]...)
   962  				return elements, nil
   963  			}
   964  			break
   965  		}
   966  	default:
   967  		panic("[x-conc-skl] unknown x-node type")
   968  	}
   969  
   970  	if foundAt == -1 {
   971  		return nil, infra.WrapErrorStack(ErrXSklNotFound)
   972  	}
   973  	return nil, infra.WrapErrorStack(ErrXSklUnknownReason)
   974  }