github.com/benz9527/xboot@v0.0.0-20240504061247-c23f15593274/lib/list/x_arena_skl_node.go

package list

import (
	"runtime"
	"sync/atomic"
	"unsafe"

	"github.com/benz9527/xboot/lib/infra"
)

// Compile-time interface conformance check.
var _ SklElement[uint8, uint8] = (*xArenaSklElement[uint8, uint8])(nil)

// xArenaSklElement keeps the referenced Go objects alive for the GC
// while the node metadata itself is allocated from the arena.
type xArenaSklElement[K infra.OrderedKey, V any] struct {
	indices []*xArenaSklNode[K, V]
	nodeRef *xArenaSklNode[K, V]
	prev    *xArenaSklElement[K, V] // doubly linked list
	next    *xArenaSklElement[K, V]
	key     K
	val     atomic.Value
}

func (e *xArenaSklElement[K, V]) Key() K {
	return e.key
}

func (e *xArenaSklElement[K, V]) Val() V {
	return e.val.Load().(V)
}

func newXConcSklHeadElement[K infra.OrderedKey, V any]() *xArenaSklElement[K, V] {
	node := &xArenaSklNode[K, V]{
		level: sklMaxLevel,
	}
	node.flags = set(node.flags, nodeIsHeadFlagBit|nodeInsertedFlagBit)
	node.flags = setBitsAs(node.flags, xNodeModeFlagBits, uint32(unique))
	head := &xArenaSklElement[K, V]{
		indices: make([]*xArenaSklNode[K, V], sklMaxLevel),
	}
	head.nodeRef = node
	node.elementRef = head
	return head
}

func newXArenaSklDataElement[K infra.OrderedKey, V any](
	key K,
	val V,
	lvl uint32,
	arena *autoGrowthArena[xArenaSklNode[K, V]],
) *xArenaSklElement[K, V] {
	e := &xArenaSklElement[K, V]{
		key:     key,
		indices: make([]*xArenaSklNode[K, V], lvl),
	}
	e.val.Store(val)

	node, _ := arena.allocate()
	node.level = lvl
	node.elementRef = e
	e.nodeRef = node
	node.count = 1
	return e
}
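
// Illustrative usage sketch (an added note, not in the original source): how a
// data element and its arena-backed node relate. newAutoGrowthArena and its
// capacity argument are hypothetical stand-ins for however the arena is
// constructed elsewhere in this package; newXArenaSklDataElement above is real.
//
//	arena := newAutoGrowthArena[xArenaSklNode[uint64, string]](1 << 10) // hypothetical constructor
//	e := newXArenaSklDataElement[uint64, string](7, "seven", 4, arena)
//	// e.Key() == 7 and e.Val() == "seven"; e.nodeRef was drawn from the
//	// arena, and e.indices has room for 4 forward links, one per level.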

// If the node is in unique x-node mode, it stores its value directly.
// Otherwise it acts as a sentinel for a linked list or an rbtree of
// duplicate elements.
// @field count, the number of duplicate elements.
// @field mu, a version-stamped optimistic spin lock.
type xArenaSklNode[K infra.OrderedKey, V any] struct {
	elementRef *xArenaSklElement[K, V] // 8 bytes (pointer); back-reference to the owning element
	mu         uint64                  // 8 bytes; holds the locker's version stamp, or `unlocked`
	count      int64                   // 8 bytes; number of duplicate elements
	level      uint32                  // 4 bytes
	flags      uint32                  // 4 bytes
}

// lock spins until it stamps `version` into mu, with bounded exponential
// backoff: processor yields first, then cooperative rescheduling.
func (node *xArenaSklNode[K, V]) lock(version uint64) {
	backoff := uint8(1)
	for !atomic.CompareAndSwapUint64(&node.mu, unlocked, version) {
		if backoff <= 32 {
			for i := uint8(0); i < backoff; i++ {
				infra.ProcYield(5)
			}
			// Stop doubling once backoff exceeds 32; the original
			// unconditional shift overflowed uint8 back to zero and
			// degenerated into a tight CAS loop.
			backoff <<= 1
		} else {
			runtime.Gosched()
		}
	}
}

func (node *xArenaSklNode[K, V]) tryLock(version uint64) bool {
	return atomic.CompareAndSwapUint64(&node.mu, unlocked, version)
}

// unlock succeeds only for the version stamp that took the lock.
func (node *xArenaSklNode[K, V]) unlock(version uint64) bool {
	return atomic.CompareAndSwapUint64(&node.mu, version, unlocked)
}

// loadNextNode and storeNextNode are the non-atomic accessors; callers must
// hold the node's lock.
func (node *xArenaSklNode[K, V]) loadNextNode(i int32) *xArenaSklNode[K, V] {
	return node.elementRef.indices[i]
}

func (node *xArenaSklNode[K, V]) storeNextNode(i int32, next *xArenaSklNode[K, V]) {
	node.elementRef.indices[i] = next
}

func (node *xArenaSklNode[K, V]) atomicLoadNextNode(i int32) *xArenaSklNode[K, V] {
	return (*xArenaSklNode[K, V])(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&node.elementRef.indices[i]))))
}

func (node *xArenaSklNode[K, V]) atomicStoreNextNode(i int32, next *xArenaSklNode[K, V]) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&node.elementRef.indices[i])), unsafe.Pointer(next))
}

// unlockArenaNodes releases the locks taken on levels [0, num], skipping
// consecutive duplicates so each distinct node is unlocked exactly once.
func unlockArenaNodes[K infra.OrderedKey, V any](version uint64, num int32, nodes ...*xArenaSklNode[K, V]) {
	var prev *xArenaSklNode[K, V]
	for i := num; i >= 0; i-- {
		if nodes[i] != prev {
			nodes[i].unlock(version)
			prev = nodes[i]
		}
	}
}
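
// Illustrative locking sketch (an added note, not in the original source): the
// version-stamped optimistic protocol the methods above are built for. The
// version counter `optVer`, the predecessor array `preds`, and the level bound
// `highestLockedLevel` are hypothetical names standing in for the caller's
// state in the skiplist insert path.
//
//	version := atomic.AddUint64(&optVer, 1) // each critical section gets a fresh stamp
//	for i := int32(0); i <= highestLockedLevel; i++ {
//		preds[i].lock(version) // spin with backoff until this predecessor is owned
//	}
//	// ... splice the new node into the predecessors' index arrays,
//	// publishing each link via atomicStoreNextNode ...
//	unlockArenaNodes(version, highestLockedLevel, preds...)
//	// unlock only succeeds for the stamp that locked; a stale unlock is a no-op.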