github.com/zhiqiangxu/util@v0.0.0-20230112053021-0a7aee056cd5/skl/lf/list.go (about) 1 package lf 2 3 import ( 4 "bytes" 5 "sync/atomic" 6 "unsafe" 7 ) 8 9 // List is a lock free sorted singly linked list 10 type List struct { 11 head listNode 12 arena *Arena 13 } 14 15 var _ list = (*List)(nil) 16 17 // NewListWithArena with specified Arena 18 func NewListWithArena(arena *Arena) *List { 19 l := &List{arena: arena} 20 21 l.head.head = true 22 return l 23 } 24 25 // NewList with arenaSize 26 func NewList(arenaSize uint32) *List { 27 arena := NewArena(arenaSize) 28 return NewListWithArena(arena) 29 } 30 31 // ListNodeSize is the size of ListNode 32 const ListNodeSize = int(unsafe.Sizeof(listNode{})) 33 34 const ( 35 bitMask = ^uint32(0x03) 36 markBit = uint32(1) 37 flagBit = uint32(2) 38 ) 39 40 type listNode struct { 41 // Multiple parts of the value are encoded as a single uint64 so that it 42 // can be atomically loaded and stored: 43 // value offset: uint32 (bits 0-31) 44 // value size : uint16 (bits 32-63) 45 value uint64 46 backlink uint32 // points to the prev node 47 succ uint32 // contains a next pointer, a mark bit and a flag bit. 48 keyOffset uint32 // Immutable. No need to lock to access key. 49 keySize uint16 // Immutable. No need to lock to access key. 50 head bool 51 } 52 53 // Contains checks whether k is in list 54 func (l *List) Contains(k []byte) bool { 55 current, _ := l.searchFrom(k, l.headNode(), true) 56 return current.Compare(l.arena, k) == 0 57 } 58 59 // Get v by k if exists 60 // v is readonly 61 func (l *List) Get(k []byte) (v []byte, exists bool) { 62 current, _ := l.searchFrom(k, l.headNode(), true) 63 if current.Compare(l.arena, k) == 0 { 64 exists = true 65 v = current.Value(l.arena) 66 } 67 return 68 } 69 70 func (l *List) headNode() *listNode { 71 return &l.head 72 } 73 74 // Insert attempts to insert a new node with the supplied key and value. 
75 func (l *List) Insert(k, v []byte) (isNew bool, err error) { 76 prev, next := l.searchFrom(k, l.headNode(), true) 77 78 var voffset uint32 79 80 if prev.Compare(l.arena, k) == 0 { 81 82 voffset, err = l.arena.putBytes(v) 83 if err != nil { 84 return 85 } 86 prev.UpdateValue(voffset, uint16(len(v))) 87 return 88 } 89 90 node, err := newListNode(l.arena, k, v) 91 if err != nil { 92 return 93 } 94 nodeOffset := l.arena.getListNodeOffset(node) 95 96 for { 97 prevSucc := prev.Succ() 98 // If the predecessor is flagged, help 99 // the corresponding deletion to complete. 100 if prevSucc&flagBit != 0 { 101 l.helpFlagged(prev, l.arena.getListNode(prevSucc&bitMask)) 102 } else { 103 node.succ = l.arena.getListNodeOffset(next) 104 // Insertion attempt. 105 if atomic.CompareAndSwapUint32(&prev.succ, node.succ, nodeOffset) { 106 // Successful insertion. 107 isNew = true 108 return 109 } 110 111 // Failure. 112 113 // Failure due to flagging. 114 if prev.Flagged() { 115 l.helpFlagged(prev, prev.Next(l.arena)) 116 } 117 // Possibly a failure due to marking. Traverse a 118 // chain of backlinks to reach an unmarked node. 119 for prev.Marked() { 120 prev = l.arena.getListNode(prev.GetBacklist()) 121 } 122 } 123 124 prev, next = l.searchFrom(k, prev, true) 125 if prev.Compare(l.arena, k) == 0 { 126 prev.UpdateValue(voffset, uint16(len(v))) 127 return 128 } 129 } 130 } 131 132 // Delete sttempts to delete a node with the supplied key 133 func (l *List) Delete(k []byte) bool { 134 prev, del := l.searchFrom(k, l.headNode(), false) 135 if del == nil || del.Compare(l.arena, k) != 0 { 136 return false 137 } 138 139 prev, flagged := l.tryFlag(prev, del) 140 if prev != nil { 141 l.helpFlagged(prev, del) 142 } 143 144 return flagged 145 } 146 147 // finds two consecutive nodes n1 and n2 148 // pre condition: 149 // node.key < k 150 // if equal is true: 151 // n1.key <= k < n2.key. 152 // if equal is false: 153 // n1.key < k <= n2.key. 
154 func (l *List) searchFrom(k []byte, node *listNode, equal bool) (current, next *listNode) { 155 156 var cmpFunc func(n *listNode) bool 157 if equal { 158 cmpFunc = func(n *listNode) bool { 159 return n.Compare(l.arena, k) <= 0 160 } 161 } else { 162 cmpFunc = func(n *listNode) bool { 163 return n.Compare(l.arena, k) < 0 164 } 165 } 166 167 current = node 168 next = node.Next(l.arena) 169 for next != nil && cmpFunc(next) { 170 for { 171 nextSuc := next.Succ() 172 currentSuc := current.Succ() 173 currentNext := l.arena.getListNode(currentSuc & bitMask) 174 175 // Ensure that either next node is unmarked, 176 // or both curr node and next node are 177 // marked and curr node was marked earlier. 178 if nextSuc&markBit == 1 && (currentSuc&markBit == 0 || currentNext != next) { 179 if currentNext == next { 180 l.helpMarked(current, next) 181 } 182 next = currentNext 183 } else { 184 break 185 } 186 } 187 188 if next != nil && cmpFunc(next) { 189 current = next 190 next = current.Next(l.arena) 191 } 192 193 } 194 return 195 } 196 197 // Attempts to physically delete the marked 198 // node del node and unflag prev node. 199 func (l *List) helpMarked(prev, del *listNode) { 200 next := del.Succ() & bitMask 201 atomic.CompareAndSwapUint32(&prev.succ, l.arena.getListNodeOffset(del)+flagBit, next) 202 } 203 204 // Attempts to flag the predecessor of target node. P rev node is the last node known to be the predecessor. 
// tryFlag returns the node whose succ now carries the flag for target
// (nil if target was removed from the list first), and flagged reports
// whether this call is the one that performed the flagging CAS.
func (l *List) tryFlag(prev, target *listNode) (n *listNode, flagged bool) {

	for {
		// predecessor is already flagged
		if prev.Flagged() {
			n = prev
			return
		}
		targetOffset := l.arena.getListNodeOffset(target)
		// Expect a clean pointer to target; install the same pointer with
		// the flag bit set.
		if atomic.CompareAndSwapUint32(&prev.succ, targetOffset, targetOffset+flagBit) {
			// CAS was successful
			n = prev
			flagged = true
			return
		}

		if prev.Flagged() {
			// failure due to flagging
			n = prev
			return
		}

		// possibly failure due to marking
		for prev.Marked() {
			prev = l.arena.getListNode(prev.GetBacklist())
		}

		// Re-locate target's current predecessor and retry.
		var del *listNode
		prev, del = l.searchFrom(target.Key(l.arena), prev, false)
		// target_node was deleted from the list
		if del != target {
			return
		}
	}

}

// Attempts to mark the node del node.
// Loops until the mark bit is observed; a failed CAS can mean the succ
// field changed or that del got flagged, in which case we help that
// deletion first.
func (l *List) tryMark(del *listNode) {
	for !del.Marked() {
		next := del.Succ() & bitMask
		swapped := atomic.CompareAndSwapUint32(&del.succ, next, next+markBit)
		if !swapped {
			if del.Flagged() {
				l.helpFlagged(del, l.arena.getListNode(next))
			}
		}
	}
}

// Attempts to mark and physically delete node del node,
// which is the successor of the flagged node prev node.
257 func (l *List) helpFlagged(prev, del *listNode) { 258 del.SetBacklist(l.arena.getListNodeOffset(prev)) 259 260 l.tryMark(del) 261 l.helpMarked(prev, del) 262 } 263 264 func newListNode(arena *Arena, k, v []byte) (n *listNode, err error) { 265 koff, voff, err := arena.putKV(k, v) 266 if err != nil { 267 return 268 } 269 noff, err := arena.putListNode() 270 if err != nil { 271 return 272 } 273 n = arena.getListNode(noff) 274 n.keyOffset = koff 275 n.keySize = uint16(len(k)) 276 n.value = encodeValue(voff, uint16(len(v))) 277 return 278 } 279 280 func encodeValue(valOffset uint32, valSize uint16) uint64 { 281 return uint64(valSize)<<32 | uint64(valOffset) 282 } 283 284 func decodeValue(value uint64) (valOffset uint32, valSize uint16) { 285 valSize = uint16(value >> 32) 286 valOffset = uint32(value) 287 return 288 } 289 290 func (n *listNode) Flagged() bool { 291 return atomic.LoadUint32(&n.succ)&flagBit != 0 292 } 293 294 func (n *listNode) Marked() bool { 295 return atomic.LoadUint32(&n.succ)&markBit != 0 296 } 297 298 func (n *listNode) Succ() uint32 { 299 return atomic.LoadUint32(&n.succ) 300 } 301 302 func (n *listNode) Key(arena *Arena) []byte { 303 return arena.getBytes(n.keyOffset, n.keySize) 304 } 305 306 func (n *listNode) Value(arena *Arena) []byte { 307 v := atomic.LoadUint64(&n.value) 308 voff, vsize := decodeValue(v) 309 return arena.getBytes(voff, vsize) 310 } 311 312 func (n *listNode) UpdateValue(offset uint32, size uint16) { 313 value := encodeValue(offset, size) 314 atomic.StoreUint64(&n.value, value) 315 } 316 317 func (n *listNode) SetBacklist(prevOffset uint32) { 318 atomic.StoreUint32(&n.backlink, prevOffset) 319 } 320 321 func (n *listNode) GetBacklist() uint32 { 322 return atomic.LoadUint32(&n.backlink) 323 } 324 325 func (n *listNode) Next(arena *Arena) *listNode { 326 succ := n.Succ() 327 return arena.getListNode(succ & bitMask) 328 } 329 330 func (n *listNode) Compare(arena *Arena, k []byte) int { 331 if n.head { 332 return -1 333 } 334 
return bytes.Compare(arena.getBytes(n.keyOffset, n.keySize), k) 335 }