github.com/zuoyebang/bitalosdb@v1.1.1-0.20240516111551-79a8c4d8ce20/bitpage/skl_node.go (about)

     1  // Copyright 2021 The Bitalosdb author(hustxrb@163.com) and other contributors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package bitpage
    16  
    17  import (
    18  	"sync/atomic"
    19  )
    20  
    21  func sklNodeMaxSize(keySize, valueSize uint32) uint32 {
    22  	return uint32(maxNodeSize) + keySize + valueSize + align4
    23  }
    24  
// links holds the forward/backward offsets for one level of a node's
// tower. Offsets are table-arena-relative (see table.getPointer /
// table.getBytes usage below), not raw pointers.
//
// NOTE(review): instances live inside raw arena memory cast via
// unsafe pointer in newRawNode, and newRawNode's size math depends on
// linksSize — do not change this struct's field layout or size.
type links struct {
	nextOffset uint32 // arena offset of the next node at this level
	prevOffset uint32 // arena offset of the previous node at this level
}
    29  
    30  func (l *links) init(prevOffset, nextOffset uint32) {
    31  	l.nextOffset = nextOffset
    32  	l.prevOffset = prevOffset
    33  }
    34  
// node is a skiplist node stored inline in the table arena; it is
// materialized by casting an arena offset to *node (newRawNode), so
// its exact field layout is part of the on-arena format.
//
// NOTE(review): tower must remain the LAST field — newRawNode
// truncates the allocation by the unused tail of the tower
// ((maxHeight-height)*linksSize), and the key/value bytes are placed
// immediately after the truncated node (at keyOffset = nodeOffset +
// nodeSize). Reordering fields would corrupt that layout.
type node struct {
	keyOffset uint32 // arena offset of the encoded key bytes
	keySize   uint32 // length of the key in bytes
	valueSize uint32 // length of the value in bytes (stored right after the key)
	allocSize uint32 // total arena bytes reserved for this node (from allocAlign)

	// Extra skip pointers (arena offsets) accessed atomically via the
	// skipTo*Offset accessors below; 0 means unset (set in newRawNode).
	skipToFirst uint32
	skipToLast  uint32

	// tower[h] links this node into level h of the skiplist. Only the
	// first `height` entries are actually backed by allocated memory.
	tower [maxHeight]links
}
    46  
    47  func newNode(tbl *table, height uint32, key internalKey, value []byte) (*node, error) {
    48  	keySize := key.Size()
    49  	valueSize := len(value)
    50  
    51  	nd, err := newRawNode(tbl, height, uint32(keySize), uint32(valueSize))
    52  	if err != nil {
    53  		return nil, err
    54  	}
    55  
    56  	key.Encode(nd.getKeyBytes(tbl))
    57  	copy(nd.getValue(tbl), value)
    58  	return nd, nil
    59  }
    60  
// newRawNode reserves arena space for a node with `height` tower
// levels plus keySize+valueSize payload bytes, and returns the node
// header mapped over that space. The key/value bytes are NOT filled
// in here; callers (newNode) write them via getKeyBytes/getValue.
func newRawNode(tbl *table, height uint32, keySize, valueSize uint32) (*node, error) {
	// Towers shorter than maxHeight don't need the full tower array:
	// shave the unused tail links off the allocation so short nodes
	// are cheaper. The payload then starts where the truncated node
	// ends, overlapping the unallocated tower entries.
	unusedSize := uint32((maxHeight - int(height)) * linksSize)
	nodeSize := uint32(maxNodeSize) - unusedSize

	// allocAlign presumably aligns the node start to align4 and
	// accounts for unusedSize — TODO confirm against table.allocAlign.
	nodeOffset, allocSize, err := tbl.allocAlign(nodeSize+keySize+valueSize, align4, unusedSize)
	if err != nil {
		return nil, err
	}

	// Map the node header directly over the reserved arena bytes.
	nd := (*node)(tbl.getPointer(nodeOffset))
	nd.keyOffset = nodeOffset + nodeSize // payload sits right after the truncated node
	nd.keySize = keySize
	nd.valueSize = valueSize
	nd.allocSize = allocSize
	// Plain stores are fine here: the node is not yet published to
	// other goroutines.
	nd.skipToFirst = 0
	nd.skipToLast = 0
	return nd, nil
}
    79  
    80  func (n *node) getKeyBytes(tbl *table) []byte {
    81  	return tbl.getBytes(n.keyOffset, n.keySize)
    82  }
    83  
    84  func (n *node) getValue(tbl *table) []byte {
    85  	return tbl.getBytes(n.keyOffset+n.keySize, n.valueSize)
    86  }
    87  
    88  func (n *node) nextOffset(h int) uint32 {
    89  	return atomic.LoadUint32(&n.tower[h].nextOffset)
    90  }
    91  
    92  func (n *node) skipToFirstOffset() uint32 {
    93  	return atomic.LoadUint32(&n.skipToFirst)
    94  }
    95  
    96  func (n *node) setSkipToFirstOffset(val uint32) {
    97  	atomic.StoreUint32(&n.skipToFirst, val)
    98  }
    99  
   100  func (n *node) skipToLastOffset() uint32 {
   101  	return atomic.LoadUint32(&n.skipToLast)
   102  }
   103  
   104  func (n *node) setSkipToLastOffset(val uint32) {
   105  	atomic.StoreUint32(&n.skipToLast, val)
   106  }
   107  
   108  func (n *node) prevOffset(h int) uint32 {
   109  	return atomic.LoadUint32(&n.tower[h].prevOffset)
   110  }
   111  
   112  func (n *node) casNextOffset(h int, old, val uint32) bool {
   113  	return atomic.CompareAndSwapUint32(&n.tower[h].nextOffset, old, val)
   114  }
   115  
   116  func (n *node) casPrevOffset(h int, old, val uint32) bool {
   117  	return atomic.CompareAndSwapUint32(&n.tower[h].prevOffset, old, val)
   118  }