github.com/zuoyebang/bitalostable@v1.0.1-0.20240229032404-e3b99a834294/internal/arenaskl/node.go

/*
 * Copyright 2017 Dgraph Labs, Inc. and Contributors
 * Modifications copyright (C) 2017 Andy Kimball and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package arenaskl

import (
	"math"
	"sync/atomic"

	"github.com/zuoyebang/bitalostable/internal/base"
)

// MaxNodeSize returns the maximum space needed for a node with the specified
// key and value sizes. This could overflow a uint32, which is why a uint64
// is used here. If a key/value overflows a uint32, it should not be added to
// the skiplist.
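//
// For example, a node holding an 8-byte key and a 16-byte value needs at most
// maxNodeSize + 8 + 16 + align4 bytes of arena space.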
func MaxNodeSize(keySize, valueSize uint32) uint64 {
	return uint64(maxNodeSize) + uint64(keySize) + uint64(valueSize) + align4
}

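// links holds the forward and backward offsets, within the arena, of a node's
// next and previous neighbors at one level of its tower.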
type links struct {
	nextOffset uint32
	prevOffset uint32
}

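// init initializes both offsets of a tower level in a single call.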
func (l *links) init(prevOffset, nextOffset uint32) {
	l.nextOffset = nextOffset
	l.prevOffset = prevOffset
}

type node struct {
	// Immutable fields, so no need to lock to access them.
	keyOffset uint32
	keySize   uint32
	valueSize uint32
	allocSize uint32

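	// skipToFirst and skipToLast are mutable arena offsets. They must only be
	// read and written through the atomic accessors defined below.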
	skipToFirst uint32
	skipToLast  uint32

	// Most nodes do not need to use the full height of the tower, since the
	// probability of each successive level decreases exponentially. Because
	// these elements are never accessed, they do not need to be allocated.
	// Therefore, when a node is allocated in the arena, its memory footprint
	// is deliberately truncated to not include unneeded tower elements.
	//
	// All accesses to elements should use CAS operations, with no need to lock.
	tower [maxHeight]links
}

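// newNode allocates a node within the arena, copies the encoded key and the
// value into its trailing bytes, and returns it. It panics if the height is
// out of range or if the key/value sizes cannot be represented in a uint32.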
func newNode(
	arena *Arena, height uint32, key base.InternalKey, value []byte,
) (nd *node, err error) {
	if height < 1 || height > maxHeight {
		panic("height cannot be less than one or greater than the max height")
	}
	keySize := key.Size()
	if int64(keySize) > math.MaxUint32 {
		panic("key is too large")
	}
	valueSize := len(value)
	if int64(valueSize) > math.MaxUint32 {
		panic("value is too large")
	}
	if int64(valueSize)+int64(keySize)+int64(maxNodeSize) > math.MaxUint32 {
		panic("combined key and value size is too large")
	}

	nd, err = newRawNode(arena, height, uint32(keySize), uint32(valueSize))
	if err != nil {
		return
	}

	key.Encode(nd.getKeyBytes(arena))
	copy(nd.getValue(arena), value)
	return
}

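// newRawNode allocates a node with room for a key and value of the given
// sizes, leaving the key/value bytes uninitialized. The tower is truncated to
// the requested height, so the allocation is laid out as:
//
//	[node struct (tower truncated)] [key bytes] [value bytes]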
func newRawNode(arena *Arena, height uint32, keySize, valueSize uint32) (nd *node, err error) {
	// Compute the amount of the tower that will never be used, since the height
	// is less than maxHeight.
	unusedSize := uint32((maxHeight - int(height)) * linksSize)
	nodeSize := uint32(maxNodeSize) - unusedSize

	nodeOffset, allocSize, err := arena.alloc(nodeSize+keySize+valueSize, align4, unusedSize)
	if err != nil {
		return
	}

	nd = (*node)(arena.getPointer(nodeOffset))
	nd.keyOffset = nodeOffset + nodeSize
	nd.keySize = keySize
	nd.valueSize = valueSize
	nd.allocSize = allocSize
	nd.skipToFirst = 0
	nd.skipToLast = 0
	return
}

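// getKeyBytes returns the node's key bytes, stored in the arena at keyOffset.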
func (n *node) getKeyBytes(arena *Arena) []byte {
	return arena.getBytes(n.keyOffset, n.keySize)
}

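// getValue returns the node's value bytes, which immediately follow the key
// in the arena.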
func (n *node) getValue(arena *Arena) []byte {
	return arena.getBytes(n.keyOffset+n.keySize, n.valueSize)
}

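// nextOffset atomically loads the arena offset of the next node at tower
// level h.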
func (n *node) nextOffset(h int) uint32 {
	return atomic.LoadUint32(&n.tower[h].nextOffset)
}

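// prevOffset atomically loads the arena offset of the previous node at tower
// level h.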
func (n *node) prevOffset(h int) uint32 {
	return atomic.LoadUint32(&n.tower[h].prevOffset)
}

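// casNextOffset atomically replaces the next offset at level h, provided it
// still equals old.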
func (n *node) casNextOffset(h int, old, val uint32) bool {
	return atomic.CompareAndSwapUint32(&n.tower[h].nextOffset, old, val)
}

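// casPrevOffset atomically replaces the previous offset at level h, provided
// it still equals old.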
func (n *node) casPrevOffset(h int, old, val uint32) bool {
	return atomic.CompareAndSwapUint32(&n.tower[h].prevOffset, old, val)
}

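// skipToFirstOffset atomically loads the skipToFirst offset.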
func (n *node) skipToFirstOffset() uint32 {
	return atomic.LoadUint32(&n.skipToFirst)
}

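// setSkipToFirstOffset atomically stores the skipToFirst offset.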
func (n *node) setSkipToFirstOffset(val uint32) {
	atomic.StoreUint32(&n.skipToFirst, val)
}

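// skipToLastOffset atomically loads the skipToLast offset.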
func (n *node) skipToLastOffset() uint32 {
	return atomic.LoadUint32(&n.skipToLast)
}

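// setSkipToLastOffset atomically stores the skipToLast offset.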
func (n *node) setSkipToLastOffset(val uint32) {
	atomic.StoreUint32(&n.skipToLast, val)
}