github.com/MetalBlockchain/metalgo@v1.11.9/x/merkledb/intermediate_node_db_test.go

// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package merkledb

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/MetalBlockchain/metalgo/database"
	"github.com/MetalBlockchain/metalgo/database/memdb"
	"github.com/MetalBlockchain/metalgo/utils"
	"github.com/MetalBlockchain/metalgo/utils/maybe"
	"github.com/MetalBlockchain/metalgo/utils/units"
)

// Tests:
// * Putting a key-node pair in the database
// * Getting a key-node pair from the write buffer and from the base DB
// * Deleting a key-node pair from the database
// * Evicting elements from the write buffer
// * Flushing the write buffer
func Test_IntermediateNodeDB(t *testing.T) {
	require := require.New(t)

	n := newNode(ToKey([]byte{0x00}))
	n.setValue(DefaultHasher, maybe.Some([]byte{byte(0x02)}))
	nodeSize := cacheEntrySize(n.key, n)

	// Use an exact multiple of the node size so the
	// require.Equal(1, db.writeBuffer.fifo.Len()) assertion below is exact.
	cacheSize := nodeSize * 100
	bufferSize := nodeSize * 20

	evictionBatchSize := bufferSize
	baseDB := memdb.New()
	db := newIntermediateNodeDB(
		baseDB,
		utils.NewBytesPool(),
		&mockMetrics{},
		cacheSize,
		bufferSize,
		evictionBatchSize,
		4,
		DefaultHasher,
	)

	// Put a key-node pair
	node1Key := ToKey([]byte{0x01})
	node1 := newNode(node1Key)
	node1.setValue(DefaultHasher, maybe.Some([]byte{byte(0x01)}))
	require.NoError(db.Put(node1Key, node1))

	// Get the key-node pair from the write buffer
	node1Read, err := db.Get(node1Key)
	require.NoError(err)
	require.Equal(node1, node1Read)

	// Overwrite the key-node pair
	node1Updated := newNode(node1Key)
	node1Updated.setValue(DefaultHasher, maybe.Some([]byte{byte(0x02)}))
	require.NoError(db.Put(node1Key, node1Updated))

	// Assert the key-node pair was overwritten
	node1Read, err = db.Get(node1Key)
	require.NoError(err)
	require.Equal(node1Updated, node1Read)

	// Delete the key-node pair
	require.NoError(db.Delete(node1Key))

	// Assert the key-node pair was deleted
	_, err = db.Get(node1Key)
	require.Equal(database.ErrNotFound, err)

	// Put elements in the write buffer until adding one more would
	// overflow it.
	expectedSize := 0
	added := 0
	for {
		key := ToKey([]byte{byte(added)})
		node := newNode(Key{})
		node.setValue(DefaultHasher, maybe.Some([]byte{byte(added)}))
		newExpectedSize := expectedSize + cacheEntrySize(key, node)
		if newExpectedSize > bufferSize {
			// Don't trigger eviction.
			break
		}

		require.NoError(db.Put(key, node))
		expectedSize = newExpectedSize
		added++
	}

	// Assert the write buffer has the expected number of elements
	require.Equal(added, db.writeBuffer.fifo.Len())

	// Put one more element in the write buffer, which should trigger an
	// eviction batch that flushes every previously buffered element,
	// leaving only the newly added one behind.
	key := ToKey([]byte{byte(added)})
	node := newNode(Key{})
	node.setValue(DefaultHasher, maybe.Some([]byte{byte(added)}))
	require.NoError(db.Put(key, node))

	// Assert the write buffer has the expected number of elements
	require.Equal(1, db.writeBuffer.fifo.Len())
	gotKey, _, ok := db.writeBuffer.fifo.Oldest()
	require.True(ok)
	require.Equal(ToKey([]byte{byte(added)}), gotKey)

	// Get a node from the base database using an early key that has been
	// evicted from the write buffer.
	_, inCache := db.writeBuffer.Get(node1Key)
	require.False(inCache)
	nodeRead, err := db.Get(node1Key)
	require.NoError(err)
	require.Equal(maybe.Some([]byte{0x01}), nodeRead.value)

	// Flush the write buffer.
	require.NoError(db.Flush())

	// Assert the write buffer is empty
	require.Zero(db.writeBuffer.fifo.Len())

	// Assert the evicted elements were written to disk with the prefix.
	it := baseDB.NewIteratorWithPrefix(intermediateNodePrefix)
	defer it.Release()

	count := 0
	for it.Next() {
		count++
	}
	require.NoError(it.Error())
	require.Equal(added+1, count)
}
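
// The eviction test above sizes its buffer in units of cacheEntrySize. The
// sketch below spot-checks one property of that accounting: a longer key is
// never accounted as cheaper than a shorter one. It uses only in-package
// helpers exercised above and is illustrative, not part of the upstream suite.
func Test_IntermediateNodeDB_CacheEntrySizeGrowsWithKey(t *testing.T) {
	require := require.New(t)

	shortNode := newNode(ToKey([]byte{0x01}))
	longNode := newNode(ToKey([]byte{0x01, 0x02, 0x03, 0x04}))

	// If a longer key were accounted as smaller, the write buffer could
	// admit more bytes than bufferSize allows.
	require.LessOrEqual(
		cacheEntrySize(shortNode.key, shortNode),
		cacheEntrySize(longNode.key, longNode),
	)
}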

func FuzzIntermediateNodeDBConstructDBKey(f *testing.F) {
	bufferSize := 200
	cacheSize := 200
	evictionBatchSize := bufferSize
	baseDB := memdb.New()

	f.Fuzz(func(
		t *testing.T,
		key []byte,
		tokenLength uint,
	) {
		require := require.New(t)
		for _, tokenSize := range validTokenSizes {
			db := newIntermediateNodeDB(
				baseDB,
				utils.NewBytesPool(),
				&mockMetrics{},
				cacheSize,
				bufferSize,
				evictionBatchSize,
				tokenSize,
				DefaultHasher,
			)

			p := ToKey(key)
			uBitLength := tokenLength * uint(tokenSize)
			if uBitLength >= uint(p.length) {
				t.SkipNow()
			}
			p = p.Take(int(uBitLength))
			constructedKey := db.constructDBKey(p)
			baseLength := len(p.value) + len(intermediateNodePrefix)
			require.Equal(intermediateNodePrefix, (*constructedKey)[:len(intermediateNodePrefix)])
			switch {
			case tokenSize == 8:
				// For byte-sized tokens, no padding is added.
				require.Equal(p.Bytes(), (*constructedKey)[len(intermediateNodePrefix):])
			case p.hasPartialByte():
				require.Len(*constructedKey, baseLength)
				require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), (*constructedKey)[len(intermediateNodePrefix):])
			default:
				// For keys that are a whole number of bytes, an extra padding byte is added.
				require.Len(*constructedKey, baseLength+1)
				require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), (*constructedKey)[len(intermediateNodePrefix):])
			}
		}
	})
}
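
// A deterministic companion to the fuzz test above: a minimal sketch pinning
// the three constructDBKey branches with fixed inputs. It only restates
// relations the fuzz already asserts and is not part of the upstream suite.
func Test_IntermediateNodeDB_ConstructDBKey_Examples(t *testing.T) {
	require := require.New(t)

	// 4-bit tokens: a whole-byte key gains an extra padding byte, while a
	// partial-byte key has its trailing bits padded in place.
	db := newIntermediateNodeDB(
		memdb.New(),
		utils.NewBytesPool(),
		&mockMetrics{},
		200,
		200,
		200,
		4,
		DefaultHasher,
	)

	p := ToKey([]byte{0xAB})
	constructedKey := db.constructDBKey(p)
	require.Len(*constructedKey, len(intermediateNodePrefix)+len(p.value)+1)
	require.Equal(p.Extend(ToToken(1, 4)).Bytes(), (*constructedKey)[len(intermediateNodePrefix):])

	p = ToKey([]byte{0xAB}).Take(4)
	constructedKey = db.constructDBKey(p)
	require.Len(*constructedKey, len(intermediateNodePrefix)+len(p.value))
	require.Equal(p.Extend(ToToken(1, 4)).Bytes(), (*constructedKey)[len(intermediateNodePrefix):])

	// 8-bit tokens: the key bytes pass through unpadded.
	db = newIntermediateNodeDB(
		memdb.New(),
		utils.NewBytesPool(),
		&mockMetrics{},
		200,
		200,
		200,
		8,
		DefaultHasher,
	)

	p = ToKey([]byte{0xAB})
	constructedKey = db.constructDBKey(p)
	require.Equal(p.Bytes(), (*constructedKey)[len(intermediateNodePrefix):])
}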

// Test that constructDBKey produces correct output even when the byte buffer
// recycled from the pool still contains stale data.
func Test_IntermediateNodeDB_ConstructDBKey_DirtyBuffer(t *testing.T) {
	require := require.New(t)
	cacheSize := 200
	bufferSize := 200
	evictionBatchSize := bufferSize
	baseDB := memdb.New()
	db := newIntermediateNodeDB(
		baseDB,
		utils.NewBytesPool(),
		&mockMetrics{},
		cacheSize,
		bufferSize,
		evictionBatchSize,
		4,
		DefaultHasher,
	)

	// Seed the pool with a dirty buffer; the empty key should still yield
	// only the prefix plus the 0b0001_0000 padding byte.
	db.bufferPool.Put(&[]byte{0xFF, 0xFF, 0xFF})
	constructedKey := db.constructDBKey(ToKey([]byte{}))
	require.Len(*constructedKey, 2)
	require.Equal(intermediateNodePrefix, (*constructedKey)[:len(intermediateNodePrefix)])
	require.Equal(byte(16), (*constructedKey)[len(*constructedKey)-1])

	// Repeat with a fresh pool and a partial-byte key.
	db.bufferPool = utils.NewBytesPool()
	db.bufferPool.Put(&[]byte{0xFF, 0xFF, 0xFF})
	p := ToKey([]byte{0xF0}).Take(4)
	constructedKey = db.constructDBKey(p)
	require.Len(*constructedKey, 2)
	require.Equal(intermediateNodePrefix, (*constructedKey)[:len(intermediateNodePrefix)])
	require.Equal(p.Extend(ToToken(1, 4)).Bytes(), (*constructedKey)[len(intermediateNodePrefix):])
}
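
// The benchmark at the bottom of this file returns constructed keys to the
// pool with db.bufferPool.Put. A minimal sketch of that recycling pattern,
// assuming (as the dirty-buffer test above shows) that a recycled buffer may
// hold stale bytes; illustrative only, not part of the upstream suite.
func Test_IntermediateNodeDB_ConstructDBKey_Recycle(t *testing.T) {
	require := require.New(t)
	db := newIntermediateNodeDB(
		memdb.New(),
		utils.NewBytesPool(),
		&mockMetrics{},
		200,
		200,
		200,
		4,
		DefaultHasher,
	)

	p := ToKey([]byte{0xF0})
	first := db.constructDBKey(p)
	// Copy the result before recycling, since the pool may hand the same
	// backing buffer out again.
	want := append([]byte{}, (*first)...)

	// Recycle the buffer and construct the same key again; the result must
	// not depend on whether a fresh or recycled buffer was used.
	db.bufferPool.Put(first)
	second := db.constructDBKey(p)
	require.Equal(want, *second)
}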

// Test that Clear removes all intermediate nodes from both the write buffer
// and the underlying database.
func TestIntermediateNodeDBClear(t *testing.T) {
	require := require.New(t)
	cacheSize := 200
	bufferSize := 200
	evictionBatchSize := bufferSize
	baseDB := memdb.New()
	db := newIntermediateNodeDB(
		baseDB,
		utils.NewBytesPool(),
		&mockMetrics{},
		cacheSize,
		bufferSize,
		evictionBatchSize,
		4,
		DefaultHasher,
	)

	for _, b := range [][]byte{{1}, {2}, {3}} {
		require.NoError(db.Put(ToKey(b), newNode(ToKey(b))))
	}

	require.NoError(db.Clear())

	iter := baseDB.NewIteratorWithPrefix(intermediateNodePrefix)
	defer iter.Release()
	require.False(iter.Next())

	require.Zero(db.writeBuffer.currentSize)
}
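
// A minimal sketch extending the Clear test above: a key that only ever
// lived in the write buffer (never flushed) should also be unreadable after
// Clear. This assumes Clear leaves no readable entries behind in any layer;
// illustrative only, not part of the upstream suite.
func TestIntermediateNodeDBClearBuffered(t *testing.T) {
	require := require.New(t)
	db := newIntermediateNodeDB(
		memdb.New(),
		utils.NewBytesPool(),
		&mockMetrics{},
		200,
		200,
		200,
		4,
		DefaultHasher,
	)

	key := ToKey([]byte{0x01})
	require.NoError(db.Put(key, newNode(key)))

	// No Flush: the node exists only in the write buffer when Clear runs.
	require.NoError(db.Clear())

	_, err := db.Get(key)
	require.ErrorIs(err, database.ErrNotFound)
}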

// Test that deleting the empty key and flushing works correctly.
// Previously, there was a bug that occurred when deleting the empty key
// if the cache was empty. The size of the cache entry was reported as 0,
// which caused the cache's currentSize to be 0, so on resize() we didn't
// call onEviction. This caused the empty key to not be deleted from the baseDB.
func TestIntermediateNodeDBDeleteEmptyKey(t *testing.T) {
	require := require.New(t)
	cacheSize := 200
	bufferSize := 200
	evictionBatchSize := bufferSize
	baseDB := memdb.New()
	db := newIntermediateNodeDB(
		baseDB,
		utils.NewBytesPool(),
		&mockMetrics{},
		cacheSize,
		bufferSize,
		evictionBatchSize,
		4,
		DefaultHasher,
	)

	emptyKey := ToKey([]byte{})
	require.NoError(db.Put(emptyKey, newNode(emptyKey)))
	require.NoError(db.Flush())

	emptyDBKey := db.constructDBKey(emptyKey)
	has, err := baseDB.Has(*emptyDBKey)
	require.NoError(err)
	require.True(has)

	require.NoError(db.Delete(emptyKey))
	require.NoError(db.Flush())

	emptyDBKey = db.constructDBKey(emptyKey)
	has, err = baseDB.Has(*emptyDBKey)
	require.NoError(err)
	require.False(has)
}
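
// The regression described above hinged on the empty key's buffer entry
// being accounted as size zero. A minimal sketch of the post-fix invariant:
// entries for the empty key must have nonzero size, including the deletion
// case. This assumes cacheEntrySize accepts a nil node for buffered
// deletions; illustrative only, not part of the upstream suite.
func TestIntermediateNodeDBEmptyKeyEntrySize(t *testing.T) {
	require := require.New(t)

	emptyKey := ToKey([]byte{})
	require.NotZero(cacheEntrySize(emptyKey, newNode(emptyKey)))
	// Deletion marker: the key is buffered with a nil node. If this were
	// accounted as zero, the regression above could resurface.
	require.NotZero(cacheEntrySize(emptyKey, nil))
}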

func Benchmark_IntermediateNodeDB_ConstructDBKey(b *testing.B) {
	keyTokenSizes := []int{0, 1, 4, 16, 64, 256}
	for _, tokenSize := range validTokenSizes {
		db := newIntermediateNodeDB(
			memdb.New(),
			utils.NewBytesPool(),
			&mockMetrics{},
			units.MiB,
			units.MiB,
			units.MiB,
			tokenSize,
			DefaultHasher,
		)

		for _, keyTokenSize := range keyTokenSizes {
			keyBitSize := keyTokenSize * tokenSize
			keyBytes := make([]byte, bytesNeeded(keyBitSize))
			key := Key{
				length: keyBitSize,
				value:  string(keyBytes),
			}
			b.Run(fmt.Sprintf("%d/%d", tokenSize, keyTokenSize), func(b *testing.B) {
				for i := 0; i < b.N; i++ {
					db.bufferPool.Put(db.constructDBKey(key))
				}
			})
		}
	}
}
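
// A companion micro-benchmark sketch for the buffered Put path, mirroring
// the setup of the constructDBKey benchmark above; illustrative only, not
// part of the upstream suite.
func Benchmark_IntermediateNodeDB_Put(b *testing.B) {
	db := newIntermediateNodeDB(
		memdb.New(),
		utils.NewBytesPool(),
		&mockMetrics{},
		units.MiB,
		units.MiB,
		units.MiB,
		4,
		DefaultHasher,
	)

	key := ToKey([]byte{0x01})
	n := newNode(key)
	n.setValue(DefaultHasher, maybe.Some([]byte{0x02}))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Re-putting the same key overwrites the buffered entry, so the
		// write buffer stays bounded for the duration of the benchmark.
		if err := db.Put(key, n); err != nil {
			b.Fatal(err)
		}
	}
}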