github.com/decred/dcrlnd@v0.7.6/blockcache/blockcache_test.go

package blockcache

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"testing"

	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrutil/v4"
	"github.com/decred/dcrd/wire"
	cache "github.com/decred/dcrlnd/neutrinocache"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"matheusd.com/testctx"
)

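// mockChainBackend is a mock chain backend that serves blocks from an
// in-memory map and counts how many times GetBlock reaches the backend.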
type mockChainBackend struct {
	blocks         map[chainhash.Hash]*wire.MsgBlock
	chainCallCount int

	sync.RWMutex
}

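// newMockChain returns a mockChainBackend with an empty block map.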
func newMockChain() *mockChainBackend {
	return &mockChainBackend{
		blocks: make(map[chainhash.Hash]*wire.MsgBlock),
	}
}

// GetBlock is a mock implementation of block fetching that tracks the number
// of backend calls and returns the block for the given hash, or an error if
// the block is not found.
func (m *mockChainBackend) GetBlock(ctx context.Context, blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
	m.Lock()
	defer m.Unlock()

	m.chainCallCount++

	block, ok := m.blocks[*blockHash]
	if !ok {
		return nil, fmt.Errorf("block not found")
	}

	return block, nil
}

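// getChainCallCount returns the number of times GetBlock hit the chain
// backend since the last reset.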
func (m *mockChainBackend) getChainCallCount() int {
	m.RLock()
	defer m.RUnlock()

	return m.chainCallCount
}

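// addBlock sets the block's nonce and stores the block in the backend, keyed
// by its header hash.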
func (m *mockChainBackend) addBlock(block *wire.MsgBlock, nonce uint32) {
	m.Lock()
	defer m.Unlock()

	block.Header.Nonce = nonce
	hash := block.Header.BlockHash()
	m.blocks[hash] = block
}

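// resetChainCallCount resets the backend call counter to zero.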
func (m *mockChainBackend) resetChainCallCount() {
	m.Lock()
	defer m.Unlock()

	m.chainCallCount = 0
}

// TestBlockCacheGetBlock tests that the block Cache works correctly as an LFU
// block Cache for the given max capacity.
func TestBlockCacheGetBlock(t *testing.T) {
	mc := newMockChain()
	getBlockImpl := mc.GetBlock
	ctx := testctx.New(t)

	block1 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 1}}
	block2 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 2}}
	block3 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 3}}

	blockhash1 := block1.BlockHash()
	blockhash2 := block2.BlockHash()
	blockhash3 := block3.BlockHash()

	inv1 := wire.NewInvVect(wire.InvTypeBlock, &blockhash1)
	inv2 := wire.NewInvVect(wire.InvTypeBlock, &blockhash2)
	inv3 := wire.NewInvVect(wire.InvTypeBlock, &blockhash3)

	// Determine the size of one of the blocks.
	sz, _ := (&cache.CacheableBlock{Block: dcrutil.NewBlock(block1)}).Size()

	// A new Cache is set up with a capacity of 2 blocks.
	bc := NewBlockCache(2 * sz)

	mc.addBlock(&wire.MsgBlock{}, 1)
	mc.addBlock(&wire.MsgBlock{}, 2)
	mc.addBlock(&wire.MsgBlock{}, 3)

	// We expect the initial Cache to be empty.
	require.Equal(t, 0, bc.Cache.Len())

	// After calling GetBlock for block1, it is expected that the Cache
	// will have a size of 1 and will contain block1. One call to the
	// chain backend is expected to fetch the block.
	_, err := bc.GetBlock(ctx, &blockhash1, getBlockImpl)
	require.NoError(t, err)
	require.Equal(t, 1, bc.Cache.Len())
	require.Equal(t, 1, mc.getChainCallCount())
	mc.resetChainCallCount()

	_, err = bc.Cache.Get(*inv1)
	require.NoError(t, err)

	// After calling GetBlock for block2, it is expected that the Cache
	// will have a size of 2 and will contain both block1 and block2.
	// One call to the chain backend is expected to fetch the block.
	_, err = bc.GetBlock(ctx, &blockhash2, getBlockImpl)
	require.NoError(t, err)
	require.Equal(t, 2, bc.Cache.Len())
	require.Equal(t, 1, mc.getChainCallCount())
	mc.resetChainCallCount()

	_, err = bc.Cache.Get(*inv1)
	require.NoError(t, err)

	_, err = bc.Cache.Get(*inv2)
	require.NoError(t, err)

	// GetBlock is called again for block1 to make block2 the least
	// frequently used block. No call to the chain backend is expected
	// since block1 is already in the Cache.
	_, err = bc.GetBlock(ctx, &blockhash1, getBlockImpl)
	require.NoError(t, err)
	require.Equal(t, 2, bc.Cache.Len())
	require.Equal(t, 0, mc.getChainCallCount())
	mc.resetChainCallCount()

	// Since the Cache is now at its max capacity, it is expected that
	// calling GetBlock for a new block will evict the least frequently
	// used block, which is block2. After calling GetBlock for block3, it
	// is expected that the Cache will have a length of 2 and will contain
	// block1 and block3.
	_, err = bc.GetBlock(ctx, &blockhash3, getBlockImpl)
	require.NoError(t, err)
	require.Equal(t, 2, bc.Cache.Len())
	require.Equal(t, 1, mc.getChainCallCount())
	mc.resetChainCallCount()

	_, err = bc.Cache.Get(*inv1)
	require.NoError(t, err)

	_, err = bc.Cache.Get(*inv2)
	require.True(t, errors.Is(err, cache.ErrElementNotFound))

	_, err = bc.Cache.Get(*inv3)
	require.NoError(t, err)
}

// TestBlockCacheMutexes is used to test that concurrent calls to GetBlock with
// the same block hash do not result in multiple calls to the chain backend.
// In other words, this tests the HashMutex.
func TestBlockCacheMutexes(t *testing.T) {
	mc := newMockChain()
	getBlockImpl := mc.GetBlock
	ctx := testctx.New(t)

	block1 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 1}}
	block2 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 2}}

	blockhash1 := block1.BlockHash()
	blockhash2 := block2.BlockHash()

	// Determine the size of the block.
	sz, _ := (&cache.CacheableBlock{Block: dcrutil.NewBlock(block1)}).Size()

	// A new Cache is set up with a capacity of 2 blocks.
	bc := NewBlockCache(2 * sz)

	mc.addBlock(&wire.MsgBlock{}, 1)
	mc.addBlock(&wire.MsgBlock{}, 2)

	// Spin off multiple goroutines and ensure that concurrent calls to the
	// GetBlock method do not result in multiple calls to the chain
	// backend.
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(e int) {
			if e%2 == 0 {
				_, err := bc.GetBlock(ctx, &blockhash1, getBlockImpl)
				assert.NoError(t, err)
			} else {
				_, err := bc.GetBlock(ctx, &blockhash2, getBlockImpl)
				assert.NoError(t, err)
			}

			wg.Done()
		}(i)
	}

	wg.Wait()
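	// Only two backend calls are expected in total: one for each distinct
	// block hash, regardless of how many goroutines requested it
	// concurrently.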
	require.Equal(t, 2, mc.getChainCallCount())
}