github.com/number571/tendermint@v0.34.11-gost/internal/blockchain/v0/reactor_test.go

package v0

import (
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	abci "github.com/number571/tendermint/abci/types"
	cfg "github.com/number571/tendermint/config"
	cons "github.com/number571/tendermint/internal/consensus"
	"github.com/number571/tendermint/internal/mempool/mock"
	"github.com/number571/tendermint/internal/p2p"
	"github.com/number571/tendermint/internal/p2p/p2ptest"
	"github.com/number571/tendermint/internal/test/factory"
	"github.com/number571/tendermint/libs/log"
	bcproto "github.com/number571/tendermint/proto/tendermint/blockchain"
	"github.com/number571/tendermint/proxy"
	sm "github.com/number571/tendermint/state"
	sf "github.com/number571/tendermint/state/test/factory"
	"github.com/number571/tendermint/store"
	"github.com/number571/tendermint/types"
	dbm "github.com/tendermint/tm-db"
)

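// reactorTestSuite bundles everything needed to exercise the blockchain
// (fast-sync) reactor over an in-memory p2ptest network: one reactor, ABCI
// app connection, blockchain channel, and peer-update subscription per node.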
type reactorTestSuite struct {
	network *p2ptest.Network
	logger  log.Logger
	nodes   []types.NodeID

	reactors map[types.NodeID]*Reactor
	app      map[types.NodeID]proxy.AppConns

	blockchainChannels map[types.NodeID]*p2p.Channel
	peerChans          map[types.NodeID]chan p2p.PeerUpdate
	peerUpdates        map[types.NodeID]*p2p.PeerUpdates

	fastSync bool
}

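// setup creates a test network with one node per entry in maxBlockHeights,
// pre-populating each node's block store up to the corresponding height.
// Cleanup of reactors, apps, and peer-update subscriptions is registered on t.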
func setup(
	t *testing.T,
	genDoc *types.GenesisDoc,
	privVal types.PrivValidator,
	maxBlockHeights []int64,
	chBuf uint,
) *reactorTestSuite {
	t.Helper()

	numNodes := len(maxBlockHeights)
	require.True(t, numNodes >= 1,
		"must specify at least one block height (one per node)")

	rts := &reactorTestSuite{
		logger:             log.TestingLogger().With("module", "blockchain", "testCase", t.Name()),
		network:            p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
		nodes:              make([]types.NodeID, 0, numNodes),
		reactors:           make(map[types.NodeID]*Reactor, numNodes),
		app:                make(map[types.NodeID]proxy.AppConns, numNodes),
		blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
		peerChans:          make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
		peerUpdates:        make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
		fastSync:           true,
	}

	chDesc := p2p.ChannelDescriptor{ID: byte(BlockchainChannel)}
	rts.blockchainChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf))

	i := 0
	for nodeID := range rts.network.Nodes {
		rts.addNode(t, nodeID, genDoc, privVal, maxBlockHeights[i])
		i++
	}

	t.Cleanup(func() {
		for _, nodeID := range rts.nodes {
			rts.peerUpdates[nodeID].Close()

			if rts.reactors[nodeID].IsRunning() {
				require.NoError(t, rts.reactors[nodeID].Stop())
				require.NoError(t, rts.app[nodeID].Stop())
				require.False(t, rts.reactors[nodeID].IsRunning())
			}
		}
	})

	return rts
}

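// addNode wires up a single node: it starts a local ABCI app, replays
// maxBlockHeight blocks into a fresh in-memory block store, and starts a
// blockchain reactor connected to the suite's channels and peer updates.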
func (rts *reactorTestSuite) addNode(t *testing.T,
	nodeID types.NodeID,
	genDoc *types.GenesisDoc,
	privVal types.PrivValidator,
	maxBlockHeight int64,
) {
	t.Helper()

	rts.nodes = append(rts.nodes, nodeID)
	rts.app[nodeID] = proxy.NewAppConns(proxy.NewLocalClientCreator(&abci.BaseApplication{}))
	require.NoError(t, rts.app[nodeID].Start())

	blockDB := dbm.NewMemDB()
	stateDB := dbm.NewMemDB()
	stateStore := sm.NewStore(stateDB)
	blockStore := store.NewBlockStore(blockDB)

	state, err := sm.MakeGenesisState(genDoc)
	require.NoError(t, err)
	require.NoError(t, stateStore.Save(state))

	blockExec := sm.NewBlockExecutor(
		stateStore,
		log.TestingLogger(),
		rts.app[nodeID].Consensus(),
		mock.Mempool{},
		sm.EmptyEvidencePool{},
		blockStore,
	)

	// Build, apply, and persist blocks up to maxBlockHeight so the node
	// starts with a populated block store.
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)

		if blockHeight > 1 {
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)

			vote, err := factory.MakeVote(
				privVal,
				lastBlock.Header.ChainID, 0,
				lastBlock.Header.Height, 0, 2,
				lastBlockMeta.BlockID,
				time.Now(),
			)
			require.NoError(t, err)

			lastCommit = types.NewCommit(
				vote.Height,
				vote.Round,
				lastBlockMeta.BlockID,
				[]types.CommitSig{vote.CommitSig()},
			)
		}

		thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		require.NoError(t, err)

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}

	rts.peerChans[nodeID] = make(chan p2p.PeerUpdate)
	rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)
	rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID])
	rts.reactors[nodeID], err = NewReactor(
		rts.logger.With("nodeID", nodeID),
		state.Copy(),
		blockExec,
		blockStore,
		nil,
		rts.blockchainChannels[nodeID],
		rts.peerUpdates[nodeID],
		rts.fastSync,
		cons.NopMetrics())
	require.NoError(t, err)

	require.NoError(t, rts.reactors[nodeID].Start())
	require.True(t, rts.reactors[nodeID].IsRunning())
}

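// start brings the p2p network up and verifies that a randomly chosen node
// is connected to every other node.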
func (rts *reactorTestSuite) start(t *testing.T) {
	t.Helper()
	rts.network.Start(t)
	require.Len(t,
		rts.network.RandomNode().PeerManager.Peers(),
		len(rts.nodes)-1,
		"network does not have the expected number of peers")
}

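// TestReactor_AbruptDisconnect starts a two-node network, lets the second
// node partially sync from the first, then abruptly drops the first peer to
// check that the pool routine neither deadlocks nor races.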
func TestReactor_AbruptDisconnect(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
	maxBlockHeight := int64(64)

	rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)

	require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

	rts.start(t)

	secondaryPool := rts.reactors[rts.nodes[1]].pool

	require.Eventually(
		t,
		func() bool {
			height, _, _ := secondaryPool.GetStatus()
			return secondaryPool.MaxPeerHeight() > 0 && height > 0 && height < 10
		},
		10*time.Second,
		10*time.Millisecond,
		"expected node to be partially synced",
	)

	// Remove the fully synced node from the syncing node's peer set. This
	// should not result in any deadlocks or race conditions within the
	// context of poolRoutine.
	rts.peerChans[rts.nodes[1]] <- p2p.PeerUpdate{
		Status: p2p.PeerStatusDown,
		NodeID: rts.nodes[0],
	}
	rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0])
}

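// TestReactor_SyncTime checks that, while the second node is catching up,
// the reactor reports a positive remaining sync time and a non-trivial
// sync rate.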
func TestReactor_SyncTime(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
	maxBlockHeight := int64(101)

	rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
	require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
	rts.start(t)

	require.Eventually(
		t,
		func() bool {
			return rts.reactors[rts.nodes[1]].GetRemainingSyncTime() > time.Nanosecond &&
				rts.reactors[rts.nodes[1]].pool.getLastSyncRate() > 0.001
		},
		10*time.Second,
		10*time.Millisecond,
		"expected node to be partially synced",
	)
}

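// TestReactor_NoBlockResponse syncs a second node from the first and then
// checks which heights can be loaded from its store: heights at or below
// the primary's height exist, heights beyond it do not.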
func TestReactor_NoBlockResponse(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
	maxBlockHeight := int64(65)

	rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)

	require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

	rts.start(t)

	testCases := []struct {
		height   int64
		existent bool
	}{
		{maxBlockHeight + 2, false},
		{10, true},
		{1, true},
		{100, false},
	}

	secondaryPool := rts.reactors[rts.nodes[1]].pool
	require.Eventually(
		t,
		func() bool { return secondaryPool.MaxPeerHeight() > 0 && secondaryPool.IsCaughtUp() },
		10*time.Second,
		10*time.Millisecond,
		"expected node to be fully synced",
	)

	for _, tc := range testCases {
		block := rts.reactors[rts.nodes[1]].store.LoadBlock(tc.height)
		if tc.existent {
			require.NotNil(t, block)
		} else {
			require.Nil(t, block)
		}
	}
}

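// TestReactor_BadBlockStopsPeer builds a network of synced nodes, then adds
// a node generated from a different genesis document and expects it to lose
// peers once conflicting blocks are detected.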
func TestReactor_BadBlockStopsPeer(t *testing.T) {
	// Ultimately, this should be refactored to be less integration test oriented
	// and more unit test oriented by simply testing channel sends and receives.
	// See: https://github.com/number571/tendermint/issues/6005
	t.SkipNow()

	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	maxBlockHeight := int64(48)
	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)

	rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000)

	require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

	rts.start(t)

	require.Eventually(
		t,
		func() bool {
			caughtUp := true
			for _, id := range rts.nodes[1 : len(rts.nodes)-1] {
				if rts.reactors[id].pool.MaxPeerHeight() == 0 || !rts.reactors[id].pool.IsCaughtUp() {
					caughtUp = false
				}
			}

			return caughtUp
		},
		10*time.Minute,
		10*time.Millisecond,
		"expected all nodes to be fully synced",
	)

	for _, id := range rts.nodes[:len(rts.nodes)-1] {
		require.Len(t, rts.reactors[id].pool.peers, 3)
	}

	// Add a new node created from a different genesis document. Its chain is
	// invalid from the perspective of the existing network (and vice versa),
	// so the new node should end up disconnected from some of its peers.
	//
	// XXX: This causes a potential race condition.
	// See: https://github.com/number571/tendermint/issues/6005
	otherGenDoc, otherPrivVals := factory.RandGenesisDoc(config, 1, false, 30)
	newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{
		MaxPeers:     uint16(len(rts.nodes) + 1),
		MaxConnected: uint16(len(rts.nodes) + 1),
	})
	rts.addNode(t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight)

	// add a fake peer just so we do not wait for the consensus ticker to timeout
	rts.reactors[newNode.NodeID].pool.SetPeerRange("00ff", 10, 10)

	// wait for the new peer to catch up and become fully synced
	require.Eventually(
		t,
		func() bool {
			return rts.reactors[newNode.NodeID].pool.MaxPeerHeight() > 0 && rts.reactors[newNode.NodeID].pool.IsCaughtUp()
		},
		10*time.Minute,
		10*time.Millisecond,
		"expected new node to be fully synced",
	)

	require.Eventuallyf(
		t,
		func() bool { return len(rts.reactors[newNode.NodeID].pool.peers) < len(rts.nodes)-1 },
		10*time.Minute,
		10*time.Millisecond,
		"invalid number of peers; expected < %d, got: %d",
		len(rts.nodes)-1,
		len(rts.reactors[newNode.NodeID].pool.peers),
	)
}