github.com/ari-anchor/sei-tendermint@v0.0.0-20230519144642-dc826b7b56bb/internal/consensus/replay_test.go

     1  package consensus
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"math/rand"
    10  	"os"
    11  	"runtime"
    12  	"testing"
    13  	"time"
    14  
    15  	"github.com/fortytw2/leaktest"
    16  	"github.com/gogo/protobuf/proto"
    17  	"github.com/stretchr/testify/assert"
    18  	"github.com/stretchr/testify/require"
    19  	dbm "github.com/tendermint/tm-db"
    20  
    21  	abciclient "github.com/ari-anchor/sei-tendermint/abci/client"
    22  	"github.com/ari-anchor/sei-tendermint/abci/example/kvstore"
    23  	abci "github.com/ari-anchor/sei-tendermint/abci/types"
    24  	"github.com/ari-anchor/sei-tendermint/config"
    25  	"github.com/ari-anchor/sei-tendermint/crypto"
    26  	"github.com/ari-anchor/sei-tendermint/crypto/encoding"
    27  	"github.com/ari-anchor/sei-tendermint/internal/eventbus"
    28  	"github.com/ari-anchor/sei-tendermint/internal/mempool"
    29  	"github.com/ari-anchor/sei-tendermint/internal/proxy"
    30  	"github.com/ari-anchor/sei-tendermint/internal/pubsub"
    31  	sm "github.com/ari-anchor/sei-tendermint/internal/state"
    32  	sf "github.com/ari-anchor/sei-tendermint/internal/state/test/factory"
    33  	"github.com/ari-anchor/sei-tendermint/internal/store"
    34  	"github.com/ari-anchor/sei-tendermint/internal/test/factory"
    35  	"github.com/ari-anchor/sei-tendermint/libs/log"
    36  	tmrand "github.com/ari-anchor/sei-tendermint/libs/rand"
    37  	"github.com/ari-anchor/sei-tendermint/privval"
    38  	tmproto "github.com/ari-anchor/sei-tendermint/proto/tendermint/types"
    39  	"github.com/ari-anchor/sei-tendermint/types"
    40  )
    41  
    42  // These tests ensure we can always recover from failure at any part of the consensus process.
    43  // There are two general failure scenarios: failure during consensus, and failure while applying the block.
    44  // Only the latter interacts with the app and store,
    45  // but the former has to deal with restrictions on re-use of priv_validator keys.
    46  // The `WAL Tests` are for failures during the consensus;
    47  // the `Handshake Tests` are for failures in applying the block.
    48  // With the help of the WAL, we can recover from it all!
    49  
    50  //------------------------------------------------------------------------------------------
    51  // WAL Tests
    52  
    53  // TODO: It would be better to verify explicitly which states we can recover from without the WAL
    54  // and which ones we need the WAL for - then we'd also be able to only flush the
    55  // WAL writer when we need to, instead of with every message.
    56  
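        // startNewStateAndWaitForBlock builds a fresh consensus state on top of the
        // existing WAL referenced by consensusReplayConfig, starts it, and waits up to
        // two minutes for a NewBlock event as evidence that replay recovered correctly.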
    57  func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config,
    58  	lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
    59  	logger := log.NewNopLogger()
    60  	state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
    61  	require.NoError(t, err)
    62  	privValidator := loadPrivValidator(t, consensusReplayConfig)
    63  	blockStore := store.NewBlockStore(dbm.NewMemDB())
    64  	cs := newStateWithConfigAndBlockStore(
    65  		ctx,
    66  		t,
    67  		logger,
    68  		consensusReplayConfig,
    69  		state,
    70  		privValidator,
    71  		kvstore.NewApplication(),
    72  		blockStore,
    73  	)
    74  
    75  	bytes, err := os.ReadFile(cs.config.WalFile())
    76  	require.NoError(t, err)
    77  	require.NotNil(t, bytes)
    78  
    79  	require.NoError(t, cs.Start(ctx))
    80  	defer func() {
    81  		cs.Stop()
    82  	}()
    83  	t.Cleanup(cs.Wait)
    84  	// This is just a signal that we haven't halted; it's not something contained
    85  	// in the WAL itself. Assuming the consensus state is running, replay of any
    86  	// WAL, including the empty one, should eventually be followed by a new
    87  	// block, or else something is wrong.
    88  	newBlockSub, err := cs.eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{
    89  		ClientID: testSubscriber,
    90  		Query:    types.EventQueryNewBlock,
    91  	})
    92  	require.NoError(t, err)
    93  	ctxto, cancel := context.WithTimeout(ctx, 120*time.Second)
    94  	defer cancel()
    95  	_, err = newBlockSub.Next(ctxto)
    96  	if errors.Is(err, context.DeadlineExceeded) {
    97  		t.Fatal("Timed out waiting for new block (see trace above)")
    98  	} else if err != nil {
    99  		t.Fatalf("newBlockSub terminated unexpectedly: %v", err)
   100  	}
   101  }
   102  
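        // sendTxs feeds a stream of single-byte transactions into the mempool so the
        // crashing-WAL test produces non-empty blocks; it stops when ctx is canceled.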
   103  func sendTxs(ctx context.Context, t *testing.T, cs *State) {
   104  	t.Helper()
   105  	for i := 0; i < 256; i++ {
   106  		select {
   107  		case <-ctx.Done():
   108  			return
   109  		default:
   110  			tx := []byte{byte(i)}
   111  			tx := []byte{byte(i)}
   112  
   113  			require.NoError(t, assertMempool(t, cs.txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{}))
   115  		}
   116  	}
   117  }
   118  
   119  // TestWALCrash uses a crashing WAL to test that we can recover from any WAL failure.
   120  func TestWALCrash(t *testing.T) {
   121  	testCases := []struct {
   122  		name         string
   123  		initFn       func(dbm.DB, *State, context.Context)
   124  		heightToStop int64
   125  	}{
   126  		{"empty block",
   127  			func(stateDB dbm.DB, cs *State, ctx context.Context) {},
   128  			1},
   129  		{"many non-empty blocks",
   130  			func(stateDB dbm.DB, cs *State, ctx context.Context) {
   131  				go sendTxs(ctx, t, cs)
   132  			},
   133  			3},
   134  	}
   135  
   136  	for _, tc := range testCases {
   137  		tc := tc
   138  		t.Run(tc.name, func(t *testing.T) {
   139  			ctx, cancel := context.WithCancel(context.Background())
   140  			defer cancel()
   141  
   142  			consensusReplayConfig, err := ResetConfig(t.TempDir(), tc.name)
   143  			require.NoError(t, err)
   144  			crashWALandCheckLiveness(ctx, t, consensusReplayConfig, tc.initFn, tc.heightToStop)
   145  		})
   146  	}
   147  }
   148  
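        // crashWALandCheckLiveness repeatedly starts a consensus state whose WAL is
        // wrapped in crashingWAL. After every simulated crash it verifies that a fresh
        // state can still produce a block, and it loops until heightToStop is reached.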
   149  func crashWALandCheckLiveness(rctx context.Context, t *testing.T, consensusReplayConfig *config.Config,
   150  	initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
   151  	walPanicked := make(chan error)
   152  	crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
   153  
   154  	i := 1
   155  LOOP:
   156  	for {
   157  		// create consensus state from a clean slate
   158  		logger := log.NewNopLogger()
   159  		blockDB := dbm.NewMemDB()
   160  		stateDB := dbm.NewMemDB()
   161  		stateStore := sm.NewStore(stateDB)
   162  		blockStore := store.NewBlockStore(blockDB)
   163  		state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
   164  		require.NoError(t, err)
   165  		privValidator := loadPrivValidator(t, consensusReplayConfig)
   166  		cs := newStateWithConfigAndBlockStore(
   167  			rctx,
   168  			t,
   169  			logger,
   170  			consensusReplayConfig,
   171  			state,
   172  			privValidator,
   173  			kvstore.NewApplication(),
   174  			blockStore,
   175  		)
   176  
   177  		// start sending transactions
   178  		ctx, cancel := context.WithCancel(rctx)
   179  		initFn(stateDB, cs, ctx)
   180  
   181  		// clean up WAL file from the previous iteration
   182  		walFile := cs.config.WalFile()
   183  		os.Remove(walFile)
   184  
   185  		// set crashing WAL
   186  		csWal, err := cs.OpenWAL(ctx, walFile)
   187  		require.NoError(t, err)
   188  		crashingWal.next = csWal
   189  
   190  		// reset the message counter
   191  		crashingWal.msgIndex = 1
   192  		cs.wal = crashingWal
   193  
   194  		// start consensus state
   195  		err = cs.Start(ctx)
   196  		require.NoError(t, err)
   197  
   198  		i++
   199  
   200  		select {
   201  		case <-rctx.Done():
   202  			t.Fatal("context canceled before test completed")
   203  		case err := <-walPanicked:
   204  			// make sure we can make blocks after a crash
   205  			startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.roundState.Height(), blockDB, stateStore)
   206  
   207  			// stop consensus state and transactions sender (initFn)
   208  			cs.Stop()
   209  			cancel()
   210  
   211  			// if we reached the required height, exit
   212  			if _, ok := err.(ReachedHeightToStopError); ok {
   213  				break LOOP
   214  			}
   215  		case <-time.After(10 * time.Second):
   216  			t.Fatal("WAL did not panic for 10 seconds (check the log)")
   217  		}
   218  	}
   219  }
   220  
   221  // crashingWAL is a WAL that simulates a crash during Write. It remembers the
   222  // last message index for which it panicked (lastPanickedForMsgIndex), so it
   223  // does not panic for that message again in subsequent iterations.
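        //
        // It is wired up by crashWALandCheckLiveness roughly like this (sketch):
        //
        //	crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
        //	crashingWal.next = csWal // the real WAL opened via cs.OpenWAL
        //	cs.wal = crashingWal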
   224  type crashingWAL struct {
   225  	next         WAL
   226  	panicCh      chan error
   227  	heightToStop int64
   228  
   229  	msgIndex                int // current message index
   230  	lastPanickedForMsgIndex int // last message for which we panicked
   231  }
   232  
   233  var _ WAL = &crashingWAL{}
   234  
   235  // WALWriteError indicates a WAL crash.
   236  type WALWriteError struct {
   237  	msg string
   238  }
   239  
   240  func (e WALWriteError) Error() string {
   241  	return e.msg
   242  }
   243  
   244  // ReachedHeightToStopError indicates we've reached the required consensus
   245  // height and may exit.
   246  type ReachedHeightToStopError struct {
   247  	height int64
   248  }
   249  
   250  func (e ReachedHeightToStopError) Error() string {
   251  	return fmt.Sprintf("reached height to stop %d", e.height)
   252  }
   253  
   254  // Write simulates a WAL crash by sending an error to panicCh and then
   255  // exiting the cs.receiveRoutine via runtime.Goexit.
   256  func (w *crashingWAL) Write(m WALMessage) error {
   257  	if endMsg, ok := m.(EndHeightMessage); ok {
   258  		if endMsg.Height == w.heightToStop {
   259  			w.panicCh <- ReachedHeightToStopError{endMsg.Height}
   260  			runtime.Goexit()
   261  			return nil
   262  		}
   263  
   264  		return w.next.Write(m)
   265  	}
   266  
   267  	if w.msgIndex > w.lastPanickedForMsgIndex {
   268  		w.lastPanickedForMsgIndex = w.msgIndex
   269  		_, file, line, _ := runtime.Caller(1)
   270  		w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)}
   271  		runtime.Goexit()
   272  		return nil
   273  	}
   274  
   275  	w.msgIndex++
   276  	return w.next.Write(m)
   277  }
   278  
   279  func (w *crashingWAL) WriteSync(m WALMessage) error {
   280  	return w.Write(m)
   281  }
   282  
   283  func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }
   284  
   285  func (w *crashingWAL) SearchForEndHeight(
   286  	height int64,
   287  	options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
   288  	return w.next.SearchForEndHeight(height, options)
   289  }
   290  
   291  func (w *crashingWAL) Start(ctx context.Context) error { return w.next.Start(ctx) }
   292  func (w *crashingWAL) Stop()                           { w.next.Stop() }
   293  func (w *crashingWAL) Wait()                           { w.next.Wait() }
   294  
   295  // ------------------------------------------------------------------------------------------
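        // simulatorTestSuite carries the chain, extended commits, genesis state, and
        // supporting fixtures produced by setupSimulator for the handshake/replay tests.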
   296  type simulatorTestSuite struct {
   297  	GenesisState sm.State
   298  	Config       *config.Config
   299  	Chain        []*types.Block
   300  	ExtCommits   []*types.ExtendedCommit
   301  	CleanupFunc  cleanupFunc
   302  
   303  	Mempool mempool.Mempool
   304  	Evpool  sm.EvidencePool
   305  }
   306  
   307  const (
   308  	numBlocks = 6
   309  )
   310  
   311  //---------------------------------------
   312  // Test handshake/replay
   313  
   314  // 0 - all synced up
   315  // 1 - saved block but app and state are behind
   316  // 2 - saved block and committed but state is behind
   317  // 3 - saved block and committed with truncated block store and state behind
   318  var modes = []uint{0, 1, 2, 3}
   319  
   320  // setupSimulator is not itself a test; it builds the validator-change chain data consumed by testHandshakeReplay.
   321  func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
   322  	t.Helper()
   323  	cfg := configSetup(t)
   324  
   325  	sim := &simulatorTestSuite{
   326  		Mempool: emptyMempool{},
   327  		Evpool:  sm.EmptyEvidencePool{},
   328  	}
   329  
   330  	nPeers := 7
   331  	nVals := 4
   332  
   333  	css, genDoc, cfg, cleanup := randConsensusNetWithPeers(
   334  		ctx,
   335  		t,
   336  		cfg,
   337  		nVals,
   338  		nPeers,
   339  		"replay_test",
   340  		newMockTickerFunc(true),
   341  		newEpehemeralKVStore)
   342  	sim.Config = cfg
   343  	defer func() { t.Cleanup(cleanup) }()
   344  
   345  	var err error
   346  	sim.GenesisState, err = sm.MakeGenesisState(genDoc)
   347  	require.NoError(t, err)
   348  
   349  	partSize := types.BlockPartSizeBytes
   350  
   351  	newRoundCh := subscribe(ctx, t, css[0].eventBus, types.EventQueryNewRound)
   352  	proposalCh := subscribe(ctx, t, css[0].eventBus, types.EventQueryCompleteProposal)
   353  
   354  	vss := make([]*validatorStub, nPeers)
   355  	for i := 0; i < nPeers; i++ {
   356  		vss[i] = newValidatorStub(css[i].privValidator, int32(i))
   357  	}
   358  	height, round := css[0].roundState.Height(), css[0].roundState.Round()
   359  
   360  	// start the machine
   361  	startTestRound(ctx, css[0], height, round)
   362  	incrementHeight(vss...)
   363  	ensureNewRound(t, newRoundCh, height, 0)
   364  	ensureNewProposal(t, proposalCh, height, round)
   365  	rs := css[0].GetRoundState()
   366  
   367  	signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
   368  		types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
   369  		vss[1:nVals]...)
   370  
   371  	ensureNewRound(t, newRoundCh, height+1, 0)
   372  
   373  	// HEIGHT 2
   374  	height++
   375  	incrementHeight(vss...)
   376  	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(ctx)
   377  	require.NoError(t, err)
   378  	valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1)
   379  	require.NoError(t, err)
   380  	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
   381  	err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx1, nil, mempool.TxInfo{})
   382  	assert.NoError(t, err)
   383  	propBlock, err := css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2)
   384  	require.NoError(t, err)
   385  	propBlockParts, err := propBlock.MakePartSet(partSize)
   386  	require.NoError(t, err)
   387  	blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
   388  
   389  	pubKey, err := vss[1].PrivValidator.GetPubKey(ctx)
   390  	require.NoError(t, err)
   391  	proposal := types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time, propBlock.GetTxKeys(), propBlock.Header, propBlock.LastCommit, propBlock.Evidence, pubKey.Address())
   392  	p := proposal.ToProto()
   393  	if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil {
   394  		t.Fatal("failed to sign bad proposal", err)
   395  	}
   396  	proposal.Signature = p.Signature
   397  
   398  	// set the proposal block
   399  	if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil {
   400  		t.Fatal(err)
   401  	}
   402  	ensureNewProposal(t, proposalCh, height, round)
   403  	rs = css[0].GetRoundState()
   404  	signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
   405  		types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
   406  		vss[1:nVals]...)
   407  	ensureNewRound(t, newRoundCh, height+1, 0)
   408  
   409  	// HEIGHT 3
   410  	height++
   411  	incrementHeight(vss...)
   412  	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(ctx)
   413  	require.NoError(t, err)
   414  	updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1)
   415  	require.NoError(t, err)
   416  	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
   417  	err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, updateValidatorTx1, nil, mempool.TxInfo{})
   418  	assert.NoError(t, err)
   419  	propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2)
   420  	require.NoError(t, err)
   421  	propBlockParts, err = propBlock.MakePartSet(partSize)
   422  	require.NoError(t, err)
   423  	blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
   424  	pubKey, err = vss[2].PrivValidator.GetPubKey(ctx)
   425  	require.NoError(t, err)
   426  	proposal = types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time, propBlock.GetTxKeys(), propBlock.Header, propBlock.LastCommit, propBlock.Evidence, pubKey.Address())
   427  	p = proposal.ToProto()
   428  	if err := vss[2].SignProposal(ctx, cfg.ChainID(), p); err != nil {
   429  		t.Fatal("failed to sign bad proposal", err)
   430  	}
   431  	proposal.Signature = p.Signature
   432  
   433  	// set the proposal block
   434  	if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil {
   435  		t.Fatal(err)
   436  	}
   437  	ensureNewProposal(t, proposalCh, height, round)
   438  	rs = css[0].GetRoundState()
   439  	signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
   440  		types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
   441  		vss[1:nVals]...)
   442  	ensureNewRound(t, newRoundCh, height+1, 0)
   443  
   444  	// HEIGHT 4
   445  	height++
   446  	incrementHeight(vss...)
   447  	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(ctx)
   448  	require.NoError(t, err)
   449  	newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2)
   450  	require.NoError(t, err)
   451  	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
   452  	err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx2, nil, mempool.TxInfo{})
   453  	assert.NoError(t, err)
   454  	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(ctx)
   455  	require.NoError(t, err)
   456  	newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3)
   457  	require.NoError(t, err)
   458  	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
   459  	err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx3, nil, mempool.TxInfo{})
   460  	assert.NoError(t, err)
   461  	propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2)
   462  	require.NoError(t, err)
   463  	propBlockParts, err = propBlock.MakePartSet(partSize)
   464  	require.NoError(t, err)
   465  	blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
   466  	newVss := make([]*validatorStub, nVals+1)
   467  	copy(newVss, vss[:nVals+1])
   468  	newVss = sortVValidatorStubsByPower(ctx, t, newVss)
   469  
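        	// valIndexFn returns the index in newVss of the validator backing css[cssIdx],
        	// matched by public key.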
   470  	valIndexFn := func(cssIdx int) int {
   471  		for i, vs := range newVss {
   472  			vsPubKey, err := vs.GetPubKey(ctx)
   473  			require.NoError(t, err)
   474  
   475  			cssPubKey, err := css[cssIdx].privValidator.GetPubKey(ctx)
   476  			require.NoError(t, err)
   477  
   478  			if vsPubKey.Equals(cssPubKey) {
   479  				return i
   480  			}
   481  		}
   482  		t.Fatalf("validator css[%d] not found in newVss", cssIdx)
   483  		return -1
   484  	}
   485  
   486  	selfIndex := valIndexFn(0)
   487  	require.NotEqual(t, -1, selfIndex)
   488  	pubKey, err = vss[3].PrivValidator.GetPubKey(ctx)
   489  	require.NoError(t, err)
   490  	proposal = types.NewProposal(vss[3].Height, round, -1, blockID, propBlock.Header.Time, propBlock.GetTxKeys(), propBlock.Header, propBlock.LastCommit, propBlock.Evidence, pubKey.Address())
   491  	p = proposal.ToProto()
   492  	if err := vss[3].SignProposal(ctx, cfg.ChainID(), p); err != nil {
   493  		t.Fatal("failed to sign bad proposal", err)
   494  	}
   495  	proposal.Signature = p.Signature
   496  
   497  	// set the proposal block
   498  	if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil {
   499  		t.Fatal(err)
   500  	}
   501  	ensureNewProposal(t, proposalCh, height, round)
   502  
   503  	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
   504  	err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, removeValidatorTx2, nil, mempool.TxInfo{})
   505  	assert.NoError(t, err)
   506  
   507  	rs = css[0].GetRoundState()
   508  	for i := 0; i < nVals+1; i++ {
   509  		if i == selfIndex {
   510  			continue
   511  		}
   512  		signAddVotes(ctx, t, css[0],
   513  			tmproto.PrecommitType, sim.Config.ChainID(),
   514  			types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
   515  			newVss[i])
   516  	}
   517  	ensureNewRound(t, newRoundCh, height+1, 0)
   518  
   519  	// HEIGHT 5
   520  	height++
   521  	incrementHeight(vss...)
   522  	// Reflect the changes to vss[nVals] at height 3 and resort newVss.
   523  	newVssIdx := valIndexFn(nVals)
   524  	require.NotEqual(t, -1, newVssIdx)
   525  
   526  	newVss[newVssIdx].VotingPower = 25
   527  	newVss = sortVValidatorStubsByPower(ctx, t, newVss)
   528  
   529  	selfIndex = valIndexFn(0)
   530  	require.NotEqual(t, -1, selfIndex)
   531  	ensureNewProposal(t, proposalCh, height, round)
   532  	rs = css[0].GetRoundState()
   533  	for i := 0; i < nVals+1; i++ {
   534  		if i == selfIndex {
   535  			continue
   536  		}
   537  		signAddVotes(ctx, t, css[0],
   538  			tmproto.PrecommitType, sim.Config.ChainID(),
   539  			types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
   540  			newVss[i])
   541  	}
   542  	ensureNewRound(t, newRoundCh, height+1, 0)
   543  
   544  	// HEIGHT 6
   545  	height++
   546  	incrementHeight(vss...)
   547  	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
   548  	err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, removeValidatorTx3, nil, mempool.TxInfo{})
   549  	assert.NoError(t, err)
   550  	propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2)
   551  	require.NoError(t, err)
   552  	propBlockParts, err = propBlock.MakePartSet(partSize)
   553  	require.NoError(t, err)
   554  	blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
   555  	newVss = make([]*validatorStub, nVals+3)
   556  	copy(newVss, vss[:nVals+3])
   557  	newVss = sortVValidatorStubsByPower(ctx, t, newVss)
   558  
   559  	selfIndex = valIndexFn(0)
   560  	require.NotEqual(t, -1, selfIndex)
   561  	pubKey, err = vss[1].PrivValidator.GetPubKey(ctx)
   562  	require.NoError(t, err)
   563  	proposal = types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time, propBlock.GetTxKeys(), propBlock.Header, propBlock.LastCommit, propBlock.Evidence, pubKey.Address())
   564  	p = proposal.ToProto()
   565  	if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil {
   566  		t.Fatal("failed to sign bad proposal", err)
   567  	}
   568  	proposal.Signature = p.Signature
   569  
   570  	// set the proposal block
   571  	if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil {
   572  		t.Fatal(err)
   573  	}
   574  	ensureNewProposal(t, proposalCh, height, round)
   575  	rs = css[0].GetRoundState()
   576  	for i := 0; i < nVals+3; i++ {
   577  		if i == selfIndex {
   578  			continue
   579  		}
   580  		signAddVotes(ctx, t, css[0],
   581  			tmproto.PrecommitType, sim.Config.ChainID(),
   582  			types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
   583  			newVss[i])
   584  	}
   585  	ensureNewRound(t, newRoundCh, height+1, 0)
   586  
   587  	sim.Chain = []*types.Block{}
   588  	sim.ExtCommits = []*types.ExtendedCommit{}
   589  	for i := 1; i <= numBlocks; i++ {
   590  		sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
   591  		sim.ExtCommits = append(sim.ExtCommits, css[0].blockStore.LoadBlockExtendedCommit(int64(i)))
   592  	}
   593  
   594  	return sim
   595  }
   596  
   597  // Sync from scratch
   598  func TestHandshakeReplayAll(t *testing.T) {
   599  	ctx, cancel := context.WithCancel(context.Background())
   600  	defer cancel()
   601  
   602  	sim := setupSimulator(ctx, t)
   603  
   604  	t.Cleanup(leaktest.Check(t))
   605  
   606  	for _, m := range modes {
   607  		testHandshakeReplay(ctx, t, sim, 0, m, false)
   608  	}
   609  	for _, m := range modes {
   610  		testHandshakeReplay(ctx, t, sim, 0, m, true)
   611  	}
   612  }
   613  
   614  // Sync many, not from scratch
   615  func TestHandshakeReplaySome(t *testing.T) {
   616  	ctx, cancel := context.WithCancel(context.Background())
   617  	defer cancel()
   618  
   619  	sim := setupSimulator(ctx, t)
   620  
   621  	t.Cleanup(leaktest.Check(t))
   622  
   623  	for _, m := range modes {
   624  		testHandshakeReplay(ctx, t, sim, 2, m, false)
   625  	}
   626  	for _, m := range modes {
   627  		testHandshakeReplay(ctx, t, sim, 2, m, true)
   628  	}
   629  }
   630  
   631  // Sync from lagging by one
   632  func TestHandshakeReplayOne(t *testing.T) {
   633  	ctx, cancel := context.WithCancel(context.Background())
   634  	defer cancel()
   635  
   636  	sim := setupSimulator(ctx, t)
   637  
   638  	for _, m := range modes {
   639  		testHandshakeReplay(ctx, t, sim, numBlocks-1, m, false)
   640  	}
   641  	for _, m := range modes {
   642  		testHandshakeReplay(ctx, t, sim, numBlocks-1, m, true)
   643  	}
   644  }
   645  
   646  // Sync from caught up
   647  func TestHandshakeReplayNone(t *testing.T) {
   648  	ctx, cancel := context.WithCancel(context.Background())
   649  	defer cancel()
   650  
   651  	sim := setupSimulator(ctx, t)
   652  
   653  	t.Cleanup(leaktest.Check(t))
   654  
   655  	for _, m := range modes {
   656  		testHandshakeReplay(ctx, t, sim, numBlocks, m, false)
   657  	}
   658  	for _, m := range modes {
   659  		testHandshakeReplay(ctx, t, sim, numBlocks, m, true)
   660  	}
   661  }
   662  
   663  func tempWALWithData(t *testing.T, data []byte) string {
   664  	t.Helper()
   665  
   666  	walFile, err := os.CreateTemp(t.TempDir(), "wal")
   667  	require.NoError(t, err, "failed to create temp WAL file")
   668  	t.Cleanup(func() { _ = os.RemoveAll(walFile.Name()) })
   669  
   670  	_, err = walFile.Write(data)
   671  	require.NoError(t, err, "failed to write to temp WAL file")
   672  
   673  	require.NoError(t, walFile.Close(), "failed to close temp WAL file")
   674  	return walFile.Name()
   675  }
   676  
   677  // Make some blocks. Start a fresh app and apply nBlocks blocks.
   678  // Then restart the app and sync it up with the remaining blocks
   679  func testHandshakeReplay(
   680  	rctx context.Context,
   681  	t *testing.T,
   682  	sim *simulatorTestSuite,
   683  	nBlocks int,
   684  	mode uint,
   685  	testValidatorsChange bool,
   686  ) {
   687  	var chain []*types.Block
   688  	var extCommits []*types.ExtendedCommit
   689  	var store *mockBlockStore
   690  	var stateDB dbm.DB
   691  	var genesisState sm.State
   692  
   693  	ctx, cancel := context.WithCancel(rctx)
   694  	t.Cleanup(cancel)
   695  
   696  	cfg := sim.Config
   697  
   698  	logger := log.NewNopLogger()
   699  	if testValidatorsChange {
   700  		testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_m", t.Name(), mode))
   701  		require.NoError(t, err)
   702  		defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
   703  		stateDB = dbm.NewMemDB()
   704  
   705  		genesisState = sim.GenesisState
   706  		cfg = sim.Config
   707  		chain = append([]*types.Block{}, sim.Chain...) // copy chain
   708  		extCommits = sim.ExtCommits
   709  		store = newMockBlockStore(t, cfg, genesisState.ConsensusParams)
   710  	} else { // test single node
   711  		testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_s", t.Name(), mode))
   712  		require.NoError(t, err)
   713  		defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
   714  		walBody, err := WALWithNBlocks(ctx, t, logger, numBlocks)
   715  		require.NoError(t, err)
   716  		walFile := tempWALWithData(t, walBody)
   717  		cfg.Consensus.SetWalFile(walFile)
   718  
   719  		privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
   720  		require.NoError(t, err)
   721  
   722  		wal, err := NewWAL(ctx, logger, walFile)
   723  		require.NoError(t, err)
   724  		err = wal.Start(ctx)
   725  		require.NoError(t, err)
   726  		t.Cleanup(func() { cancel(); wal.Wait() })
   727  		chain, extCommits = makeBlockchainFromWAL(t, wal)
   728  		pubKey, err := privVal.GetPubKey(ctx)
   729  		require.NoError(t, err)
   730  		stateDB, genesisState, store = stateAndStore(t, cfg, pubKey, kvstore.ProtocolVersion)
   731  
   732  	}
   733  	stateStore := sm.NewStore(stateDB)
   734  	store.chain = chain
   735  	store.extCommits = extCommits
   736  
   737  	state := genesisState.Copy()
   738  	// run the chain through state.ApplyBlock to build up the tendermint state
   739  	state = buildTMStateFromChain(
   740  		ctx,
   741  		t,
   742  		cfg,
   743  		logger,
   744  		sim.Mempool,
   745  		sim.Evpool,
   746  		stateStore,
   747  		state,
   748  		chain,
   749  		nBlocks,
   750  		mode,
   751  		store,
   752  	)
   753  	latestAppHash := state.AppHash
   754  
   755  	eventBus := eventbus.NewDefault(logger)
   756  	require.NoError(t, eventBus.Start(ctx))
   757  
   758  	client := abciclient.NewLocalClient(logger, kvstore.NewApplication())
   759  	if nBlocks > 0 {
   760  		// run nBlocks against a new client to build up the app state.
   761  		// use a throwaway tendermint state
   762  		proxyApp := proxy.New(client, logger, proxy.NopMetrics())
   763  		stateDB1 := dbm.NewMemDB()
   764  		stateStore := sm.NewStore(stateDB1)
   765  		err := stateStore.Save(genesisState)
   766  		require.NoError(t, err)
   767  		buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, eventBus, nBlocks, mode, store)
   768  	}
   769  
   770  	// Prune block store if requested
   771  	expectError := false
   772  	if mode == 3 {
   773  		pruned, err := store.PruneBlocks(2)
   774  		require.NoError(t, err)
   775  		require.EqualValues(t, 1, pruned)
   776  		expectError = int64(nBlocks) < 2
   777  	}
   778  
   779  	// now start the app using the handshake - it should sync
   780  	genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
   781  	require.NoError(t, err)
   782  	handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
   783  	proxyApp := proxy.New(client, logger, proxy.NopMetrics())
   784  	require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
   785  	require.True(t, proxyApp.IsRunning())
   786  	require.NotNil(t, proxyApp)
   787  	t.Cleanup(func() { cancel(); proxyApp.Wait() })
   788  
   789  	err = handshaker.Handshake(ctx, proxyApp)
   790  	if expectError {
   791  		require.Error(t, err)
   792  		return
   793  	}
   794  	require.NoError(t, err, "Error on abci handshake")
   795  
   796  	// get the latest app hash from the app
   797  	res, err := proxyApp.Info(ctx, &abci.RequestInfo{Version: ""})
   798  	if err != nil {
   799  		t.Fatal(err)
   800  	}
   801  
   802  	// the app hash should be synced up
   803  	if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
   804  		t.Fatalf(
   805  			"Expected app hashes to match after handshake/replay. got %X, expected %X",
   806  			res.LastBlockAppHash,
   807  			latestAppHash)
   808  	}
   809  
   810  	expectedBlocksToSync := numBlocks - nBlocks
   811  	if nBlocks == numBlocks && mode > 0 {
   812  		expectedBlocksToSync++
   813  	} else if nBlocks > 0 && mode == 1 {
   814  		expectedBlocksToSync++
   815  	}
   816  
   817  	if handshaker.NBlocks() != expectedBlocksToSync {
   818  		t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks())
   819  	}
   820  }
   821  
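        // applyBlock runs a single block through a fresh BlockExecutor and returns the
        // resulting state; any failure aborts the test.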
   822  func applyBlock(
   823  	ctx context.Context,
   824  	t *testing.T,
   825  	stateStore sm.Store,
   826  	mempool mempool.Mempool,
   827  	evpool sm.EvidencePool,
   828  	st sm.State,
   829  	blk *types.Block,
   830  	appClient abciclient.Client,
   831  	blockStore *mockBlockStore,
   832  	eventBus *eventbus.EventBus,
   833  ) sm.State {
   834  	testPartSize := types.BlockPartSizeBytes
   835  	blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), appClient, mempool, evpool, blockStore, eventBus, sm.NopMetrics())
   836  
   837  	bps, err := blk.MakePartSet(testPartSize)
   838  	require.NoError(t, err)
   839  	blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: bps.Header()}
   840  	newState, err := blockExec.ApplyBlock(ctx, st, blkID, blk, nil)
   841  	require.NoError(t, err)
   842  	return newState
   843  }
   844  
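        // buildAppStateFromChain starts a new ABCI app without a handshake and replays
        // nBlocks of the chain against it, leaving the app state where mode expects it to be.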
   845  func buildAppStateFromChain(
   846  	ctx context.Context,
   847  	t *testing.T,
   848  	appClient abciclient.Client,
   849  	stateStore sm.Store,
   850  	mempool mempool.Mempool,
   851  	evpool sm.EvidencePool,
   852  	state sm.State,
   853  	chain []*types.Block,
   854  	eventBus *eventbus.EventBus,
   855  	nBlocks int,
   856  	mode uint,
   857  	blockStore *mockBlockStore,
   858  ) {
   859  	t.Helper()
   860  	// start a new app without handshake, play nBlocks blocks
   861  	require.NoError(t, appClient.Start(ctx))
   862  
   863  	state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
   864  	validators := types.TM2PB.ValidatorUpdates(state.Validators)
   865  	_, err := appClient.InitChain(ctx, &abci.RequestInitChain{
   866  		Validators: validators,
   867  	})
   868  	require.NoError(t, err)
   869  
   870  	require.NoError(t, stateStore.Save(state)) // save height 1's validatorsInfo
   871  
   872  	switch mode {
   873  	case 0:
   874  		for i := 0; i < nBlocks; i++ {
   875  			block := chain[i]
   876  			state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, appClient, blockStore, eventBus)
   877  		}
   878  	case 1, 2, 3:
   879  		for i := 0; i < nBlocks-1; i++ {
   880  			block := chain[i]
   881  			state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, appClient, blockStore, eventBus)
   882  		}
   883  
   884  		if mode == 2 || mode == 3 {
   885  			// update the kvstore height and apphash by applying the final block,
   886  			// as if the app ran Commit while the tendermint state stays behind
   887  			state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], appClient, blockStore, eventBus)
   888  		}
   889  	default:
   890  		require.Fail(t, "unknown mode %v", mode)
   891  	}
   892  
   893  }
   894  
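        // buildTMStateFromChain replays the chain against a fresh kvstore app to build
        // up the tendermint state used as the reference for the handshake tests.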
   895  func buildTMStateFromChain(
   896  	ctx context.Context,
   897  	t *testing.T,
   898  	cfg *config.Config,
   899  	logger log.Logger,
   900  	mempool mempool.Mempool,
   901  	evpool sm.EvidencePool,
   902  	stateStore sm.Store,
   903  	state sm.State,
   904  	chain []*types.Block,
   905  	nBlocks int,
   906  	mode uint,
   907  	blockStore *mockBlockStore,
   908  ) sm.State {
   909  	t.Helper()
   910  
   911  	// run the whole chain against this client to build up the tendermint state
   912  	client := abciclient.NewLocalClient(logger, kvstore.NewApplication())
   913  
   914  	proxyApp := proxy.New(client, logger, proxy.NopMetrics())
   915  	require.NoError(t, proxyApp.Start(ctx))
   916  
   917  	state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
   918  	validators := types.TM2PB.ValidatorUpdates(state.Validators)
   919  	_, err := proxyApp.InitChain(ctx, &abci.RequestInitChain{
   920  		Validators: validators,
   921  	})
   922  	require.NoError(t, err)
   923  
   924  	require.NoError(t, stateStore.Save(state))
   925  
   926  	eventBus := eventbus.NewDefault(logger)
   927  	require.NoError(t, eventBus.Start(ctx))
   928  
   929  	switch mode {
   930  	case 0:
   931  		// sync right up
   932  		for _, block := range chain {
   933  			state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore, eventBus)
   934  		}
   935  
   936  	case 1, 2, 3:
   937  		// sync up to the penultimate as if we stored the block.
   938  		// whether we commit or not depends on the appHash
   939  		for _, block := range chain[:len(chain)-1] {
   940  			state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore, eventBus)
   941  		}
   942  
   943  		// apply the final block to a state copy so we can
   944  		// get the right next appHash but keep the state back
   945  		applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore, eventBus)
   946  	default:
   947  		require.Fail(t, "unknown mode %v", mode)
   948  	}
   949  
   950  	return state
   951  }
   952  
   953  func TestHandshakeErrorsIfAppReturnsWrongAppHash(t *testing.T) {
   954  	// 1. Initialize tendermint and commit 3 blocks with the following app hashes:
   955  	//		- 0x01
   956  	//		- 0x02
   957  	//		- 0x03
   958  
   959  	ctx, cancel := context.WithCancel(context.Background())
   960  	defer cancel()
   961  
   962  	cfg, err := ResetConfig(t.TempDir(), "handshake_test_")
   963  	require.NoError(t, err)
   964  	t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })
   965  	privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
   966  	require.NoError(t, err)
   967  	const appVersion = 0x0
   968  	pubKey, err := privVal.GetPubKey(ctx)
   969  	require.NoError(t, err)
   970  	stateDB, state, store := stateAndStore(t, cfg, pubKey, appVersion)
   971  	stateStore := sm.NewStore(stateDB)
   972  	genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
   973  	require.NoError(t, err)
   974  	state.LastValidators = state.Validators.Copy()
   975  	// mode = 0 for committing all the blocks
   976  	blocks := sf.MakeBlocks(ctx, t, 3, &state, privVal)
   977  
   978  	store.chain = blocks
   979  
   980  	logger := log.NewNopLogger()
   981  
   982  	eventBus := eventbus.NewDefault(logger)
   983  	require.NoError(t, eventBus.Start(ctx))
   984  
   985  	// 2. The handshake must fail if the app returns the wrong hash for the first block
   986  	//		- RANDOM HASH
   987  	//		- 0x02
   988  	//		- 0x03
   989  	{
   990  		app := &badApp{numBlocks: 3, allHashesAreWrong: true}
   991  		client := abciclient.NewLocalClient(logger, app)
   992  		proxyApp := proxy.New(client, logger, proxy.NopMetrics())
   993  		err := proxyApp.Start(ctx)
   994  		require.NoError(t, err)
   995  		t.Cleanup(func() { cancel(); proxyApp.Wait() })
   996  
   997  		h := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
   998  		assert.Error(t, h.Handshake(ctx, proxyApp))
   999  	}
  1000  
  1001  	// 3. The handshake must fail if the app returns the wrong hash for the last block
  1002  	//		- 0x01
  1003  	//		- 0x02
  1004  	//		- RANDOM HASH
  1005  	{
  1006  		app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
  1007  		client := abciclient.NewLocalClient(logger, app)
  1008  		proxyApp := proxy.New(client, logger, proxy.NopMetrics())
  1009  		err := proxyApp.Start(ctx)
  1010  		require.NoError(t, err)
  1011  		t.Cleanup(func() { cancel(); proxyApp.Wait() })
  1012  
  1013  		h := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
  1014  		require.Error(t, h.Handshake(ctx, proxyApp))
  1015  	}
  1016  }
  1017  
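        // badApp deliberately returns wrong app hashes from FinalizeBlock: either for
        // every block (allHashesAreWrong) or only for the last one (onlyLastHashIsWrong).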
  1018  type badApp struct {
  1019  	abci.BaseApplication
  1020  	numBlocks           byte
  1021  	height              byte
  1022  	allHashesAreWrong   bool
  1023  	onlyLastHashIsWrong bool
  1024  }
  1025  
  1026  func (app *badApp) FinalizeBlock(_ context.Context, _ *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) {
  1027  	app.height++
  1028  	if app.onlyLastHashIsWrong {
  1029  		if app.height == app.numBlocks {
  1030  			return &abci.ResponseFinalizeBlock{AppHash: tmrand.Bytes(8)}, nil
  1031  		}
  1032  		return &abci.ResponseFinalizeBlock{AppHash: []byte{app.height}}, nil
  1033  	} else if app.allHashesAreWrong {
  1034  		return &abci.ResponseFinalizeBlock{AppHash: tmrand.Bytes(8)}, nil
  1035  	}
  1036  
  1037  	panic("either allHashesAreWrong or onlyLastHashIsWrong must be set")
  1038  }
  1039  
  1040  //--------------------------
  1041  // utils for making blocks
  1042  
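        //
        // makeBlockchainFromWAL reconstructs the blocks and extended commits recorded in
        // the given WAL, starting from the height-0 end-height marker.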
  1043  func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.ExtendedCommit) {
  1044  	t.Helper()
  1045  	var height int64
  1046  
  1047  	// Search for height marker
  1048  	gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
  1049  	require.NoError(t, err)
  1050  	require.True(t, found, "wal does not contain height %d", height)
  1051  	defer gr.Close()
  1052  
  1053  	// log.Notice("Build a blockchain by reading from the WAL")
  1054  
  1055  	var (
  1056  		blocks             []*types.Block
  1057  		extCommits         []*types.ExtendedCommit
  1058  		thisBlockParts     *types.PartSet
  1059  		thisBlockExtCommit *types.ExtendedCommit
  1060  	)
  1061  
  1062  	dec := NewWALDecoder(gr)
  1063  	for {
  1064  		msg, err := dec.Decode()
  1065  		if err == io.EOF {
  1066  			break
  1067  		}
  1068  		require.NoError(t, err)
  1069  
  1070  		piece := readPieceFromWAL(msg)
  1071  		if piece == nil {
  1072  			continue
  1073  		}
  1074  
  1075  		switch p := piece.(type) {
  1076  		case EndHeightMessage:
  1077  			// if it's not the first one, we have a full block
  1078  			if thisBlockParts != nil {
  1079  				var pbb = new(tmproto.Block)
  1080  				bz, err := io.ReadAll(thisBlockParts.GetReader())
  1081  				require.NoError(t, err)
  1082  
  1083  				require.NoError(t, proto.Unmarshal(bz, pbb))
  1084  
  1085  				block, err := types.BlockFromProto(pbb)
  1086  				require.NoError(t, err)
  1087  
  1088  				require.Equal(t, block.Height, height+1,
  1089  					"read bad block from wal. got height %d, expected %d", block.Height, height+1)
  1090  
  1091  				commitHeight := thisBlockExtCommit.Height
  1092  				require.Equal(t, commitHeight, height+1,
  1093  					"commit doesn't match. got height %d, expected %d", commitHeight, height+1)
  1094  
  1095  				blocks = append(blocks, block)
  1096  				extCommits = append(extCommits, thisBlockExtCommit)
  1097  				height++
  1098  			}
  1099  		case *types.PartSetHeader:
  1100  			thisBlockParts = types.NewPartSetFromHeader(*p)
  1101  		case *types.Part:
  1102  			_, err := thisBlockParts.AddPart(p)
  1103  			require.NoError(t, err)
  1104  		case *types.Vote:
  1105  			if p.Type == tmproto.PrecommitType {
  1106  				thisBlockExtCommit = &types.ExtendedCommit{
  1107  					Height:             p.Height,
  1108  					Round:              p.Round,
  1109  					BlockID:            p.BlockID,
  1110  					ExtendedSignatures: []types.ExtendedCommitSig{p.ExtendedCommitSig()},
  1111  				}
  1112  			}
  1113  		}
  1114  	}
  1115  	// grab the last block too
  1116  	bz, err := io.ReadAll(thisBlockParts.GetReader())
  1117  	require.NoError(t, err)
  1118  
  1119  	var pbb = new(tmproto.Block)
  1120  	require.NoError(t, proto.Unmarshal(bz, pbb))
  1121  
  1122  	block, err := types.BlockFromProto(pbb)
  1123  	require.NoError(t, err)
  1124  
  1125  	require.Equal(t, block.Height, height+1, "read bad block from wal. got height %d, expected %d", block.Height, height+1)
  1126  	commitHeight := thisBlockExtCommit.Height
  1127  	require.Equal(t, commitHeight, height+1, "commit does not match. got height %d, expected %d", commitHeight, height+1)
  1128  
  1129  	blocks = append(blocks, block)
  1130  	extCommits = append(extCommits, thisBlockExtCommit)
  1131  	return blocks, extCommits
  1132  }
  1133  
        // readPieceFromWAL extracts the part of a WAL message that matters for
        // rebuilding the chain: a part-set header, a block part, a vote, or an
        // end-height marker. Anything else returns nil.
  1134  func readPieceFromWAL(msg *TimedWALMessage) interface{} {
  1136  	switch m := msg.Msg.(type) {
  1137  	case msgInfo:
  1138  		switch msg := m.Msg.(type) {
  1139  		case *ProposalMessage:
  1140  			return &msg.Proposal.BlockID.PartSetHeader
  1141  		case *BlockPartMessage:
  1142  			return msg.Part
  1143  		case *VoteMessage:
  1144  			return msg.Vote
  1145  		}
  1146  	case EndHeightMessage:
  1147  		return m
  1148  	}
  1149  
  1150  	return nil
  1151  }
  1152  
  1153  // fresh state and mock store
  1154  func stateAndStore(
  1155  	t *testing.T,
  1156  	cfg *config.Config,
  1157  	pubKey crypto.PubKey,
  1158  	appVersion uint64,
  1159  ) (dbm.DB, sm.State, *mockBlockStore) {
  1160  	stateDB := dbm.NewMemDB()
  1161  	stateStore := sm.NewStore(stateDB)
  1162  	state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
  1163  	require.NoError(t, err)
  1164  	state.Version.Consensus.App = appVersion
  1165  	store := newMockBlockStore(t, cfg, state.ConsensusParams)
  1166  	require.NoError(t, stateStore.Save(state))
  1167  
  1168  	return stateDB, state, store
  1169  }
  1170  
  1171  //----------------------------------
  1172  // mock block store
  1173  
  1174  type mockBlockStore struct {
  1175  	cfg        *config.Config
  1176  	params     types.ConsensusParams
  1177  	chain      []*types.Block
  1178  	extCommits []*types.ExtendedCommit
  1179  	base       int64
  1180  	t          *testing.T
  1181  }
  1182  
  1183  var _ sm.BlockStore = &mockBlockStore{}
  1184  
  1185  // TODO: NewBlockStore(db.NewMemDB) ...
  1186  func newMockBlockStore(t *testing.T, cfg *config.Config, params types.ConsensusParams) *mockBlockStore {
  1187  	return &mockBlockStore{
  1188  		cfg:    cfg,
  1189  		params: params,
  1190  		t:      t,
  1191  	}
  1192  }
  1193  
  1194  func (bs *mockBlockStore) Height() int64                       { return int64(len(bs.chain)) }
  1195  func (bs *mockBlockStore) Base() int64                         { return bs.base }
  1196  func (bs *mockBlockStore) Size() int64                         { return bs.Height() - bs.Base() + 1 }
  1197  func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta      { return bs.LoadBlockMeta(bs.base) }
  1198  func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] }
  1199  func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
  1200  	return bs.chain[int64(len(bs.chain))-1]
  1201  }
  1202  func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil }
  1203  func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
  1204  	block := bs.chain[height-1]
  1205  	bps, err := block.MakePartSet(types.BlockPartSizeBytes)
  1206  	require.NoError(bs.t, err)
  1207  	return &types.BlockMeta{
  1208  		BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()},
  1209  		Header:  block.Header,
  1210  	}
  1211  }
  1212  func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
  1213  func (bs *mockBlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) {
  1214  }
  1215  func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
  1216  }
  1217  
  1218  func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
  1219  	return bs.extCommits[height-1].ToCommit()
  1220  }
  1221  
  1222  func (bs *mockBlockStore) LoadSeenCommit() *types.Commit {
  1223  	return bs.extCommits[len(bs.extCommits)-1].ToCommit()
  1224  }
  1225  
  1226  func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit {
  1227  	return bs.extCommits[height-1]
  1228  }
  1229  
  1230  func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {
  1231  	pruned := uint64(0)
  1232  	for i := int64(0); i < height-1; i++ {
  1233  		bs.chain[i] = nil
  1234  		bs.extCommits[i] = nil
  1235  		pruned++
  1236  	}
  1237  	bs.base = height
  1238  	return pruned, nil
  1239  }
  1240  
  1241  func (bs *mockBlockStore) DeleteLatestBlock() error { return nil }
  1242  
  1243  //---------------------------------------
  1244  // Test handshake/init chain
  1245  
  1246  func TestHandshakeUpdatesValidators(t *testing.T) {
  1247  	ctx, cancel := context.WithCancel(context.Background())
  1248  	defer cancel()
  1249  
  1250  	logger := log.NewNopLogger()
  1251  	votePower := 10 + int64(rand.Uint32())
  1252  	val, _, err := factory.Validator(ctx, votePower)
  1253  	require.NoError(t, err)
  1254  	vals := types.NewValidatorSet([]*types.Validator{val})
  1255  	app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
  1256  	client := abciclient.NewLocalClient(logger, app)
  1257  
  1258  	eventBus := eventbus.NewDefault(logger)
  1259  	require.NoError(t, eventBus.Start(ctx))
  1260  
  1261  	cfg, err := ResetConfig(t.TempDir(), "handshake_test_")
  1262  	require.NoError(t, err)
  1263  	t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) })
  1264  
  1265  	privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
  1266  	require.NoError(t, err)
  1267  	pubKey, err := privVal.GetPubKey(ctx)
  1268  	require.NoError(t, err)
  1269  	stateDB, state, store := stateAndStore(t, cfg, pubKey, 0x0)
  1270  	stateStore := sm.NewStore(stateDB)
  1271  
  1272  	oldValAddr := state.Validators.Validators[0].Address
  1273  
  1274  	// now start the app using the handshake - it should sync
  1275  	genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
  1276  	require.NoError(t, err)
  1277  
  1278  	handshaker := NewHandshaker(logger, stateStore, state, store, eventBus, genDoc)
  1279  	proxyApp := proxy.New(client, logger, proxy.NopMetrics())
  1280  	require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
  1281  
  1282  	require.NoError(t, handshaker.Handshake(ctx, proxyApp), "error on abci handshake")
  1283  
  1284  	// reload the state, check the validator set was updated
  1285  	state, err = stateStore.Load()
  1286  	require.NoError(t, err)
  1287  
  1288  	newValAddr := state.Validators.Validators[0].Address
  1289  	expectValAddr := val.Address
  1290  	assert.NotEqual(t, oldValAddr, newValAddr)
  1291  	assert.Equal(t, newValAddr, expectValAddr)
  1292  }
  1293  
  1294  // initChainApp returns its configured validator set from InitChain.
  1295  type initChainApp struct {
  1296  	abci.BaseApplication
  1297  	vals []abci.ValidatorUpdate
  1298  }
  1299  
  1300  func (ica *initChainApp) InitChain(_ context.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
  1301  	return &abci.ResponseInitChain{Validators: ica.vals}, nil
  1302  }