github.com/adoriasoft/tendermint@v0.34.0-dev1.0.20200722151356-96d84601a75a/statesync/syncer_test.go

package statesync

import (
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/p2p"
	p2pmocks "github.com/tendermint/tendermint/p2p/mocks"
	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
	"github.com/tendermint/tendermint/proxy"
	proxymocks "github.com/tendermint/tendermint/proxy/mocks"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/statesync/mocks"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"
)

// setupOfferSyncer sets up a basic syncer that can be used to test OfferSnapshot requests.
func setupOfferSyncer(t *testing.T) (*syncer, *proxymocks.AppConnSnapshot) {
	connQuery := &proxymocks.AppConnQuery{}
	connSnapshot := &proxymocks.AppConnSnapshot{}
	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil)
	syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")
	return syncer, connSnapshot
}

// simplePeer sets up a simple peer mock with an ID.
func simplePeer(id string) *p2pmocks.Peer {
	peer := &p2pmocks.Peer{}
	peer.On("ID").Return(p2p.ID(id))
	return peer
}

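// TestSyncer_SyncAny exercises the happy path end to end: snapshots are discovered from two
// peers, an unwanted format is rejected, the snapshot at height 1 is accepted, its chunks are
// fetched and applied (including a RETRY_SNAPSHOT round that refetches chunk 1), and the
// resulting state is verified against the ABCI Info response.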
func TestSyncer_SyncAny(t *testing.T) {
	state := sm.State{
		ChainID: "chain",
		Version: tmstate.Version{
			Consensus: tmversion.Consensus{
				Block: version.BlockProtocol,
				App:   0,
			},

			Software: version.TMCoreSemVer,
		},

		LastBlockHeight: 1,
		LastBlockID:     types.BlockID{Hash: []byte("blockhash")},
		LastBlockTime:   time.Now(),
		LastResultsHash: []byte("last_results_hash"),
		AppHash:         []byte("app_hash"),

		LastValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val1")}},
		Validators:     &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val2")}},
		NextValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val3")}},

		ConsensusParams:                  *types.DefaultConsensusParams(),
		LastHeightConsensusParamsChanged: 1,
	}
	commit := &types.Commit{BlockID: types.BlockID{Hash: []byte("blockhash")}}

	chunks := []*chunk{
		{Height: 1, Format: 1, Index: 0, Chunk: []byte{1, 1, 0}},
		{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 1, 1}},
		{Height: 1, Format: 1, Index: 2, Chunk: []byte{1, 1, 2}},
	}
	s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}

	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", uint64(1)).Return(state.AppHash, nil)
	stateProvider.On("AppHash", uint64(2)).Return([]byte("app_hash_2"), nil)
	stateProvider.On("Commit", uint64(1)).Return(commit, nil)
	stateProvider.On("State", uint64(1)).Return(state, nil)
	connSnapshot := &proxymocks.AppConnSnapshot{}
	connQuery := &proxymocks.AppConnQuery{}

	syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")

	// Adding a chunk should error when no sync is in progress
	_, err := syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}})
	require.Error(t, err)

	// Adding a couple of peers should trigger snapshot discovery messages
	peerA := &p2pmocks.Peer{}
	peerA.On("ID").Return(p2p.ID("a"))
	peerA.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true)
	syncer.AddPeer(peerA)
	peerA.AssertExpectations(t)

	peerB := &p2pmocks.Peer{}
	peerB.On("ID").Return(p2p.ID("b"))
	peerB.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true)
	syncer.AddPeer(peerB)
	peerB.AssertExpectations(t)

	// Both peers report back with snapshots. One of them also returns a snapshot we don't want, in
	// format 2, which will be rejected by the ABCI application.
	new, err := syncer.AddSnapshot(peerA, s)
	require.NoError(t, err)
	assert.True(t, new)

	new, err = syncer.AddSnapshot(peerB, s)
	require.NoError(t, err)
	assert.False(t, new)

	new, err = syncer.AddSnapshot(peerB, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}})
	require.NoError(t, err)
	assert.True(t, new)

	// We start a sync, with peers sending back chunks when requested. We first reject the snapshot
	// with height 2 format 2, and accept the snapshot at height 1.
	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: &abci.Snapshot{
			Height: 2,
			Format: 2,
			Chunks: 3,
			Hash:   []byte{1},
		},
		AppHash: []byte("app_hash_2"),
	}).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil)
	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: &abci.Snapshot{
			Height:   s.Height,
			Format:   s.Format,
			Chunks:   s.Chunks,
			Hash:     s.Hash,
			Metadata: s.Metadata,
		},
		AppHash: []byte("app_hash"),
	}).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil)

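	// Simulate the peers serving chunks: every ChunkRequest sent on the chunk channel is decoded,
	// the matching chunk is fed back via AddChunk, and the request is counted per index so we can
	// assert on the refetch behaviour below.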
	chunkRequests := make(map[uint32]int)
	chunkRequestsMtx := tmsync.Mutex{}
	onChunkRequest := func(args mock.Arguments) {
		pb, err := decodeMsg(args[1].([]byte))
		require.NoError(t, err)
		msg := pb.(*ssproto.ChunkRequest)
		require.EqualValues(t, 1, msg.Height)
		require.EqualValues(t, 1, msg.Format)
		// The requested index must be in range, since we index into chunks below.
		require.Less(t, msg.Index, uint32(len(chunks)))

		added, err := syncer.AddChunk(chunks[msg.Index])
		require.NoError(t, err)
		assert.True(t, added)

		chunkRequestsMtx.Lock()
		chunkRequests[msg.Index]++
		chunkRequestsMtx.Unlock()
	}
	peerA.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true)
	peerB.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true)

	// The first time chunk 2 is applied, we tell the syncer to retry the snapshot and discard
	// chunk 1, which should cause it to keep the existing chunks 0 and 2 and restart restoration
	// from the beginning. We also wait for a little while, to exercise the retry logic in
	// fetchChunks().
	connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
		Index: 2, Chunk: []byte{1, 1, 2},
	}).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return(
		&abci.ResponseApplySnapshotChunk{
			Result:        abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT,
			RefetchChunks: []uint32{1},
		}, nil)

	connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
		Index: 0, Chunk: []byte{1, 1, 0},
	}).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
	connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
		Index: 1, Chunk: []byte{1, 1, 1},
	}).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
	connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
		Index: 2, Chunk: []byte{1, 1, 2},
	}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
	connQuery.On("InfoSync", proxy.RequestInfo).Return(&abci.ResponseInfo{
		AppVersion:       9,
		LastBlockHeight:  1,
		LastBlockAppHash: []byte("app_hash"),
	}, nil)

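	// Run the sync itself; passing 0 means we don't spend any extra time discovering snapshots
	// and proceed immediately with the ones added above.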
	newState, lastCommit, err := syncer.SyncAny(0)
	require.NoError(t, err)

	time.Sleep(50 * time.Millisecond) // wait for peers to receive requests

	chunkRequestsMtx.Lock()
	assert.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests)
	chunkRequestsMtx.Unlock()

	// The syncer should have updated the state app version from the ABCI info response.
	expectState := state
	expectState.Version.Consensus.App = 9

	assert.Equal(t, expectState, newState)
	assert.Equal(t, commit, lastCommit)

	connSnapshot.AssertExpectations(t)
	connQuery.AssertExpectations(t)
	peerA.AssertExpectations(t)
	peerB.AssertExpectations(t)
}

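// TestSyncer_SyncAny_noSnapshots verifies that SyncAny fails with errNoSnapshots when no
// snapshots have been discovered.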
func TestSyncer_SyncAny_noSnapshots(t *testing.T) {
	syncer, _ := setupOfferSyncer(t)
	_, _, err := syncer.SyncAny(0)
	assert.Equal(t, errNoSnapshots, err)
}

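// TestSyncer_SyncAny_abort verifies that an ABORT response to OfferSnapshot aborts the sync
// with errAbort.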
func TestSyncer_SyncAny_abort(t *testing.T) {
	syncer, connSnapshot := setupOfferSyncer(t)

	s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	_, err := syncer.AddSnapshot(simplePeer("id"), s)
	require.NoError(t, err)
	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil)

	_, _, err = syncer.SyncAny(0)
	assert.Equal(t, errAbort, err)
	connSnapshot.AssertExpectations(t)
}

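// TestSyncer_SyncAny_reject verifies that rejected snapshots are skipped, and that the sync
// fails with errNoSnapshots once every candidate has been rejected.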
func TestSyncer_SyncAny_reject(t *testing.T) {
	syncer, connSnapshot := setupOfferSyncer(t)

	// s22 is tried first, then s12, then s11; all are rejected, so errNoSnapshots is returned.
	s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	_, err := syncer.AddSnapshot(simplePeer("id"), s22)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(simplePeer("id"), s12)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(simplePeer("id"), s11)
	require.NoError(t, err)

	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s22), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s12), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s11), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

	_, _, err = syncer.SyncAny(0)
	assert.Equal(t, errNoSnapshots, err)
	connSnapshot.AssertExpectations(t)
}

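// TestSyncer_SyncAny_reject_format verifies that a REJECT_FORMAT response causes all snapshots
// of that format to be skipped before the next snapshot is offered.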
func TestSyncer_SyncAny_reject_format(t *testing.T) {
	syncer, connSnapshot := setupOfferSyncer(t)

	// s22 is tried first; its REJECT_FORMAT response rejects both s22 and s12 (format 2),
	// after which s11 is offered and aborts.
	s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	_, err := syncer.AddSnapshot(simplePeer("id"), s22)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(simplePeer("id"), s12)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(simplePeer("id"), s11)
	require.NoError(t, err)

	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s22), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil)

	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s11), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil)

	_, _, err = syncer.SyncAny(0)
	assert.Equal(t, errAbort, err)
	connSnapshot.AssertExpectations(t)
}

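// TestSyncer_SyncAny_reject_sender verifies that a REJECT_SENDER response discards every
// snapshot reported by the offending peers.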
func TestSyncer_SyncAny_reject_sender(t *testing.T) {
	syncer, connSnapshot := setupOfferSyncer(t)

	peerA := simplePeer("a")
	peerB := simplePeer("b")
	peerC := simplePeer("c")

	// sbc will be offered first and rejected with REJECT_SENDER, causing all snapshots submitted
	// by either b or c (i.e. sb, sc, and sbc) to be rejected. Finally, sa is rejected and
	// errNoSnapshots is returned.
	sa := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	sb := &snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	sc := &snapshot{Height: 3, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	sbc := &snapshot{Height: 4, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	_, err := syncer.AddSnapshot(peerA, sa)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(peerB, sb)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(peerC, sc)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(peerB, sbc)
	require.NoError(t, err)
	_, err = syncer.AddSnapshot(peerC, sbc)
	require.NoError(t, err)

	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(sbc), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil)

	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(sa), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

	_, _, err = syncer.SyncAny(0)
	assert.Equal(t, errNoSnapshots, err)
	connSnapshot.AssertExpectations(t)
}

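// TestSyncer_SyncAny_abciError verifies that an error returned by the ABCI OfferSnapshot call
// is propagated from SyncAny.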
func TestSyncer_SyncAny_abciError(t *testing.T) {
	syncer, connSnapshot := setupOfferSyncer(t)

	errBoom := errors.New("boom")
	s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	_, err := syncer.AddSnapshot(simplePeer("id"), s)
	require.NoError(t, err)
	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
		Snapshot: toABCI(s), AppHash: []byte("app_hash"),
	}).Once().Return(nil, errBoom)

	_, _, err = syncer.SyncAny(0)
	assert.True(t, errors.Is(err, errBoom))
	connSnapshot.AssertExpectations(t)
}

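// TestSyncer_offerSnapshot checks how each OfferSnapshot result (and an outright call error)
// is mapped to a syncer error.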
func TestSyncer_offerSnapshot(t *testing.T) {
	unknownErr := errors.New("unknown error")
	boom := errors.New("boom")

	testcases := map[string]struct {
		result    abci.ResponseOfferSnapshot_Result
		err       error
		expectErr error
	}{
		"accept":           {abci.ResponseOfferSnapshot_ACCEPT, nil, nil},
		"abort":            {abci.ResponseOfferSnapshot_ABORT, nil, errAbort},
		"reject":           {abci.ResponseOfferSnapshot_REJECT, nil, errRejectSnapshot},
		"reject_format":    {abci.ResponseOfferSnapshot_REJECT_FORMAT, nil, errRejectFormat},
		"reject_sender":    {abci.ResponseOfferSnapshot_REJECT_SENDER, nil, errRejectSender},
		"unknown":          {abci.ResponseOfferSnapshot_UNKNOWN, nil, unknownErr},
		"error":            {0, boom, boom},
		"unknown non-zero": {9, nil, unknownErr},
	}
	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			syncer, connSnapshot := setupOfferSyncer(t)
			s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")}
			connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
				Snapshot: toABCI(s),
				AppHash:  []byte("app_hash"),
			}).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err)
			err := syncer.offerSnapshot(s)
			if tc.expectErr == unknownErr {
				require.Error(t, err)
			} else {
				unwrapped := errors.Unwrap(err)
				if unwrapped != nil {
					err = unwrapped
				}
				assert.Equal(t, tc.expectErr, err)
			}
		})
	}
}

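// TestSyncer_applyChunks_Results checks how each ApplySnapshotChunk result (and an outright
// call error) is mapped to a syncer error; a RETRY result causes the chunk to be applied again.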
func TestSyncer_applyChunks_Results(t *testing.T) {
	unknownErr := errors.New("unknown error")
	boom := errors.New("boom")

	testcases := map[string]struct {
		result    abci.ResponseApplySnapshotChunk_Result
		err       error
		expectErr error
	}{
		"accept":           {abci.ResponseApplySnapshotChunk_ACCEPT, nil, nil},
		"abort":            {abci.ResponseApplySnapshotChunk_ABORT, nil, errAbort},
		"retry":            {abci.ResponseApplySnapshotChunk_RETRY, nil, nil},
		"retry_snapshot":   {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, nil, errRetrySnapshot},
		"reject_snapshot":  {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT, nil, errRejectSnapshot},
		"unknown":          {abci.ResponseApplySnapshotChunk_UNKNOWN, nil, unknownErr},
		"error":            {0, boom, boom},
		"unknown non-zero": {9, nil, unknownErr},
	}
	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			connQuery := &proxymocks.AppConnQuery{}
			connSnapshot := &proxymocks.AppConnSnapshot{}
			stateProvider := &mocks.StateProvider{}
			stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil)
			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")

			body := []byte{1, 2, 3}
			chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "")
			require.NoError(t, err)
			_, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body})
			require.NoError(t, err)

			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
				Index: 0, Chunk: body,
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err)
			if tc.result == abci.ResponseApplySnapshotChunk_RETRY {
				connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
					Index: 0, Chunk: body,
				}).Once().Return(&abci.ResponseApplySnapshotChunk{
					Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			}

			err = syncer.applyChunks(chunks)
			if tc.expectErr == unknownErr {
				require.Error(t, err)
			} else {
				unwrapped := errors.Unwrap(err)
				if unwrapped != nil {
					err = unwrapped
				}
				assert.Equal(t, tc.expectErr, err)
			}
			connSnapshot.AssertExpectations(t)
		})
	}
}

func TestSyncer_applyChunks_RefetchChunks(t *testing.T) {
	// Discarding chunks via refetch_chunks should work the same for all results.
	testcases := map[string]struct {
		result abci.ResponseApplySnapshotChunk_Result
	}{
		"accept":          {abci.ResponseApplySnapshotChunk_ACCEPT},
		"abort":           {abci.ResponseApplySnapshotChunk_ABORT},
		"retry":           {abci.ResponseApplySnapshotChunk_RETRY},
		"retry_snapshot":  {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT},
		"reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT},
	}
	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			connQuery := &proxymocks.AppConnQuery{}
			connSnapshot := &proxymocks.AppConnSnapshot{}
			stateProvider := &mocks.StateProvider{}
			stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil)
			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")

			chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "")
			require.NoError(t, err)
			added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}})
			require.True(t, added)
			require.NoError(t, err)
			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}})
			require.True(t, added)
			require.NoError(t, err)
			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}})
			require.True(t, added)
			require.NoError(t, err)

			// The first two chunks are accepted, before the last one asks for chunk 1 to be refetched.
			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
				Index: 0, Chunk: []byte{0},
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
				Index: 1, Chunk: []byte{1},
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
				Index: 2, Chunk: []byte{2},
			}).Once().Return(&abci.ResponseApplySnapshotChunk{
				Result:        tc.result,
				RefetchChunks: []uint32{1},
			}, nil)

			// Since removing the chunk will cause Next() to block, we spawn a goroutine, then
			// check the queue contents, and finally close the queue to end the goroutine.
			// We don't really care about the result of applyChunks, since it has a separate test.
			go func() {
				syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error
			}()

			time.Sleep(50 * time.Millisecond)
			assert.True(t, chunks.Has(0))
			assert.False(t, chunks.Has(1))
			assert.True(t, chunks.Has(2))
			err = chunks.Close()
			require.NoError(t, err)
		})
	}
}

func TestSyncer_applyChunks_RejectSenders(t *testing.T) {
	// Banning chunk senders via reject_senders should work the same for all results.
	testcases := map[string]struct {
		result abci.ResponseApplySnapshotChunk_Result
	}{
		"accept":          {abci.ResponseApplySnapshotChunk_ACCEPT},
		"abort":           {abci.ResponseApplySnapshotChunk_ABORT},
		"retry":           {abci.ResponseApplySnapshotChunk_RETRY},
		"retry_snapshot":  {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT},
		"reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT},
	}
	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			connQuery := &proxymocks.AppConnQuery{}
			connSnapshot := &proxymocks.AppConnSnapshot{}
			stateProvider := &mocks.StateProvider{}
			stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil)
			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")

			// Set up three peers across two snapshots, and ask for one of them to be banned.
			// It should be banned from all snapshots.
			peerA := simplePeer("a")
			peerB := simplePeer("b")
			peerC := simplePeer("c")

			s1 := &snapshot{Height: 1, Format: 1, Chunks: 3}
			s2 := &snapshot{Height: 2, Format: 1, Chunks: 3}
			_, err := syncer.AddSnapshot(peerA, s1)
			require.NoError(t, err)
			_, err = syncer.AddSnapshot(peerA, s2)
			require.NoError(t, err)
			_, err = syncer.AddSnapshot(peerB, s1)
			require.NoError(t, err)
			_, err = syncer.AddSnapshot(peerB, s2)
			require.NoError(t, err)
			_, err = syncer.AddSnapshot(peerC, s1)
			require.NoError(t, err)
			_, err = syncer.AddSnapshot(peerC, s2)
			require.NoError(t, err)

			chunks, err := newChunkQueue(s1, "")
			require.NoError(t, err)
			added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerA.ID()})
			require.True(t, added)
			require.NoError(t, err)
			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}, Sender: peerB.ID()})
			require.True(t, added)
			require.NoError(t, err)
			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}, Sender: peerC.ID()})
			require.True(t, added)
			require.NoError(t, err)

			// The first two chunks are accepted, before the last one asks for sender b to be rejected.
			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
				Index: 0, Chunk: []byte{0}, Sender: "a",
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
				Index: 1, Chunk: []byte{1}, Sender: "b",
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
				Index: 2, Chunk: []byte{2}, Sender: "c",
			}).Once().Return(&abci.ResponseApplySnapshotChunk{
				Result:        tc.result,
				RejectSenders: []string{string(peerB.ID())},
			}, nil)

			// On retry, the last chunk will be tried again, so we just accept it then.
			if tc.result == abci.ResponseApplySnapshotChunk_RETRY {
				connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
					Index: 2, Chunk: []byte{2}, Sender: "c",
				}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			}

			// We don't really care about the result of applyChunks, since it has a separate test.
			// However, it will block on e.g. a retry result, so we spawn a goroutine that will
			// be shut down when the chunk queue closes.
			go func() {
				syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error
			}()

			time.Sleep(50 * time.Millisecond)

			// Peer b should have been banned from both snapshots, leaving only a and c.
			s1peers := syncer.snapshots.GetPeers(s1)
			assert.Len(t, s1peers, 2)
			assert.EqualValues(t, "a", s1peers[0].ID())
			assert.EqualValues(t, "c", s1peers[1].ID())

			s2peers := syncer.snapshots.GetPeers(s2)
			assert.Len(t, s2peers, 2)
			assert.EqualValues(t, "a", s2peers[0].ID())
			assert.EqualValues(t, "c", s2peers[1].ID())

			err = chunks.Close()
			require.NoError(t, err)
		})
	}
}

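// TestSyncer_verifyApp checks verifyApp against ABCI Info responses: mismatched heights or app
// hashes fail verification, call errors are propagated, and the app version is returned on success.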
func TestSyncer_verifyApp(t *testing.T) {
	boom := errors.New("boom")
	s := &snapshot{Height: 3, Format: 1, Chunks: 5, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")}

	testcases := map[string]struct {
		response  *abci.ResponseInfo
		err       error
		expectErr error
	}{
		"verified": {&abci.ResponseInfo{
			LastBlockHeight:  3,
			LastBlockAppHash: []byte("app_hash"),
			AppVersion:       9,
		}, nil, nil},
		"invalid height": {&abci.ResponseInfo{
			LastBlockHeight:  5,
			LastBlockAppHash: []byte("app_hash"),
			AppVersion:       9,
		}, nil, errVerifyFailed},
		"invalid hash": {&abci.ResponseInfo{
			LastBlockHeight:  3,
			LastBlockAppHash: []byte("xxx"),
			AppVersion:       9,
		}, nil, errVerifyFailed},
		"error": {nil, boom, boom},
	}
	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			connQuery := &proxymocks.AppConnQuery{}
			connSnapshot := &proxymocks.AppConnSnapshot{}
			stateProvider := &mocks.StateProvider{}
			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")

			connQuery.On("InfoSync", proxy.RequestInfo).Return(tc.response, tc.err)
			// Use appVersion rather than version to avoid shadowing the imported version package.
			appVersion, err := syncer.verifyApp(s)
			unwrapped := errors.Unwrap(err)
			if unwrapped != nil {
				err = unwrapped
			}
			assert.Equal(t, tc.expectErr, err)
			if err == nil {
				assert.Equal(t, tc.response.AppVersion, appVersion)
			}
		})
	}
}

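// toABCI converts a statesync snapshot to its ABCI protobuf representation.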
func toABCI(s *snapshot) *abci.Snapshot {
	return &abci.Snapshot{
		Height:   s.Height,
		Format:   s.Format,
		Chunks:   s.Chunks,
		Hash:     s.Hash,
		Metadata: s.Metadata,
	}
}
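
// The applyChunks tests above wire up the same mocks inline each time. A shared helper along
// the following lines could reduce that duplication. This is only a sketch: the helper name is
// ours, nothing above uses it, and it simply mirrors the existing setup calls verbatim.
func setupApplySyncer() (*syncer, *proxymocks.AppConnSnapshot) {
	connQuery := &proxymocks.AppConnQuery{}
	connSnapshot := &proxymocks.AppConnSnapshot{}
	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil)
	return newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, ""), connSnapshot
}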