github.com/m3db/m3@v1.5.0/src/dbnode/client/session_fetch_bulk_blocks_test.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package client

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"sort"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/digest"
	"github.com/m3db/m3/src/dbnode/encoding"
	"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/retention"
	"github.com/m3db/m3/src/dbnode/storage/block"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/x/xio"
	"github.com/m3db/m3/src/x/checked"
	"github.com/m3db/m3/src/x/context"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/pool"
	xretry "github.com/m3db/m3/src/x/retry"
	"github.com/m3db/m3/src/x/serialize"
	xsync "github.com/m3db/m3/src/x/sync"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	blockSize = 2 * time.Hour
	nsID      = ident.StringID("testNs1")

	nsRetentionOpts = retention.NewOptions().SetBlockSize(blockSize).SetRetentionPeriod(48 * blockSize)

	testTagDecodingPool = serialize.NewTagDecoderPool(
		serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
		pool.NewObjectPoolOptions().SetSize(1))

	testTagEncodingPool = serialize.NewTagEncoderPool(
		serialize.NewTagEncoderOptions(),
		pool.NewObjectPoolOptions().SetSize(1))

	testIDPool     = newSessionTestOptions().IdentifierPool()
	fooID          = ident.StringID("foo")
	fooTags        checked.Bytes
	fooDecodedTags = ident.NewTags(ident.StringTag("aaa", "bbb"))
	barID          = ident.StringID("bar")
	bazID          = ident.StringID("baz")
	testHost       = topology.NewHost("testhost", "testhost:9000")
)

func init() {
	testTagDecodingPool.Init()
	testTagEncodingPool.Init()
	tagEncoder := testTagEncodingPool.Get()
	err := tagEncoder.Encode(ident.NewTagsIterator(fooDecodedTags))
	if err != nil {
		panic(err)
	}
	var ok bool
	fooTags, ok = tagEncoder.Data()
	if !ok {
		panic(fmt.Errorf("encode tags failed"))
	}
}

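// testsNsMetadata returns namespace metadata for nsID configured with the
// test retention options defined above (2h blocks, 48 blocks of retention).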
func testsNsMetadata(t *testing.T) namespace.Metadata {
	md, err := namespace.NewMetadata(nsID, namespace.NewOptions().SetRetentionOptions(nsRetentionOpts))
	require.NoError(t, err)
	return md
}

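// newSessionTestMultiReaderIteratorPool returns an initialized multi-reader
// iterator pool backed by the default m3tsz reader iterator allocator.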
func newSessionTestMultiReaderIteratorPool() encoding.MultiReaderIteratorPool {
	p := encoding.NewMultiReaderIteratorPool(nil)
	p.Init(m3tsz.DefaultReaderIteratorAllocFn(encoding.NewOptions()))
	return p
}

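// newSessionTestAdminOptions returns admin session options for these tests:
// the origin is the first host of the test shard set and series block batches
// are kept small (size 2, concurrency 4, one second timeouts) so the batching
// paths are exercised.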
func newSessionTestAdminOptions() AdminOptions {
	opts := newSessionTestOptions().(AdminOptions)
	hostShardSets := sessionTestHostAndShards(sessionTestShardSet())
	host := hostShardSets[0].Host()
	return opts.
		SetOrigin(host).
		SetFetchSeriesBlocksBatchSize(2).
		SetFetchSeriesBlocksMetadataBatchTimeout(time.Second).
		SetFetchSeriesBlocksBatchTimeout(time.Second).
		SetFetchSeriesBlocksBatchConcurrency(4)
}

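// newResultTestOptions returns bootstrap result options whose database blocks
// use a null encoder pool.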
func newResultTestOptions() result.Options {
	opts := result.NewOptions()
	encoderPool := encoding.NewEncoderPool(nil)
	encoderPool.Init(encoding.NewNullEncoder)
	return opts.SetDatabaseBlockOptions(opts.DatabaseBlockOptions().
		SetEncoderPool(encoderPool))
}

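// testPeers wraps a slice of peers together with the majority replica count
// that the selection logic expects.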
func testPeers(v []peer) peers {
	return peers{peers: v, majorityReplicas: topology.Majority(len(v))}
}

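// newRoundRobinPickBestPeerFn returns a pickBestPeerFn that ignores the
// received metadata and simply cycles through the candidates in order (the
// first call selects index 0, the next index 1, and so on, modulo the number
// of candidate metadatas). Installing it via session.pickBestPeerFn makes
// peer selection deterministic for the mock expectations below.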
func newRoundRobinPickBestPeerFn() pickBestPeerFn {
	calls := int32(0)
	return func(
		perPeerBlocksMetadata []receivedBlockMetadata,
		peerQueues peerBlocksQueues,
		resources pickBestPeerPooledResources,
	) (int, pickBestPeerPooledResources) {
		numCalled := atomic.AddInt32(&calls, 1)
		callIdx := numCalled - 1
		idx := callIdx % int32(len(perPeerBlocksMetadata))
		return int(idx), resources
	}
}

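// newTestBlocks returns three test series (foo, bar, baz), each with a single
// merged block at successive block starts; the byte payloads are distinct so
// assertions can tell the blocks apart.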
func newTestBlocks(start xtime.UnixNano) []testBlocks {
	return []testBlocks{
		{
			id: fooID,
			blocks: []testBlock{
				{
					start: start.Add(blockSize * 1),
					segments: &testBlockSegments{merged: &testBlockSegment{
						head: []byte{1, 2},
						tail: []byte{3},
					}},
				},
			},
		},
		{
			id: barID,
			blocks: []testBlock{
				{
					start: start.Add(blockSize * 2),
					segments: &testBlockSegments{merged: &testBlockSegment{
						head: []byte{4, 5},
						tail: []byte{6},
					}},
				},
			},
		},
		{
			id: bazID,
			blocks: []testBlock{
				{
					start: start.Add(blockSize * 3),
					segments: &testBlockSegments{merged: &testBlockSegment{
						head: []byte{7, 8},
						tail: []byte{9},
					}},
				},
			},
		},
	}
}
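
// TestFetchBootstrapBlocksAllPeersSucceedV2 exercises the happy path for
// FetchBootstrapBlocksFromPeers: the non-origin peers serve the metadata and
// the blocks round-robin, and the bootstrap result must contain exactly the
// blocks from newTestBlocks.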
func TestFetchBootstrapBlocksAllPeersSucceedV2(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions()
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	mockHostQueues, mockClients := mockHostQueuesAndClientsForFetchBootstrapBlocks(ctrl, opts)
	session.newHostQueueFn = mockHostQueues.newHostQueueFn()

	// Don't drain the peer blocks queue, explicitly drain ourselves to
	// avoid unpredictable batches being retrieved from peers
	var (
		qs      []*peerBlocksQueue
		qsMutex sync.RWMutex
	)
	session.newPeerBlocksQueueFn = func(
		peer peer,
		maxQueueSize int,
		_ time.Duration,
		workers xsync.WorkerPool,
		processFn processFn,
	) *peerBlocksQueue {
		qsMutex.Lock()
		defer qsMutex.Unlock()
		q := newPeerBlocksQueue(peer, maxQueueSize, 0, workers, processFn)
		qs = append(qs, q)
		return q
	}

	require.NoError(t, session.Open())

	var (
		batchSize = opts.FetchSeriesBlocksBatchSize()
		//nolint: durationcheck
		start  = xtime.Now().Truncate(blockSize).Add(blockSize * -(24 - 1))
		blocks = newTestBlocks(start)
	)

	// Expect the fetch metadata calls
	metadataResult := resultMetadataFromBlocks(blocks)
	// Skip the first client which is the client for the origin
	mockClients[1:].expectFetchMetadataAndReturn(metadataResult, opts)

	// Expect the fetch blocks calls
	participating := len(mockClients) - 1
	blocksExpectedReqs, blocksResult := expectedReqsAndResultFromBlocks(t,
		blocks, batchSize, participating,
		func(_ ident.ID, blockIdx int) (clientIdx int) {
			// Round robin to match the best peer selection algorithm
			return blockIdx % participating
		})
	// Skip the first client which is the client for the origin
	for i, client := range mockClients[1:] {
		expectFetchBlocksAndReturn(client, blocksExpectedReqs[i], blocksResult[i])
	}

	// Make sure peer selection is round robin to match our expected
	// peer fetch calls
	session.pickBestPeerFn = newRoundRobinPickBestPeerFn()

	// Fetch blocks
	go func() {
		// Trigger peer queues to drain explicitly when all work enqueued
		for {
			qsMutex.RLock()
			assigned := 0
			for _, q := range qs {
				assigned += int(atomic.LoadUint64(&q.assigned))
			}
			qsMutex.RUnlock()
			if assigned == len(blocks) {
				qsMutex.Lock()
				defer qsMutex.Unlock()
				for _, q := range qs {
					q.drain()
				}
				return
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()
	rangeStart := start
	rangeEnd := start.Add(blockSize * (24 - 1))
	bootstrapOpts := newResultTestOptions()
	result, err := session.FetchBootstrapBlocksFromPeers(
		testsNsMetadata(t), 0, rangeStart, rangeEnd, bootstrapOpts)
	assert.NoError(t, err)
	assert.NotNil(t, result)

	// Assert result
	assertFetchBootstrapBlocksResult(t, blocks, result)

	assert.NoError(t, session.Close())
}

// TestFetchBootstrapBlocksDontRetryHostNotAvailableInRetrier is a regression
// test ensuring that when one peer is unavailable (hard down) while the others
// are healthy, streamBlocksMetadataFromPeers does not wait for all of the
// exponential retries against the downed host to fail before continuing. If
// the client waited for those retries to complete, each metadata fetch could
// block for up to 30 seconds due to the retrier's exponential backoff.
func TestFetchBootstrapBlocksDontRetryHostNotAvailableInRetrier(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions().
		// Set bootstrap consistency level to unstrict majority because there are only 3 nodes in the
		// cluster. The first one will not return data because it is the origin and the last node will
		// return an error.
		SetBootstrapConsistencyLevel(topology.ReadConsistencyLevelUnstrictMajority).
		// Configure the stream blocks retrier such that, if the short-circuit logic
		// did not work, the test would time out.
		SetStreamBlocksRetrier(xretry.NewRetrier(
			xretry.NewOptions().
				SetBackoffFactor(10).
				SetMaxRetries(10).
				SetInitialBackoff(30 * time.Second).
				SetJitter(true),
		)).
		// Ensure that the batch size is configured such that all of the blocks could
		// be retrieved from a single peer in a single request. This makes mocking
		// expected calls and responses significantly easier since which batch call a
		// given block will fall into is non-deterministic (depends on the result of
		// concurrent execution).
		SetFetchSeriesBlocksBatchSize(len(newTestBlocks(xtime.Now())))
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	var (
		mockHostQueues MockHostQueues
		mockClients    MockTChanNodes
		hostShardSets  = sessionTestHostAndShards(sessionTestShardSet())
	)
	// Skip the last one because it is going to be manually configured to return an error.
	for i := 0; i < len(hostShardSets)-1; i++ {
		host := hostShardSets[i].Host()
		hostQueue, client := defaultHostAndClientWithExpect(ctrl, host, opts)
		mockHostQueues = append(mockHostQueues, hostQueue)

		if i != 0 {
			// Skip creating a client for the origin because it will never be called so we want
			// to avoid setting up expectations for it.
			mockClients = append(mockClients, client)
		}
	}

	// Construct the last hostQueue with a connection pool that will error out.
	host := hostShardSets[len(hostShardSets)-1].Host()
	connectionPool := NewMockconnectionPool(ctrl)
	connectionPool.EXPECT().
		NextClient().
		Return(nil, nil, errConnectionPoolHasNoConnections).
		AnyTimes()
	hostQueue := NewMockhostQueue(ctrl)
	hostQueue.EXPECT().Open()
	hostQueue.EXPECT().Host().Return(host).AnyTimes()
	hostQueue.EXPECT().
		ConnectionCount().
		Return(opts.MinConnectionCount()).
		Times(sessionTestShards)
	hostQueue.EXPECT().
		ConnectionPool().
		Return(connectionPool).
		AnyTimes()
	hostQueue.EXPECT().
		BorrowConnection(gomock.Any()).
		Return(errConnectionPoolHasNoConnections).
		AnyTimes()
	hostQueue.EXPECT().Close()
	mockHostQueues = append(mockHostQueues, hostQueue)

	session.newHostQueueFn = mockHostQueues.newHostQueueFn()

	// Don't drain the peer blocks queue, explicitly drain ourselves to
	// avoid unpredictable batches being retrieved from peers
	var (
		qs      []*peerBlocksQueue
		qsMutex sync.RWMutex
	)
	session.newPeerBlocksQueueFn = func(
		peer peer,
		maxQueueSize int,
		_ time.Duration,
		workers xsync.WorkerPool,
		processFn processFn,
	) *peerBlocksQueue {
		qsMutex.Lock()
		defer qsMutex.Unlock()
		q := newPeerBlocksQueue(peer, maxQueueSize, 0, workers, processFn)
		qs = append(qs, q)
		return q
	}
	require.NoError(t, session.Open())

	var (
		batchSize = opts.FetchSeriesBlocksBatchSize()
		//nolint: durationcheck
		start  = xtime.Now().Truncate(blockSize).Add(blockSize * -(24 - 1))
		blocks = newTestBlocks(start)

		// Expect the fetch metadata calls.
		metadataResult = resultMetadataFromBlocks(blocks)
	)
	mockClients.expectFetchMetadataAndReturn(metadataResult, opts)

	// Expect the fetch blocks calls.
	participating := len(mockClients)
	blocksExpectedReqs, blocksResult := expectedReqsAndResultFromBlocks(t,
		blocks, batchSize, participating,
		func(_ ident.ID, blockIdx int) (clientIdx int) {
			// Only one host to pull data from.
			return 0
		})
	// The origin never had a client created for it, so expect fetches
	// against every client in mockClients.
	for i, client := range mockClients {
		expectFetchBlocksAndReturn(client, blocksExpectedReqs[i], blocksResult[i])
	}

	// Pin peer selection to the only peer with data to match our expected
	// peer fetch calls.
	session.pickBestPeerFn = func(
		perPeerBlocksMetadata []receivedBlockMetadata,
		peerQueues peerBlocksQueues,
		resources pickBestPeerPooledResources,
	) (int, pickBestPeerPooledResources) {
		// Only one host to pull data from.
		return 0, resources
	}

	// Fetch blocks.
	go func() {
		// Trigger peer queues to drain explicitly when all work enqueued.
		for {
			qsMutex.RLock()
			assigned := 0
			for _, q := range qs {
				assigned += int(atomic.LoadUint64(&q.assigned))
			}
			qsMutex.RUnlock()
			if assigned == len(blocks) {
				qsMutex.Lock()
				defer qsMutex.Unlock()
				for _, q := range qs {
					q.drain()
				}
				return
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()
	rangeStart := start
	rangeEnd := start.Add(blockSize * (24 - 1))
	bootstrapOpts := newResultTestOptions()
	result, err := session.FetchBootstrapBlocksFromPeers(
		testsNsMetadata(t), 0, rangeStart, rangeEnd, bootstrapOpts)
	require.NoError(t, err)
	require.NotNil(t, result)

	// Assert result.
	assertFetchBootstrapBlocksResult(t, blocks, result)

	require.NoError(t, session.Close())
}

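// fetchBlocksFromPeersTestScenarioGenerator produces the set of blocks a given
// peer advertises and serves in a FetchBlocksFromPeers test scenario.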
type fetchBlocksFromPeersTestScenarioGenerator func(
	peerIdx int,
	numPeers int,
	start xtime.UnixNano,
) []testBlocks

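// fetchBlocksFromPeersTestsHelper drives FetchBlocksFromPeers against a mocked
// topology: peerScenarioFn decides which blocks each peer owns, peer index 0
// acts as the local origin and is skipped, and the observed result is checked
// against the expected per-peer blocks.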
func fetchBlocksFromPeersTestsHelper(
	t *testing.T,
	peerScenarioFn fetchBlocksFromPeersTestScenarioGenerator,
) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions()
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	mockHostQueues, mockClients := mockHostQueuesAndClientsForFetchBootstrapBlocks(ctrl, opts)
	session.newHostQueueFn = mockHostQueues.newHostQueueFn()

	// Don't drain the peer blocks queue, explicitly drain ourselves to
	// avoid unpredictable batches being retrieved from peers
	var (
		qs      []*peerBlocksQueue
		qsMutex sync.RWMutex
	)
	session.newPeerBlocksQueueFn = func(
		peer peer,
		maxQueueSize int,
		_ time.Duration,
		workers xsync.WorkerPool,
		processFn processFn,
	) *peerBlocksQueue {
		qsMutex.Lock()
		defer qsMutex.Unlock()
		q := newPeerBlocksQueue(peer, maxQueueSize, 0, workers, processFn)
		qs = append(qs, q)
		return q
	}

	require.NoError(t, session.Open())

	batchSize := opts.FetchSeriesBlocksBatchSize()
	//nolint: durationcheck
	start := xtime.Now().Truncate(blockSize).Add(blockSize * -(24 - 1))

	allBlocks := make([][]testBlocks, 0, len(mockHostQueues))
	peerBlocks := make([][]testBlocks, 0, len(mockHostQueues))
	numBlocks := 0
	for idx := 0; idx < len(mockHostQueues); idx++ {
		blocks := peerScenarioFn(idx, len(mockHostQueues), start)

		// Add to the expected list
		allBlocks = append(allBlocks, blocks)

		if idx == 0 {
			continue // skip the first host, which is used as the local m3dbnode
		}

		// Expect the fetch blocks calls
		blocksExpectedReqs, blocksResult := expectedRepairFetchRequestsAndResponses(blocks, batchSize)
		expectFetchBlocksAndReturn(mockClients[idx], blocksExpectedReqs, blocksResult)

		// Track number of blocks to be used to drain the work queue
		for _, blk := range blocks {
			numBlocks = numBlocks + len(blk.blocks)
		}
		peerBlocks = append(peerBlocks, blocks)
	}

	// Fetch blocks
	go func() {
		// Trigger peer queues to drain explicitly when all work enqueued
		for {
			qsMutex.RLock()
			assigned := 0
			for _, q := range qs {
				assigned += int(atomic.LoadUint64(&q.assigned))
			}
			qsMutex.RUnlock()
			if assigned == numBlocks {
				qsMutex.Lock()
				defer qsMutex.Unlock()
				for _, q := range qs {
					q.drain()
				}
				return
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()
	blockReplicasMetadata := testBlocksToBlockReplicasMetadata(t, peerBlocks, mockHostQueues[1:])
	bootstrapOpts := newResultTestOptions()
	result, err := session.FetchBlocksFromPeers(testsNsMetadata(t), 0, topology.ReadConsistencyLevelAll,
		blockReplicasMetadata, bootstrapOpts)
	require.NoError(t, err)
	require.NotNil(t, result)

	assertFetchBlocksFromPeersResult(t, peerBlocks, mockHostQueues[1:], result)
	require.NoError(t, session.Close())
}

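// TestFetchBlocksFromPeersSingleNonIdenticalBlockReplica has each remote peer
// advertise a single foo block whose payload differs per peer, and expects
// each advertised replica back in the result.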
func TestFetchBlocksFromPeersSingleNonIdenticalBlockReplica(t *testing.T) {
	peerScenarioGeneratorFn := func(
		peerIdx int,
		numPeers int,
		start xtime.UnixNano,
	) []testBlocks {
		if peerIdx == 0 {
			return []testBlocks{}
		}
		return []testBlocks{
			{
				id: fooID,
				blocks: []testBlock{
					{
						start: start.Add(blockSize * 1),
						segments: &testBlockSegments{merged: &testBlockSegment{
							head: []byte{byte(1 + 10*peerIdx), byte(2 + 10*peerIdx)},
							tail: []byte{byte(3 + 10*peerIdx)},
						}},
					},
				},
			},
		}
	}
	fetchBlocksFromPeersTestsHelper(t, peerScenarioGeneratorFn)
}

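// TestFetchRepairBlocksMultipleDifferentBlocks has every peer advertise the
// same three series but with peer-specific payloads, so each replica must be
// fetched and matched per peer.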
func TestFetchRepairBlocksMultipleDifferentBlocks(t *testing.T) {
	peerScenarioGeneratorFn := func(
		peerIdx int,
		numPeers int,
		start xtime.UnixNano,
	) []testBlocks {
		return []testBlocks{
			{
				id: fooID,
				blocks: []testBlock{
					{
						start: start.Add(blockSize * 1),
						segments: &testBlockSegments{merged: &testBlockSegment{
							head: []byte{byte(1 + 10*peerIdx), byte(2 + 10*peerIdx)},
							tail: []byte{byte(3 + 10*peerIdx)},
						}},
					},
				},
			},
			{
				id: barID,
				blocks: []testBlock{
					{
						start: start.Add(blockSize * 2),
						segments: &testBlockSegments{merged: &testBlockSegment{
							head: []byte{byte(4 + 10*peerIdx), byte(5 + 10*peerIdx)},
							tail: []byte{byte(6 + 10*peerIdx)},
						}},
					},
				},
			},
			{
				id: bazID,
				blocks: []testBlock{
					{
						start: start.Add(blockSize * 3),
						segments: &testBlockSegments{merged: &testBlockSegment{
							head: []byte{byte(7 + 10*peerIdx), byte(8 + 10*peerIdx)},
							tail: []byte{byte(9 + 10*peerIdx)},
						}},
					},
				},
			},
		}
	}
	fetchBlocksFromPeersTestsHelper(t, peerScenarioGeneratorFn)
}

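// TestFetchRepairBlocksMultipleBlocksSameIDAndPeer additionally gives baz a
// second block on the same peer to cover multiple blocks per series ID.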
func TestFetchRepairBlocksMultipleBlocksSameIDAndPeer(t *testing.T) {
	peerScenarioGeneratorFn := func(
		peerIdx int,
		numPeers int,
		start xtime.UnixNano,
	) []testBlocks {
		return []testBlocks{
			{
				id: fooID,
				blocks: []testBlock{
					{
						start: start.Add(blockSize * 1),
						segments: &testBlockSegments{merged: &testBlockSegment{
							head: []byte{byte(1 + 10*peerIdx), byte(2 + 10*peerIdx)},
							tail: []byte{byte(3 + 10*peerIdx)},
						}},
					},
				},
			},
			{
				id: barID,
				blocks: []testBlock{
					{
						start: start.Add(blockSize * 2),
						segments: &testBlockSegments{merged: &testBlockSegment{
							head: []byte{byte(4 + 10*peerIdx), byte(5 + 10*peerIdx)},
							tail: []byte{byte(6 + 10*peerIdx)},
						}},
					},
				},
			},
			{
				id: bazID,
				blocks: []testBlock{
					{
						start: start.Add(blockSize * 3),
						segments: &testBlockSegments{merged: &testBlockSegment{
							head: []byte{byte(7 + 10*peerIdx), byte(8 + 10*peerIdx)},
							tail: []byte{byte(9 + 10*peerIdx)},
						}},
					},
				},
			},
			{
				id: bazID,
				blocks: []testBlock{
					{
						start: start.Add(blockSize * 4),
						segments: &testBlockSegments{merged: &testBlockSegment{
							head: []byte{byte(8 + 10*peerIdx), byte(9 + 10*peerIdx)},
							tail: []byte{byte(1 + 10*peerIdx)},
						}},
					},
				},
			},
		}
	}
	fetchBlocksFromPeersTestsHelper(t, peerScenarioGeneratorFn)
}

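// assertFetchBlocksFromPeersResult walks the observed iterator and checks that
// every expected (peer, series ID, block start) triple is seen with matching
// bytes; observed blocks that match no expectation are reported as extras and
// expectations never observed are reported as un-matched.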
func assertFetchBlocksFromPeersResult(
	t *testing.T,
	expectedBlocks [][]testBlocks,
	peers MockHostQueues,
	observedBlocksIter PeerBlocksIter,
) {
	matchedBlocks := make([][][]bool, 0, len(expectedBlocks))
	for _, blocks := range expectedBlocks {
		unsetBlocks := make([][]bool, len(blocks))
		matchedBlocks = append(matchedBlocks, unsetBlocks)
	}
	extraBlocks := []peerBlocksDatapoint{}
	for observedBlocksIter.Next() {
		observedHost, observedID, _, observedBlock := observedBlocksIter.Current()

		// find which peer the current datapoint is for
		peerIdx := -1
		for idx, mockPeer := range peers {
			if observedHost.String() == mockPeer.Host().String() {
				peerIdx = idx
				break
			}
		}

		// unknown peer, marking extra block
		if peerIdx == -1 {
			extraBlocks = append(extraBlocks, peerBlocksDatapoint{
				id:    observedID,
				peer:  observedHost,
				block: observedBlock,
			})
			continue
		}

		// find blockIdx
		blockIdx := -1
		subBlockIdx := -1
		for i, blocks := range expectedBlocks[peerIdx] {
			if !blocks.id.Equal(observedID) {
				continue
			}
			for j, expectedBlock := range blocks.blocks {
				if observedBlock.StartTime().Equal(expectedBlock.start) {
					blockIdx = i
					subBlockIdx = j
					break
				}
			}
		}

		// unknown block, marking extra
		if blockIdx == -1 || subBlockIdx == -1 {
			extraBlocks = append(extraBlocks, peerBlocksDatapoint{
				id:    observedID,
				peer:  observedHost,
				block: observedBlock,
			})
			continue
		}

		// lazily construct the innermost matchedBlocks array
		if matchedBlocks[peerIdx][blockIdx] == nil {
			matchedBlocks[peerIdx][blockIdx] = make([]bool, len(expectedBlocks[peerIdx][blockIdx].blocks))
		}

		expectedBlock := expectedBlocks[peerIdx][blockIdx].blocks[subBlockIdx]
		expectedData := append(expectedBlock.segments.merged.head, expectedBlock.segments.merged.tail...)
		ctx := context.NewBackground()
		defer ctx.Close()
		stream, err := observedBlock.Stream(ctx)
		require.NoError(t, err)
		seg, err := stream.Segment()
		require.NoError(t, err)

		actualData := append(bytesFor(seg.Head), bytesFor(seg.Tail)...)

		// compare actual vs expected data; skip marking a match on any mismatch
		if !bytes.Equal(expectedData, actualData) {
			continue
		}

		// data is the same, mark match
		matchedBlocks[peerIdx][blockIdx][subBlockIdx] = true
	}

	for _, extraBlock := range extraBlocks {
		assert.Fail(t, fmt.Sprintf("received extra block: %v", extraBlock))
	}

	for i, peerMatches := range matchedBlocks {
		for j, blockMatches := range peerMatches {
			if len(blockMatches) == 0 {
				assert.Fail(t, fmt.Sprintf(
					"un-matched block [ peer=%d, block=%d, expected=%v ]",
					i, j, expectedBlocks[i][j]))
			}
			for k, blockMatch := range blockMatches {
				if !blockMatch {
					assert.Fail(t, fmt.Sprintf(
						"un-matched block [ peer=%d, block=%d, sub-block=%d, expected=%v ]",
						i, j, k, expectedBlocks[i][j].blocks[k]))
				}
			}
		}
	}
}

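// testBlocksToBlockReplicasMetadata converts per-peer test blocks into the
// block.ReplicaMetadata slice that FetchBlocksFromPeers takes as input.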
func testBlocksToBlockReplicasMetadata(
	t *testing.T,
	peerBlocks [][]testBlocks,
	peers MockHostQueues,
) []block.ReplicaMetadata {
	assert.True(t, len(peerBlocks) == len(peers))
	blockReplicas := make([]block.ReplicaMetadata, 0, len(peers))
	for idx, blocks := range peerBlocks {
		blocksMetadata := resultMetadataFromBlocks(blocks)
		peerHost := peers[idx].Host()
		for _, bm := range blocksMetadata {
			for _, b := range bm.blocks {
				blockReplicas = append(blockReplicas, block.ReplicaMetadata{
					Metadata: block.Metadata{
						ID:       bm.id,
						Start:    b.start,
						Size:     *(b.size),
						Checksum: b.checksum,
					},
					Host: peerHost,
				})
			}
		}
	}
	return blockReplicas
}

func TestSelectPeersFromPerPeerBlockMetadatasAllPeersSucceed(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions()
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	var (
		metrics          = session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
		peerA            = NewMockpeer(ctrl)
		peerB            = NewMockpeer(ctrl)
		peers            = preparedMockPeers(peerA, peerB)
		enqueueCh        = NewMockenqueueChannel(ctrl)
		peerBlocksQueues = mockPeerBlocksQueues(peers, opts)
	)
	defer peerBlocksQueues.closeAll()

	var (
		start    = timeZero
		checksum = uint32(1)
		perPeer  = []receivedBlockMetadata{
			{
				peer: peerA,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum,
				},
			},
			{
				peer: peerB,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum,
				},
			},
		}
		pooled = selectPeersFromPerPeerBlockMetadatasPooledResources{}
	)

	// Perform selection
	selected, _ := session.selectPeersFromPerPeerBlockMetadatas(
		perPeer, peerBlocksQueues, enqueueCh,
		newStaticRuntimeReadConsistencyLevel(opts.BootstrapConsistencyLevel()),
		testPeers(peers), pooled, metrics)

	// Assert selection first peer
	require.Equal(t, 1, len(selected))

	assert.Equal(t, start, selected[0].block.start)
	assert.Equal(t, int64(2), selected[0].block.size)
	assert.Equal(t, &checksum, selected[0].block.checksum)

	assert.Equal(t, 1, selected[0].block.reattempt.attempt)
	assert.Equal(t, []peer{peerA}, selected[0].block.reattempt.attempted)
}

func TestSelectPeersFromPerPeerBlockMetadatasSelectAllOnDifferingChecksums(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions()
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	var (
		metrics          = session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
		peerA            = NewMockpeer(ctrl)
		peerB            = NewMockpeer(ctrl)
		peerC            = NewMockpeer(ctrl)
		peers            = preparedMockPeers(peerA, peerB, peerC)
		enqueueCh        = NewMockenqueueChannel(ctrl)
		peerBlocksQueues = mockPeerBlocksQueues(peers, opts)
	)
	defer peerBlocksQueues.closeAll()

	var (
		start     = timeZero
		checksums = []uint32{1, 2}
		perPeer   = []receivedBlockMetadata{
			{
				peer: peerA,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksums[0],
				},
			},
			{
				peer: peerB,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksums[1],
				},
			},
			{
				peer: peerC,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksums[1],
				},
			},
		}
		pooled = selectPeersFromPerPeerBlockMetadatasPooledResources{}
	)

	// Perform selection
	selected, _ := session.selectPeersFromPerPeerBlockMetadatas(
		perPeer, peerBlocksQueues, enqueueCh,
		newStaticRuntimeReadConsistencyLevel(opts.BootstrapConsistencyLevel()),
		testPeers(peers), pooled, metrics)

	// Assert selection all peers
	require.Equal(t, 3, len(selected))

	for i, metadata := range perPeer {
		assert.Equal(t, metadata.peer, selected[i].peer)
		assert.True(t, metadata.block.start.Equal(selected[i].block.start))
		assert.Equal(t, metadata.block.size, selected[i].block.size)
		assert.Equal(t, metadata.block.checksum, selected[i].block.checksum)
	}
}

func TestSelectPeersFromPerPeerBlockMetadatasTakeSinglePeer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions()
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	var (
		metrics          = session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
		peerA            = NewMockpeer(ctrl)
		peerB            = NewMockpeer(ctrl)
		peerC            = NewMockpeer(ctrl)
		peers            = preparedMockPeers(peerA, peerB, peerC)
		enqueueCh        = NewMockenqueueChannel(ctrl)
		peerBlocksQueues = mockPeerBlocksQueues(peers, opts)
	)
	defer peerBlocksQueues.closeAll()

	var (
		start    = timeZero
		checksum = uint32(2)
		perPeer  = []receivedBlockMetadata{
			{
				peer: peerA,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum,
				},
			},
		}
		pooled = selectPeersFromPerPeerBlockMetadatasPooledResources{}
	)

	// Perform selection
	selected, _ := session.selectPeersFromPerPeerBlockMetadatas(
		perPeer, peerBlocksQueues, enqueueCh,
		newStaticRuntimeReadConsistencyLevel(opts.BootstrapConsistencyLevel()),
		testPeers(peers), pooled, metrics)

	// Assert selection first peer
	require.Equal(t, 1, len(selected))

	assert.Equal(t, start, selected[0].block.start)
	assert.Equal(t, int64(2), selected[0].block.size)
	assert.Equal(t, &checksum, selected[0].block.checksum)

	assert.Equal(t, 1, selected[0].block.reattempt.attempt)
	assert.Equal(t, []peer{peerA}, selected[0].block.reattempt.attempted)
}

func TestSelectPeersFromPerPeerBlockMetadatasAvoidsReattemptingFromAttemptedPeers(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions()
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	var (
		metrics          = session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
		peerA            = NewMockpeer(ctrl)
		peerB            = NewMockpeer(ctrl)
		peerC            = NewMockpeer(ctrl)
		peers            = preparedMockPeers(peerA, peerB, peerC)
		enqueueCh        = NewMockenqueueChannel(ctrl)
		peerBlocksQueues = mockPeerBlocksQueues(peers, opts)
	)
	defer peerBlocksQueues.closeAll()

	var (
		start     = timeZero
		checksum  = uint32(2)
		reattempt = blockMetadataReattempt{
			attempt:   1,
			attempted: []peer{peerA},
		}
		perPeer = []receivedBlockMetadata{
			{
				peer: peerA,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum, reattempt: reattempt,
				},
			},
			{
				peer: peerB,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum, reattempt: reattempt,
				},
			},
			{
				peer: peerC,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum, reattempt: reattempt,
				},
			},
		}
		pooled = selectPeersFromPerPeerBlockMetadatasPooledResources{}
	)

	// Track peer C as already having an assigned block so that the block
	// ends up assigned to peer B, which is otherwise just as eligible as
	// peer C to receive the assignment
	peerBlocksQueues.findQueue(peerC).trackAssigned(1)

	// Perform selection
	selected, _ := session.selectPeersFromPerPeerBlockMetadatas(
		perPeer, peerBlocksQueues, enqueueCh,
		newStaticRuntimeReadConsistencyLevel(opts.BootstrapConsistencyLevel()),
		testPeers(peers), pooled, metrics)

	// Assert selection length
	require.Equal(t, 1, len(selected))

	// Assert selection second peer
	assert.Equal(t, peerB, selected[0].peer)
	assert.Equal(t, start, selected[0].block.start)
	assert.Equal(t, int64(2), selected[0].block.size)
	assert.Equal(t, &checksum, selected[0].block.checksum)

	assert.Equal(t, 2, selected[0].block.reattempt.attempt)
	assert.Equal(t, []peer{
		peerA, peerB,
	}, selected[0].block.reattempt.attempted)
}

func TestSelectPeersFromPerPeerBlockMetadatasAvoidRetryWithLevelNone(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions().
		SetFetchSeriesBlocksMaxBlockRetries(0).
		SetBootstrapConsistencyLevel(topology.ReadConsistencyLevelNone)
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	var (
		metrics          = session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
		peerA            = NewMockpeer(ctrl)
		peerB            = NewMockpeer(ctrl)
		peerC            = NewMockpeer(ctrl)
		peers            = preparedMockPeers(peerA, peerB, peerC)
		enqueueCh        = NewMockenqueueChannel(ctrl)
		peerBlocksQueues = mockPeerBlocksQueues(peers, opts)
	)
	defer peerBlocksQueues.closeAll()

	var (
		start    = timeZero
		checksum = uint32(2)
		// The block should not be fetched at all since every peer has
		// already been attempted
		reattempt = blockMetadataReattempt{
			attempt:   3,
			attempted: []peer{peerA, peerB, peerC},
			errs:      []error{fmt.Errorf("errA"), fmt.Errorf("errB"), fmt.Errorf("errC")},
		}
		perPeer = []receivedBlockMetadata{
			{
				peer: peerA,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum, reattempt: reattempt,
				},
			},
			{
				peer: peerB,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum, reattempt: reattempt,
				},
			},
			{
				peer: peerC,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum, reattempt: reattempt,
				},
			},
		}
		pooled = selectPeersFromPerPeerBlockMetadatasPooledResources{}
	)

	// Perform selection
	selected, _ := session.selectPeersFromPerPeerBlockMetadatas(
		perPeer, peerBlocksQueues, enqueueCh,
		newStaticRuntimeReadConsistencyLevel(opts.BootstrapConsistencyLevel()),
		testPeers(peers), pooled, metrics)

	// Assert no selection
	require.Equal(t, 0, len(selected))
}

func TestSelectPeersFromPerPeerBlockMetadatasPerformsRetries(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions().
		SetFetchSeriesBlocksMaxBlockRetries(2)
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	var (
		metrics          = session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
		peerA            = NewMockpeer(ctrl)
		peerB            = NewMockpeer(ctrl)
		peers            = preparedMockPeers(peerA, peerB)
		enqueueCh        = NewMockenqueueChannel(ctrl)
		peerBlocksQueues = mockPeerBlocksQueues(peers, opts)
	)
	atomic.StoreUint64(&peerBlocksQueues[0].assigned, 16)
	atomic.StoreUint64(&peerBlocksQueues[1].assigned, 0)
	defer peerBlocksQueues.closeAll()

	var (
		start    = timeZero
		checksum = uint32(2)
		// Peer A has 2 attempts and peer B has 1 attempt, so peer B should be
		// selected as it has a retry remaining. Peer B is also preferred for
		// retrieval because we synthetically set the assigned blocks count
		// for peer A much higher than for peer B.
		reattempt = blockMetadataReattempt{
			attempt:   3,
			attempted: []peer{peerA, peerB, peerA},
		}
		perPeer = []receivedBlockMetadata{
			{
				peer: peerA,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum, reattempt: reattempt,
				},
			},
			{
				peer: peerB,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksum, reattempt: reattempt,
				},
			},
		}
		pooled = selectPeersFromPerPeerBlockMetadatasPooledResources{}
	)

	// Perform selection
	selected, _ := session.selectPeersFromPerPeerBlockMetadatas(
		perPeer, peerBlocksQueues, enqueueCh,
		newStaticRuntimeReadConsistencyLevel(opts.BootstrapConsistencyLevel()),
		testPeers(peers), pooled, metrics)

	// Assert selection
	require.Equal(t, 1, len(selected))

	// Assert block selected for second peer
	assert.True(t, start.Equal(selected[0].block.start))
	assert.Equal(t, int64(2), selected[0].block.size)
	assert.Equal(t, &checksum, selected[0].block.checksum)

	assert.Equal(t, 4, selected[0].block.reattempt.attempt)
	assert.Equal(t, []peer{
		peerA, peerB, peerA, peerB,
	}, selected[0].block.reattempt.attempted)
}

func TestSelectPeersFromPerPeerBlockMetadatasRetryOnFanoutConsistencyLevelFailure(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions().
		SetFetchSeriesBlocksMaxBlockRetries(0).
		SetBootstrapConsistencyLevel(topology.ReadConsistencyLevelMajority)
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)

	var (
		metrics          = session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
		peerA            = NewMockpeer(ctrl)
		peerB            = NewMockpeer(ctrl)
		peerC            = NewMockpeer(ctrl)
		peers            = preparedMockPeers(peerA, peerB, peerC)
		enqueueCh        = NewMockenqueueChannel(ctrl)
		peerBlocksQueues = mockPeerBlocksQueues(peers, opts)
	)
	defer peerBlocksQueues.closeAll()

	var (
		start     = timeZero
		checksums = []uint32{1, 2, 3}
		// This simulates a fanout fetch where peer A has already returned
		// successfully, and retries for peer B and peer C are enqueued and
		// processed for reselection, eventually satisfying another fanout
		// retry once both are processed.
		fanoutFetchState = &blockFanoutFetchState{numPending: 2, numSuccess: 1}
		initialPerPeer   = []receivedBlockMetadata{
			{
				peer: peerA,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksums[0],
				},
			},
			{
				peer: peerB,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksums[1],
				},
			},
			{
				peer: peerC,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksums[2],
				},
			},
		}
		firstRetry = []receivedBlockMetadata{
			{
				peer: peerB,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksums[1], reattempt: blockMetadataReattempt{
						attempt:              1,
						fanoutFetchState:     fanoutFetchState,
						attempted:            []peer{peerB},
						fetchedPeersMetadata: initialPerPeer,
					},
				},
			},
		}
		secondRetry = []receivedBlockMetadata{
			{
				peer: peerC,
				id:   fooID,
				block: blockMetadata{
					start: start, size: 2, checksum: &checksums[2], reattempt: blockMetadataReattempt{
						attempt:              1,
						fanoutFetchState:     fanoutFetchState,
						attempted:            []peer{peerC},
						fetchedPeersMetadata: initialPerPeer,
					},
				},
			},
		}
		pooled = selectPeersFromPerPeerBlockMetadatasPooledResources{}
	)

	// Perform first selection
	selected, _ := session.selectPeersFromPerPeerBlockMetadatas(
		firstRetry, peerBlocksQueues, enqueueCh,
		newStaticRuntimeReadConsistencyLevel(opts.BootstrapConsistencyLevel()),
		testPeers(peers), pooled, metrics)

	// Assert selection
	require.Equal(t, 0, len(selected))

	// Before second selection expect the re-enqueue of the block
	var wg sync.WaitGroup
	wg.Add(1)
	enqueueCh.EXPECT().
		enqueueDelayed(1).
		Return(func(reEnqueuedPerPeer []receivedBlockMetadata) {
			assert.Equal(t, len(initialPerPeer), len(reEnqueuedPerPeer))
			for i := range reEnqueuedPerPeer {
				expected := initialPerPeer[i]
				actual := reEnqueuedPerPeer[i]

				assert.True(t, expected.id.Equal(actual.id))
				assert.Equal(t, expected.peer, actual.peer)
				assert.Equal(t, expected.block.start, actual.block.start)
				assert.Equal(t, expected.block.size, actual.block.size)
				assert.Equal(t, expected.block.checksum, actual.block.checksum)

				// Ensure no reattempt data is attached
				assert.Equal(t, blockMetadataReattempt{}, actual.block.reattempt)
			}
		}, func() {
			wg.Done()
		}, nil)

	// Perform second selection
	selected, _ = session.selectPeersFromPerPeerBlockMetadatas(
		secondRetry, peerBlocksQueues, enqueueCh,
		newStaticRuntimeReadConsistencyLevel(opts.BootstrapConsistencyLevel()),
		testPeers(peers), pooled, metrics)

	// Assert selection
	require.Equal(t, 0, len(selected))

	// Wait for re-enqueue of the block
	wg.Wait()
}

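// TestStreamBlocksBatchFromPeerReenqueuesOnFailCall verifies that when the
// FetchBlocksRaw RPC fails, even after exhausting the retrier, the whole
// batch is re-enqueued for another attempt.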
func TestStreamBlocksBatchFromPeerReenqueuesOnFailCall(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions()
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)
	session.reattemptStreamBlocksFromPeersFn = func(
		blocks []receivedBlockMetadata,
		enqueueCh enqueueChannel,
		attemptErr error,
		_ reason,
		reattemptType reattemptType,
		_ *streamFromPeersMetrics,
	) error {
		enqueue, done, err := enqueueCh.enqueueDelayed(len(blocks))
		require.NoError(t, err)
		session.streamBlocksReattemptFromPeersEnqueue(blocks, attemptErr,
			reattemptType, enqueue, done)
		return nil
	}

	mockHostQueues, mockClients := mockHostQueuesAndClientsForFetchBootstrapBlocks(ctrl, opts)
	session.newHostQueueFn = mockHostQueues.newHostQueueFn()
	require.NoError(t, session.Open())

	var (
		//nolint: durationcheck
		start   = xtime.Now().Truncate(blockSize).Add(blockSize * -(24 - 1))
		retrier = xretry.NewRetrier(xretry.NewOptions().
			SetMaxRetries(1).
			SetInitialBackoff(time.Millisecond))
		peerIdx   = len(mockHostQueues) - 1
		peer      = mockHostQueues[peerIdx]
		client    = mockClients[peerIdx]
		enqueueCh = newEnqueueChannel(session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw))
		batch     = []receivedBlockMetadata{
			{
				id: fooID,
				block: blockMetadata{
					start: start, size: 2, reattempt: blockMetadataReattempt{
						retryPeersMetadata: []receivedBlockMetadata{
							{block: blockMetadata{start: start, size: 2}},
						},
					},
				}},
			{
				id: barID,
				block: blockMetadata{
					start: start, size: 2, reattempt: blockMetadataReattempt{
						retryPeersMetadata: []receivedBlockMetadata{
							{block: blockMetadata{start: start, size: 2}},
						},
					},
				}},
		}
	)

	// Fail the call twice due to retry
	client.EXPECT().FetchBlocksRaw(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("an error")).Times(2)

	// Attempt stream blocks
	bopts := result.NewOptions()
	m := session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
	session.streamBlocksBatchFromPeer(testsNsMetadata(t), 0, peer, batch, bopts, nil, enqueueCh, retrier, m)

	// Assert result
	assertEnqueueChannel(t, batch, enqueueCh)

	assert.NoError(t, session.Close())
}

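// TestStreamBlocksBatchFromPeerVerifiesBlockErr verifies that a block returned
// with a per-block error is re-enqueued while intact blocks from the same
// batch are still added to the result.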
func TestStreamBlocksBatchFromPeerVerifiesBlockErr(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	opts := newSessionTestAdminOptions()
	s, err := newSession(opts)
	require.NoError(t, err)
	session := s.(*session)
	session.reattemptStreamBlocksFromPeersFn = func(
		blocks []receivedBlockMetadata,
		enqueueCh enqueueChannel,
		attemptErr error,
		_ reason,
		reattemptType reattemptType,
		_ *streamFromPeersMetrics,
	) error {
		enqueue, done, err := enqueueCh.enqueueDelayed(len(blocks))
		require.NoError(t, err)
		session.streamBlocksReattemptFromPeersEnqueue(blocks, attemptErr,
			reattemptType, enqueue, done)
		return nil
	}

	mockHostQueues, mockClients := mockHostQueuesAndClientsForFetchBootstrapBlocks(ctrl, opts)
	session.newHostQueueFn = mockHostQueues.newHostQueueFn()
	require.NoError(t, session.Open())

	//nolint: durationcheck
	start := xtime.Now().Truncate(blockSize).Add(blockSize * -(24 - 1))
	enc := m3tsz.NewEncoder(start, nil, true, encoding.NewOptions())
	require.NoError(t, enc.Encode(ts.Datapoint{
		TimestampNanos: start.Add(10 * time.Second),
		Value:          42,
	}, xtime.Second, nil))

	ctx := context.NewBackground()
	defer ctx.Close()

	reader, ok := enc.Stream(ctx)
	require.True(t, ok)
	segment, err := reader.Segment()
	require.NoError(t, err)
	rawBlockData, err := xio.ToBytes(reader)
	require.Equal(t, io.EOF, err)
	require.Equal(t, len(rawBlockData), segment.Len())
	rawBlockLen := int64(len(rawBlockData))

	var (
		retrier = xretry.NewRetrier(xretry.NewOptions().
			SetMaxRetries(1).
			SetInitialBackoff(time.Millisecond))
		peerIdx       = len(mockHostQueues) - 1
		peer          = mockHostQueues[peerIdx]
		client        = mockClients[peerIdx]
		enqueueCh     = newEnqueueChannel(session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw))
		blockChecksum = digest.Checksum(rawBlockData)
		batch         = []receivedBlockMetadata{
			{
				id: fooID,
				block: blockMetadata{
					start: start, size: rawBlockLen, reattempt: blockMetadataReattempt{
						retryPeersMetadata: []receivedBlockMetadata{
							{block: blockMetadata{start: start, size: rawBlockLen, checksum: &blockChecksum}},
						},
					},
				},
			},
			{
				id: barID,
				block: blockMetadata{
					start: start, size: rawBlockLen, reattempt: blockMetadataReattempt{
						retryPeersMetadata: []receivedBlockMetadata{
							{block: blockMetadata{start: start, size: rawBlockLen, checksum: &blockChecksum}},
						},
					},
				},
			},
			{
				id: barID,
				block: blockMetadata{
					start: start.Add(blockSize), size: rawBlockLen, reattempt: blockMetadataReattempt{
						retryPeersMetadata: []receivedBlockMetadata{
							{block: blockMetadata{start: start.Add(blockSize), size: rawBlockLen, checksum: &blockChecksum}},
						},
					},
				},
			},
		}
	)

	// Return a batch where the final bar block carries an error
	client.EXPECT().
		FetchBlocksRaw(gomock.Any(), gomock.Any()).
		Return(&rpc.FetchBlocksRawResult_{
			Elements: []*rpc.Blocks{
				// First foo block intact
				{ID: []byte("foo"), Blocks: []*rpc.Block{
					{Start: int64(start), Segments: &rpc.Segments{
						Merged: &rpc.Segment{
							Head: rawBlockData[:len(rawBlockData)-1],
							Tail: []byte{rawBlockData[len(rawBlockData)-1]},
						},
					}},
				}},
				// First bar block intact, second with error
				{ID: []byte("bar"), Blocks: []*rpc.Block{
					{Start: int64(start), Segments: &rpc.Segments{
						Merged: &rpc.Segment{
							Head: rawBlockData[:len(rawBlockData)-1],
							Tail: []byte{rawBlockData[len(rawBlockData)-1]},
						},
					}},
				}},
				{ID: []byte("bar"), Blocks: []*rpc.Block{
					{Start: int64(start.Add(blockSize)), Err: &rpc.Error{
						Type:    rpc.ErrorType_INTERNAL_ERROR,
						Message: "an error",
					}},
				}},
			},
		}, nil)

	// Attempt stream blocks
	bopts := result.NewOptions()
	m := session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
	r := newBulkBlocksResult(namespace.Context{}, opts, bopts, session.pools.tagDecoder, session.pools.id)
	session.streamBlocksBatchFromPeer(testsNsMetadata(t), 0, peer, batch, bopts, r, enqueueCh, retrier, m)

	// Assert result
	assertEnqueueChannel(t, batch[2:], enqueueCh)

	// Assert length of blocks result
	assert.Equal(t, 2, r.result.AllSeries().Len())
	fooBlocks, ok := r.result.AllSeries().Get(fooID)
	require.True(t, ok)
	assert.Equal(t, 1, fooBlocks.Blocks.Len())
	barBlocks, ok := r.result.AllSeries().Get(barID)
	require.True(t, ok)
	assert.Equal(t, 1, barBlocks.Blocks.Len())

	assert.NoError(t, session.Close())
}

// TODO: add test TestStreamBlocksBatchFromPeerDoesNotRetryOnUnreachable

// TODO: add test TestVerifyFetchedBlockSegmentsNil

// TODO: add test TestVerifyFetchedBlockSegmentsNoMergedOrUnmerged

  1581  func TestStreamBlocksBatchFromPeerVerifiesBlockChecksum(t *testing.T) {
  1582  	ctrl := gomock.NewController(t)
  1583  	defer ctrl.Finish()
  1584  
  1585  	opts := newSessionTestAdminOptions()
  1586  	s, err := newSession(opts)
  1587  	require.NoError(t, err)
  1588  	session := s.(*session)
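	// Stub the reattempt path so failed blocks are synchronously re-enqueued
	// and the test can observe them on the enqueue channel.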
  1589  	session.reattemptStreamBlocksFromPeersFn = func(
  1590  		blocks []receivedBlockMetadata,
  1591  		enqueueCh enqueueChannel,
  1592  		attemptErr error,
  1593  		_ reason,
  1594  		reattemptType reattemptType,
  1595  		_ *streamFromPeersMetrics,
  1596  	) error {
  1597  		enqueue, done, err := enqueueCh.enqueueDelayed(len(blocks))
  1598  		require.NoError(t, err)
  1599  		session.streamBlocksReattemptFromPeersEnqueue(blocks, attemptErr,
  1600  			reattemptType, enqueue, done)
  1601  		return nil
  1602  	}
  1603  
  1604  	mockHostQueues, mockClients := mockHostQueuesAndClientsForFetchBootstrapBlocks(ctrl, opts)
  1605  	session.newHostQueueFn = mockHostQueues.newHostQueueFn()
  1606  
  1607  	require.NoError(t, session.Open())
  1608  
  1609  	//nolint: durationcheck
  1610  	start := xtime.Now().Truncate(blockSize).Add(blockSize * -(24 - 1))
  1611  
  1612  	enc := m3tsz.NewEncoder(start, nil, true, encoding.NewOptions())
  1613  	require.NoError(t, enc.Encode(ts.Datapoint{
  1614  		TimestampNanos: start.Add(10 * time.Second),
  1615  		Value:          42,
  1616  	}, xtime.Second, nil))
  1617  
  1618  	ctx := context.NewBackground()
  1619  	defer ctx.Close()
  1620  
  1621  	reader, ok := enc.Stream(ctx)
  1622  	require.True(t, ok)
  1623  	segment, err := reader.Segment()
  1624  	require.NoError(t, err)
  1625  	rawBlockData, err := xio.ToBytes(reader)
  1626  	require.Equal(t, io.EOF, err)
  1627  	require.Equal(t, len(rawBlockData), segment.Len())
  1628  	rawBlockLen := int64(len(rawBlockData))
  1629  
  1630  	var (
  1631  		retrier = xretry.NewRetrier(xretry.NewOptions().
  1632  			SetMaxRetries(1).
  1633  			SetInitialBackoff(time.Millisecond))
  1634  		peerIdx       = len(mockHostQueues) - 1
  1635  		peer          = mockHostQueues[peerIdx]
  1636  		client        = mockClients[peerIdx]
  1637  		enqueueCh     = newEnqueueChannel(session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw))
  1638  		blockChecksum = digest.Checksum(rawBlockData)
  1639  		batch         = []receivedBlockMetadata{
  1640  			{
  1641  				id: fooID,
  1642  				block: blockMetadata{
  1643  					start: start, size: rawBlockLen, reattempt: blockMetadataReattempt{
  1644  						retryPeersMetadata: []receivedBlockMetadata{
  1645  							{block: blockMetadata{start: start, size: rawBlockLen, checksum: &blockChecksum}},
  1646  						},
  1647  					},
  1648  				},
  1649  			},
  1650  			{
  1651  				id: barID,
  1652  				block: blockMetadata{
  1653  					start: start, size: rawBlockLen, reattempt: blockMetadataReattempt{
  1654  						retryPeersMetadata: []receivedBlockMetadata{
  1655  							{block: blockMetadata{start: start, size: rawBlockLen, checksum: &blockChecksum}},
  1656  						},
  1657  					},
  1658  				},
  1659  			},
  1660  			{
  1661  				id: barID,
  1662  				block: blockMetadata{
  1663  					start: start.Add(blockSize), size: rawBlockLen, reattempt: blockMetadataReattempt{
  1664  						retryPeersMetadata: []receivedBlockMetadata{
  1665  							{block: blockMetadata{start: start.Add(blockSize), size: rawBlockLen, checksum: &blockChecksum}},
  1666  						},
  1667  					},
  1668  				},
  1669  			},
  1670  		}
  1671  	)
  1672  
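	// Compute the checksum over head+tail the same way the digest package
	// does server-side, then perturb it by one to build an invalid checksum.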
  1673  	head := rawBlockData[:len(rawBlockData)-1]
  1674  	tail := []byte{rawBlockData[len(rawBlockData)-1]}
  1675  	d := digest.NewDigest().Update(head).Update(tail).Sum32()
  1676  	validChecksum := int64(d)
  1677  	invalidChecksum := 1 + validChecksum
  1678  
  1679  	client.EXPECT().
  1680  		FetchBlocksRaw(gomock.Any(), gomock.Any()).
  1681  		Return(&rpc.FetchBlocksRawResult_{
  1682  			Elements: []*rpc.Blocks{
  1683  				// valid foo block
  1684  				{ID: []byte("foo"), Blocks: []*rpc.Block{
  1685  					{Start: int64(start), Checksum: &validChecksum, Segments: &rpc.Segments{
  1686  						Merged: &rpc.Segment{
  1687  							Head: head,
  1688  							Tail: tail,
  1689  						},
  1690  					}},
  1691  				}},
  1692  				{ID: []byte("bar"), Blocks: []*rpc.Block{
  1693  					// invalid bar block
  1694  					{Start: int64(start), Checksum: &invalidChecksum, Segments: &rpc.Segments{
  1695  						Merged: &rpc.Segment{
  1696  							Head: head,
  1697  							Tail: tail,
  1698  						},
  1699  					}},
  1700  				}},
  1701  				{ID: []byte("bar"), Blocks: []*rpc.Block{
  1702  					// valid bar block, no checksum
  1703  					{Start: int64(start.Add(blockSize)), Segments: &rpc.Segments{
  1704  						Merged: &rpc.Segment{
  1705  							Head: head,
  1706  							Tail: tail,
  1707  						},
  1708  					}},
  1709  				}},
  1710  			},
  1711  		}, nil)
  1712  
  1713  	// Attempt stream blocks
  1714  	bopts := result.NewOptions()
  1715  	m := session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw)
  1716  	r := newBulkBlocksResult(namespace.Context{}, opts, bopts, session.pools.tagDecoder, session.pools.id)
  1717  	session.streamBlocksBatchFromPeer(testsNsMetadata(t), 0, peer, batch, bopts, r, enqueueCh, retrier, m)
  1718  
  1719  	// Assert enqueueChannel contents (bad bar block)
  1720  	assertEnqueueChannel(t, batch[1:2], enqueueCh)
  1721  
  1722  	// Assert length of blocks result
  1723  	assert.Equal(t, 2, r.result.AllSeries().Len())
  1724  
  1725  	fooBlocks, ok := r.result.AllSeries().Get(fooID)
  1726  	require.True(t, ok)
  1727  	assert.Equal(t, 1, fooBlocks.Blocks.Len())
  1728  	_, ok = fooBlocks.Blocks.BlockAt(start)
  1729  	assert.True(t, ok)
  1730  
  1731  	barBlocks, ok := r.result.AllSeries().Get(barID)
  1732  	require.True(t, ok)
  1733  	assert.Equal(t, 1, barBlocks.Blocks.Len())
  1734  	_, ok = barBlocks.Blocks.BlockAt(start.Add(blockSize))
  1735  	assert.True(t, ok)
  1736  
  1737  	assert.NoError(t, session.Close())
  1738  }
  1739  
  1740  func TestBlocksResultAddBlockFromPeerReadMerged(t *testing.T) {
  1741  	opts := newSessionTestAdminOptions()
  1742  	bopts := newResultTestOptions()
  1743  	start := xtime.Now().Truncate(time.Hour)
  1744  
  1745  	blockSize := time.Minute
  1746  	bs := int64(blockSize)
  1747  	rpcBlockSize := &bs
  1748  
  1749  	bl := &rpc.Block{
  1750  		Start: int64(start),
  1751  		Segments: &rpc.Segments{Merged: &rpc.Segment{
  1752  			Head:      []byte{1, 2},
  1753  			Tail:      []byte{3},
  1754  			BlockSize: rpcBlockSize,
  1755  		}},
  1756  	}
  1757  
  1758  	r := newBulkBlocksResult(namespace.Context{}, opts, bopts,
  1759  		testTagDecodingPool, testIDPool)
  1760  	r.addBlockFromPeer(fooID, fooTags, testHost, bl)
  1761  
  1762  	series := r.result.AllSeries()
  1763  	assert.Equal(t, 1, series.Len())
  1764  
  1765  	sl, ok := series.Get(fooID)
  1766  	assert.True(t, ok)
  1767  	blocks := sl.Blocks
  1768  	assert.Equal(t, 1, blocks.Len())
  1769  	result, ok := blocks.BlockAt(start)
  1770  	assert.True(t, ok)
  1771  
  1772  	ctx := context.NewBackground()
  1773  	defer ctx.Close()
  1774  
  1775  	stream, err := result.Stream(ctx)
  1776  	require.NoError(t, err)
  1777  	require.NotNil(t, stream)
  1778  
  1779  	// block reader has correct start time and block size
  1780  	assert.Equal(t, start, stream.Start)
  1781  	assert.Equal(t, blockSize, stream.BlockSize)
  1782  
  1783  	seg, err := stream.Segment()
  1784  	require.NoError(t, err)
  1785  
  1786  	// Assert block has data
  1787  	data, err := xio.ToBytes(xio.NewSegmentReader(seg))
  1788  	require.Equal(t, io.EOF, err)
  1789  	assert.Equal(t, []byte{1, 2, 3}, data)
  1790  }
  1791  
  1792  func TestBlocksResultAddBlockFromPeerReadUnmerged(t *testing.T) {
  1793  	var wrapEncoderFn func(enc encoding.Encoder) encoding.Encoder
  1794  	eops := encoding.NewOptions()
  1795  	intopt := true
  1796  
  1797  	encoderPool := encoding.NewEncoderPool(nil)
  1798  	encoderPool.Init(func() encoding.Encoder {
  1799  		enc := m3tsz.NewEncoder(0, nil, intopt, eops)
  1800  		if wrapEncoderFn != nil {
  1801  			enc = wrapEncoderFn(enc)
  1802  		}
  1803  		return enc
  1804  	})
  1805  
  1806  	opts := newSessionTestAdminOptions()
  1807  	bopts := result.NewOptions()
  1808  	bopts = bopts.SetDatabaseBlockOptions(bopts.DatabaseBlockOptions().
  1809  		SetEncoderPool(encoderPool).
  1810  		SetMultiReaderIteratorPool(newSessionTestMultiReaderIteratorPool()))
  1811  
  1812  	start := xtime.Now()
  1813  
  1814  	vals0 := []testValue{
  1815  		{1.0, start, xtime.Second, []byte{1, 2, 3}},
  1816  		{4.0, start.Add(3 * time.Second), xtime.Second, nil},
  1817  	}
  1818  
  1819  	vals1 := []testValue{
  1820  		{2.0, start.Add(1 * time.Second), xtime.Second, []byte{4, 5, 6}},
  1821  	}
  1822  
  1823  	vals2 := []testValue{
  1824  		{3.0, start.Add(2 * time.Second), xtime.Second, []byte{7, 8, 9}},
  1825  	}
  1826  
  1827  	var all []testValue
  1828  	bl := &rpc.Block{
  1829  		Start:    int64(start),
  1830  		Segments: &rpc.Segments{},
  1831  	}
  1832  	for _, vals := range [][]testValue{vals0, vals1, vals2} {
  1833  		nsCtx := namespace.NewContextFor(ident.StringID("default"), opts.SchemaRegistry())
  1834  		encoder := encoderPool.Get()
  1835  		encoder.Reset(start, 0, nsCtx.Schema)
  1836  		for _, val := range vals {
  1837  			dp := ts.Datapoint{TimestampNanos: val.t, Value: val.value}
  1838  			assert.NoError(t, encoder.Encode(dp, val.unit, val.annotation))
  1839  			all = append(all, val)
  1840  		}
  1841  		result := encoder.Discard()
  1842  		seg := &rpc.Segment{Head: result.Head.Bytes(), Tail: result.Tail.Bytes()}
  1843  		bl.Segments.Unmerged = append(bl.Segments.Unmerged, seg)
  1844  	}
  1845  
  1846  	r := newBulkBlocksResult(namespace.Context{}, opts, bopts, testTagDecodingPool, testIDPool)
  1847  	r.addBlockFromPeer(fooID, fooTags, testHost, bl)
  1848  
  1849  	series := r.result.AllSeries()
  1850  	assert.Equal(t, 1, series.Len())
  1851  
  1852  	sl, ok := series.Get(fooID)
  1853  	assert.True(t, ok)
  1854  	blocks := sl.Blocks
  1855  	assert.Equal(t, 1, blocks.Len())
  1856  	result, ok := blocks.BlockAt(start)
  1857  	assert.True(t, ok)
  1858  
  1859  	ctx := context.NewBackground()
  1860  	defer ctx.Close()
  1861  
  1862  	stream, err := result.Stream(ctx)
  1863  	assert.NoError(t, err)
  1864  
  1865  	// Sort the test values
  1866  	sort.Sort(testValuesByTime(all))
  1867  
  1868  	// Assert test values sorted match the block values
  1869  	iter := m3tsz.NewReaderIterator(stream, intopt, eops)
  1870  	defer iter.Close()
  1871  	asserted := 0
  1872  	for iter.Next() {
  1873  		idx := asserted
  1874  		dp, unit, annotation := iter.Current()
  1875  		assert.Equal(t, all[idx].value, dp.Value)
  1876  		assert.Equal(t, all[idx].t, dp.TimestampNanos)
  1877  		assert.Equal(t, all[idx].unit, unit)
  1878  		assert.Equal(t, all[idx].annotation, []byte(annotation))
  1879  		asserted++
  1880  	}
  1881  	assert.Equal(t, len(all), asserted)
  1882  	assert.NoError(t, iter.Err())
  1883  }
  1884  
  1885  // TODO: add test TestBlocksResultAddBlockFromPeerMergeExistingResult
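
// The sketch below is one possible shape for the TODO above: a hedged sketch,
// assuming addBlockFromPeer merges a second merged segment arriving for the
// same series and block start into the existing block. The helpers and pools
// used here all appear elsewhere in this file; the merge semantics are the
// assumption being exercised.
func TestBlocksResultAddBlockFromPeerMergeExistingResultSketch(t *testing.T) {
	eops := encoding.NewOptions()
	encoderPool := encoding.NewEncoderPool(nil)
	encoderPool.Init(func() encoding.Encoder {
		return m3tsz.NewEncoder(0, nil, true, eops)
	})

	opts := newSessionTestAdminOptions()
	bopts := result.NewOptions()
	bopts = bopts.SetDatabaseBlockOptions(bopts.DatabaseBlockOptions().
		SetEncoderPool(encoderPool).
		SetMultiReaderIteratorPool(newSessionTestMultiReaderIteratorPool()))

	start := xtime.Now().Truncate(time.Hour)

	// Encode a single datapoint into a merged rpc.Block at the given offset.
	newBlockAt := func(offset time.Duration, value float64) *rpc.Block {
		enc := encoderPool.Get()
		enc.Reset(start, 0, nil)
		require.NoError(t, enc.Encode(ts.Datapoint{
			TimestampNanos: start.Add(offset),
			Value:          value,
		}, xtime.Second, nil))
		res := enc.Discard()
		return &rpc.Block{
			Start: int64(start),
			Segments: &rpc.Segments{Merged: &rpc.Segment{
				Head: res.Head.Bytes(),
				Tail: res.Tail.Bytes(),
			}},
		}
	}

	r := newBulkBlocksResult(namespace.Context{}, opts, bopts,
		testTagDecodingPool, testIDPool)
	require.NoError(t, r.addBlockFromPeer(fooID, fooTags, testHost, newBlockAt(time.Second, 1.0)))
	require.NoError(t, r.addBlockFromPeer(fooID, fooTags, testHost, newBlockAt(2*time.Second, 2.0)))

	// Both adds should collapse into a single series entry with a single
	// block at the original start time.
	series := r.result.AllSeries()
	assert.Equal(t, 1, series.Len())
	sl, ok := series.Get(fooID)
	require.True(t, ok)
	assert.Equal(t, 1, sl.Blocks.Len())
	_, ok = sl.Blocks.BlockAt(start)
	assert.True(t, ok)
}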
  1886  
  1887  func TestBlocksResultAddBlockFromPeerErrorOnNoSegments(t *testing.T) {
  1888  	opts := newSessionTestAdminOptions()
  1889  	bopts := result.NewOptions()
  1890  	r := newBulkBlocksResult(namespace.Context{}, opts, bopts, testTagDecodingPool, testIDPool)
  1891  
  1892  	bl := &rpc.Block{Start: time.Now().UnixNano()}
  1893  	err := r.addBlockFromPeer(fooID, fooTags, testHost, bl)
  1894  	assert.Error(t, err)
  1895  	assert.Equal(t, errSessionBadBlockResultFromPeer, err)
  1896  }
  1897  
  1898  func TestBlocksResultAddBlockFromPeerErrorOnNoSegmentsData(t *testing.T) {
  1899  	opts := newSessionTestAdminOptions()
  1900  	bopts := result.NewOptions()
  1901  	r := newBulkBlocksResult(namespace.Context{}, opts, bopts, testTagDecodingPool, testIDPool)
  1902  
  1903  	bl := &rpc.Block{Start: time.Now().UnixNano(), Segments: &rpc.Segments{}}
  1904  	err := r.addBlockFromPeer(fooID, fooTags, testHost, bl)
  1905  	assert.Error(t, err)
  1906  	assert.Equal(t, errSessionBadBlockResultFromPeer, err)
  1907  }
  1908  
  1909  func TestEnqueueChannelEnqueueDelayed(t *testing.T) {
  1910  	ctrl := gomock.NewController(t)
  1911  	defer ctrl.Finish()
  1912  
  1913  	opts := newSessionTestAdminOptions()
  1914  	s, err := newSession(opts)
  1915  	assert.NoError(t, err)
  1916  	session := s.(*session)
  1917  	enqueueCh := newEnqueueChannel(session.newPeerMetadataStreamingProgressMetrics(0, resultTypeRaw))
  1918  
  1919  	// Enqueue multiple blocks metadata
  1920  	numBlocks := 10
  1921  	blocks := make([][]receivedBlockMetadata, numBlocks)
  1922  	enqueueFn, enqueueDelayedDone, err := enqueueCh.enqueueDelayed(len(blocks))
  1923  	require.NoError(t, err)
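	// Delayed reservations count as unprocessed before anything is actually
	// enqueued, so the channel reports the full length but nothing is
	// readable yet.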
  1924  
  1925  	require.Equal(t, numBlocks, enqueueCh.unprocessedLen())
  1926  	enqueueChInputs := enqueueCh.read()
  1927  	require.Equal(t, 0, len(enqueueChInputs))
  1928  
  1929  	// Actually enqueue the blocks
  1930  	for i := 0; i < numBlocks; i++ {
  1931  		enqueueFn(blocks[i])
  1932  	}
  1933  	enqueueDelayedDone()
  1934  
  1935  	require.Equal(t, numBlocks, enqueueCh.unprocessedLen())
  1936  	enqueueChInputs = enqueueCh.read()
  1937  	require.Equal(t, numBlocks, len(enqueueChInputs))
  1938  
  1939  	// Process the blocks
  1941  	for i := 0; i < numBlocks; i++ {
  1942  		<-enqueueChInputs
  1943  		enqueueCh.trackProcessed(1)
  1944  	}
  1945  
  1946  	require.Equal(t, 0, enqueueCh.unprocessedLen())
  1947  	enqueueChInputs = enqueueCh.read()
  1948  	require.Equal(t, 0, len(enqueueChInputs))
  1949  }
  1950  
  1951  func mockPeerBlocksQueues(peers []peer, opts AdminOptions) peerBlocksQueues {
  1952  	var (
  1953  		peerQueues peerBlocksQueues
  1954  		workers    = xsync.NewWorkerPool(opts.FetchSeriesBlocksBatchConcurrency())
  1955  	)
  1956  	for _, peer := range peers {
  1957  		size := opts.FetchSeriesBlocksBatchSize()
  1958  		drainEvery := 100 * time.Millisecond
  1959  		queue := newPeerBlocksQueue(peer, size, drainEvery, workers, func(batch []receivedBlockMetadata) {
  1960  			// No-op
  1961  		})
  1962  		peerQueues = append(peerQueues, queue)
  1963  	}
  1964  	return peerQueues
  1965  }
  1966  
  1967  func preparedMockPeers(peers ...*Mockpeer) []peer {
  1968  	var result []peer
  1969  	for i, peer := range peers {
  1970  		id := fmt.Sprintf("mockpeer%d", i)
  1971  		addr := fmt.Sprintf("%s:9000", id)
  1972  		peer.EXPECT().Host().Return(topology.NewHost(id, addr)).AnyTimes()
  1973  		result = append(result, peer)
  1974  	}
  1975  	return result
  1976  }
  1977  
  1978  type MockHostQueues []*MockhostQueue
  1979  
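// newHostQueueFn hands out the mock host queues in creation order, using an
// atomic counter so concurrent queue construction stays race-free.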
  1980  func (qs MockHostQueues) newHostQueueFn() newHostQueueFn {
  1981  	idx := uint64(0)
  1982  	return func(
  1983  		host topology.Host,
  1984  		opts hostQueueOpts,
  1985  	) (hostQueue, error) {
  1986  		return qs[atomic.AddUint64(&idx, 1)-1], nil
  1987  	}
  1988  }
  1989  
  1990  type MockTChanNodes []*rpc.MockTChanNode
  1991  
  1992  func (c MockTChanNodes) expectFetchMetadataAndReturn(
  1993  	result []testBlocksMetadata,
  1994  	opts AdminOptions,
  1995  ) {
  1996  	for _, client := range c {
  1997  		expectFetchMetadataAndReturn(client, result, opts)
  1998  	}
  1999  }
  2000  
  2001  func mockHostQueuesAndClientsForFetchBootstrapBlocks(
  2002  	ctrl *gomock.Controller,
  2003  	opts AdminOptions,
  2004  ) (MockHostQueues, MockTChanNodes) {
  2005  	var (
  2006  		hostQueues MockHostQueues
  2007  		clients    MockTChanNodes
  2008  	)
  2009  	hostShardSets := sessionTestHostAndShards(sessionTestShardSet())
  2010  	for i := 0; i < len(hostShardSets); i++ {
  2011  		host := hostShardSets[i].Host()
  2012  		hostQueue, client := defaultHostAndClientWithExpect(ctrl, host, opts)
  2013  		hostQueues = append(hostQueues, hostQueue)
  2014  		clients = append(clients, client)
  2015  	}
  2016  	return hostQueues, clients
  2017  }
  2018  
  2019  func defaultHostAndClientWithExpect(
  2020  	ctrl *gomock.Controller,
  2021  	host topology.Host,
  2022  	opts AdminOptions,
  2023  ) (*MockhostQueue, *rpc.MockTChanNode) {
  2024  	client := rpc.NewMockTChanNode(ctrl)
  2025  	connectionPool := NewMockconnectionPool(ctrl)
  2026  	connectionPool.EXPECT().NextClient().Return(client, &noopPooledChannel{}, nil).AnyTimes()
  2027  
  2028  	hostQueue := NewMockhostQueue(ctrl)
  2029  	hostQueue.EXPECT().Open()
  2030  	hostQueue.EXPECT().Host().Return(host).AnyTimes()
  2031  	hostQueue.EXPECT().ConnectionCount().Return(opts.MinConnectionCount()).Times(sessionTestShards)
  2032  	hostQueue.EXPECT().ConnectionPool().Return(connectionPool).AnyTimes()
  2033  	hostQueue.EXPECT().BorrowConnection(gomock.Any()).Do(func(fn WithConnectionFn) {
  2034  		fn(client, &noopPooledChannel{})
  2035  	}).Return(nil).AnyTimes()
  2036  	hostQueue.EXPECT().Close()
  2037  
  2038  	return hostQueue, client
  2039  }
  2040  
  2041  func resultMetadataFromBlocks(
  2042  	blocks []testBlocks,
  2043  ) []testBlocksMetadata {
  2044  	var result []testBlocksMetadata
  2045  	for _, b := range blocks {
  2046  		bm := []testBlockMetadata{}
  2047  		for _, bl := range b.blocks {
  2048  			size := int64(0)
  2049  			d := digest.NewDigest()
  2050  			if merged := bl.segments.merged; merged != nil {
  2051  				size += int64(len(merged.head) + len(merged.tail))
  2052  				d = d.Update(merged.head).Update(merged.tail)
  2053  			}
  2054  			for _, unmerged := range bl.segments.unmerged {
  2055  				size += int64(len(unmerged.head) + len(unmerged.tail))
  2056  				d = d.Update(unmerged.head).Update(unmerged.tail)
  2057  			}
  2058  			checksum := d.Sum32()
  2059  			m := testBlockMetadata{
  2060  				start:    bl.start,
  2061  				size:     &size,
  2062  				checksum: &checksum,
  2063  			}
  2064  			bm = append(bm, m)
  2065  		}
  2066  		m := testBlocksMetadata{
  2067  			id:     b.id,
  2068  			blocks: bm,
  2069  		}
  2070  		result = append(result, m)
  2071  	}
  2072  	return result
  2073  }
  2074  
  2075  func expectedRepairFetchRequestsAndResponses(
  2076  	blocks []testBlocks,
  2077  	batchSize int,
  2078  ) ([]fetchBlocksReq, [][]testBlocks) {
  2079  	requests := make([]fetchBlocksReq, 0, len(blocks))
  2080  	responses := make([][]testBlocks, 0, len(blocks))
  2081  	request := fetchBlocksReq{
  2082  		params: []fetchBlocksReqParam{},
  2083  	}
  2084  	response := []testBlocks{}
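	// Accumulate request params and response blocks, flushing a completed
	// request/response pair every batchSize series.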
  2085  	for idx := 0; idx < len(blocks); idx++ {
  2086  		starts := make([]xtime.UnixNano, 0, len(blocks[idx].blocks))
  2087  		for j := 0; j < len(blocks[idx].blocks); j++ {
  2088  			starts = append(starts, blocks[idx].blocks[j].start)
  2089  		}
  2090  		if idx != 0 && (idx%batchSize) == 0 {
  2091  			requests = append(requests, request)
  2092  			responses = append(responses, response)
  2093  			request = fetchBlocksReq{
  2094  				params: []fetchBlocksReqParam{},
  2095  			}
  2096  			response = []testBlocks{}
  2097  		}
  2098  		request.params = append(request.params,
  2099  			fetchBlocksReqParam{
  2100  				id:     blocks[idx].id,
  2101  				starts: starts,
  2102  			})
  2103  		response = append(response, blocks[idx])
  2104  	}
  2105  	if len(response) > 0 {
  2106  		responses = append(responses, response)
  2107  		requests = append(requests, request)
  2108  	}
  2109  	return requests, responses
  2110  }
  2111  
  2112  func expectedReqsAndResultFromBlocks(
  2113  	t *testing.T,
  2114  	blocks []testBlocks,
  2115  	batchSize int,
  2116  	clientsParticipatingLen int,
  2117  	selectClientForBlockFn func(id ident.ID, blockIndex int) (clientIndex int),
  2118  ) ([][]fetchBlocksReq, [][][]testBlocks) {
  2119  	var (
  2120  		clientsExpectReqs   [][]fetchBlocksReq
  2121  		clientsBlocksResult [][][]testBlocks
  2122  		blockIdx            = 0
  2123  	)
  2124  	for i := 0; i < clientsParticipatingLen; i++ {
  2125  		clientsExpectReqs = append(clientsExpectReqs, []fetchBlocksReq{})
  2126  		clientsBlocksResult = append(clientsBlocksResult, [][]testBlocks{})
  2127  	}
  2128  
  2129  	// Round robin the blocks to clients to simulate our load balancing
  2130  	for len(blocks) > 0 {
  2131  		currBlock := blocks[0]
  2132  
  2133  		clientIdx := selectClientForBlockFn(currBlock.id, blockIdx)
  2134  		if clientIdx >= clientsParticipatingLen {
			msg := "client (%d) selected for block (%d) " +
				"is out of range of clients participating (%d)"
			require.FailNow(t, fmt.Sprintf(msg, clientIdx, blockIdx, clientsParticipatingLen))
  2138  		}
  2139  
  2140  		expectReqs := clientsExpectReqs[clientIdx]
  2141  		blocksResult := clientsBlocksResult[clientIdx]
  2142  
  2143  		// Extend if batch is full
  2144  		if len(expectReqs) == 0 ||
  2145  			len(expectReqs[len(expectReqs)-1].params) == batchSize {
  2146  			clientsExpectReqs[clientIdx] =
  2147  				append(clientsExpectReqs[clientIdx], fetchBlocksReq{})
  2148  			expectReqs = clientsExpectReqs[clientIdx]
  2149  			clientsBlocksResult[clientIdx] =
  2150  				append(clientsBlocksResult[clientIdx], []testBlocks{})
  2151  			blocksResult = clientsBlocksResult[clientIdx]
  2152  		}
  2153  
  2154  		req := &expectReqs[len(expectReqs)-1]
  2155  
  2156  		starts := []xtime.UnixNano{}
  2157  		for i := 0; i < len(currBlock.blocks); i++ {
  2158  			starts = append(starts, currBlock.blocks[i].start)
  2159  		}
  2160  		param := fetchBlocksReqParam{
  2161  			id:     currBlock.id,
  2162  			starts: starts,
  2163  		}
  2164  		req.params = append(req.params, param)
  2165  		blocksResult[len(blocksResult)-1] = append(blocksResult[len(blocksResult)-1], currBlock)
  2166  
  2167  		clientsBlocksResult[clientIdx] = blocksResult
  2168  
  2169  		blocks = blocks[1:]
  2170  		blockIdx++
  2171  	}
  2172  
  2173  	return clientsExpectReqs, clientsBlocksResult
  2174  }
  2175  
  2176  func expectFetchMetadataAndReturn(
  2177  	client *rpc.MockTChanNode,
  2178  	result []testBlocksMetadata,
  2179  	opts AdminOptions,
  2180  ) {
  2181  	batchSize := opts.FetchSeriesBlocksBatchSize()
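	// One FetchBlocksMetadataRawV2 call is expected per page of batchSize
	// entries, with the pages chained together by page tokens.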
  2182  	totalCalls := int(math.Ceil(float64(len(result)) / float64(batchSize)))
  2183  	includeSizes := true
  2184  
  2185  	var calls []*gomock.Call
  2186  	for i := 0; i < totalCalls; i++ {
  2187  		var (
  2188  			ret      = &rpc.FetchBlocksMetadataRawV2Result_{}
  2189  			beginIdx = i * batchSize
  2190  		)
  2191  		for j := beginIdx; j < len(result) && j < beginIdx+batchSize; j++ {
  2192  			id := result[j].id.Bytes()
  2193  			for k := 0; k < len(result[j].blocks); k++ {
  2194  				bl := &rpc.BlockMetadataV2{}
  2195  				bl.ID = id
  2196  				bl.Start = int64(result[j].blocks[k].start)
  2197  				bl.Size = result[j].blocks[k].size
  2198  				if result[j].blocks[k].checksum != nil {
  2199  					checksum := int64(*result[j].blocks[k].checksum)
  2200  					bl.Checksum = &checksum
  2201  				}
  2202  				ret.Elements = append(ret.Elements, bl)
  2203  			}
  2204  		}
  2205  		if i != totalCalls-1 {
  2206  			// Include next page token if not last page
  2207  			ret.NextPageToken = []byte(fmt.Sprintf("token_%d", i+1))
  2208  		}
  2209  
  2210  		matcher := &fetchMetadataReqMatcher{
  2211  			shard:        0,
  2212  			limit:        int64(batchSize),
  2213  			includeSizes: &includeSizes,
  2214  			isV2:         true,
  2215  		}
  2216  		if i != 0 {
  2217  			matcher.pageToken = []byte(fmt.Sprintf("token_%d", i))
  2218  		}
  2219  
  2220  		call := client.EXPECT().FetchBlocksMetadataRawV2(gomock.Any(), matcher).Return(ret, nil)
  2221  		calls = append(calls, call)
  2222  	}
  2223  
  2224  	gomock.InOrder(calls...)
  2225  }
  2226  
  2227  type fetchMetadataReqMatcher struct {
  2228  	shard        int32
  2229  	limit        int64
  2230  	pageToken    []byte
  2231  	includeSizes *bool
  2232  	isV2         bool
  2233  }
  2234  
  2235  func (m *fetchMetadataReqMatcher) Matches(x interface{}) bool {
  2236  	req, ok := x.(*rpc.FetchBlocksMetadataRawV2Request)
  2237  	if !ok {
  2238  		return false
  2239  	}
  2240  
  2241  	if m.shard != req.Shard {
  2242  		return false
  2243  	}
  2244  
  2245  	if m.limit != req.Limit {
  2246  		return false
  2247  	}
  2248  
  2249  	if m.pageToken == nil {
  2250  		if req.PageToken != nil {
  2251  			return false
  2252  		}
  2253  	} else {
  2254  		if req.PageToken == nil {
  2255  			return false
  2256  		}
  2257  		if !bytes.Equal(req.PageToken, m.pageToken) {
  2258  			return false
  2259  		}
  2260  	}
  2261  
  2262  	if m.includeSizes == nil {
  2263  		if req.IncludeSizes != nil {
  2264  			return false
  2265  		}
  2266  	} else {
  2267  		if req.IncludeSizes == nil {
  2268  			return false
  2269  		}
  2270  		if *req.IncludeSizes != *m.includeSizes {
  2271  			return false
  2272  		}
  2273  	}
  2274  
  2275  	return true
  2276  }
  2277  
  2278  func (m *fetchMetadataReqMatcher) String() string {
  2279  	return "fetchMetadataReqMatcher"
  2280  }
  2281  
  2282  func expectFetchBlocksAndReturn(
  2283  	client *rpc.MockTChanNode,
  2284  	expect []fetchBlocksReq,
  2285  	result [][]testBlocks,
  2286  ) {
  2287  	for i := 0; i < len(expect); i++ {
  2288  		matcher := &fetchBlocksReqMatcher{req: expect[i]}
  2289  		ret := &rpc.FetchBlocksRawResult_{}
  2290  		for _, res := range result[i] {
  2291  			blocks := &rpc.Blocks{}
  2292  			blocks.ID = res.id.Bytes()
  2293  			for j := range res.blocks {
  2294  				bl := &rpc.Block{}
  2295  				bl.Start = int64(res.blocks[j].start)
  2296  				if res.blocks[j].segments != nil {
  2297  					segs := &rpc.Segments{}
  2298  					if res.blocks[j].segments.merged != nil {
  2299  						segs.Merged = &rpc.Segment{
  2300  							Head: res.blocks[j].segments.merged.head,
  2301  							Tail: res.blocks[j].segments.merged.tail,
  2302  						}
  2303  					}
  2304  					for k := range res.blocks[j].segments.unmerged {
  2305  						segs.Unmerged = append(segs.Unmerged, &rpc.Segment{
  2306  							Head: res.blocks[j].segments.unmerged[k].head,
  2307  							Tail: res.blocks[j].segments.unmerged[k].tail,
  2308  						})
  2309  					}
  2310  					bl.Segments = segs
  2311  				}
  2312  				if res.blocks[j].err != nil {
  2313  					bl.Err = &rpc.Error{}
  2314  					bl.Err.Type = res.blocks[j].err.errorType
  2315  					bl.Err.Message = res.blocks[j].err.message
  2316  				}
  2317  				blocks.Blocks = append(blocks.Blocks, bl)
  2318  			}
  2319  			ret.Elements = append(ret.Elements, blocks)
  2320  		}
  2321  
  2322  		client.EXPECT().FetchBlocksRaw(gomock.Any(), matcher).
  2323  			Do(func(_ interface{}, req *rpc.FetchBlocksRawRequest) {
				// The order of the elements in the request is non-deterministic (due
				// to concurrency), so inspect the request and re-order the response to
				// match by comparing values (there may be duplicate entries for a
				// given series ID, so comparing IDs alone is not sufficient).
  2328  				retElements := make([]*rpc.Blocks, 0, len(ret.Elements))
  2329  				for _, elem := range req.Elements {
  2330  				inner:
  2331  					for _, retElem := range ret.Elements {
  2332  						if !bytes.Equal(elem.ID, retElem.ID) {
  2333  							continue
  2334  						}
  2335  						if len(elem.Starts) != len(retElem.Blocks) {
  2336  							continue
  2337  						}
  2338  
  2339  						for i, start := range elem.Starts {
  2340  							block := retElem.Blocks[i]
  2341  							if start != block.Start {
  2342  								continue inner
  2343  							}
  2344  						}
  2345  
  2346  						retElements = append(retElements, retElem)
  2347  					}
  2348  				}
  2349  				ret.Elements = retElements
  2350  			}).Return(ret, nil)
  2351  	}
  2352  }
  2353  
  2354  type fetchBlocksReqMatcher struct {
  2355  	req fetchBlocksReq
  2356  }
  2357  
  2358  func (m *fetchBlocksReqMatcher) Matches(x interface{}) bool {
  2359  	req, ok := x.(*rpc.FetchBlocksRawRequest)
  2360  	if !ok {
  2361  		return false
  2362  	}
  2363  
  2364  	params := m.req.params
  2365  	if len(params) != len(req.Elements) {
  2366  		return false
  2367  	}
  2368  
  2369  	for i := range params {
		// The request elements may be in a different order than params, so
		// match them up by comparing IDs and start times.
		found := false
  2373  	inner:
  2374  		for reqIdx, element := range req.Elements {
  2375  			reqID := ident.BinaryID(checked.NewBytes(element.ID, nil))
  2376  			if !params[i].id.Equal(reqID) {
  2377  				continue
  2378  			}
  2379  
  2380  			if len(params[i].starts) != len(req.Elements[reqIdx].Starts) {
  2381  				continue
  2382  			}
  2383  			for j := range params[i].starts {
  2384  				if int64(params[i].starts[j]) != req.Elements[reqIdx].Starts[j] {
  2385  					continue inner
  2386  				}
  2387  			}
  2388  
  2389  			found = true
  2390  		}
  2391  
  2392  		if !found {
  2393  			return false
  2394  		}
  2395  	}
  2396  
  2397  	return true
  2398  }
  2399  
  2400  func (m *fetchBlocksReqMatcher) String() string {
  2401  	return "fetchBlocksReqMatcher"
  2402  }
  2403  
  2404  type fetchBlocksReq struct {
  2405  	params []fetchBlocksReqParam
  2406  }
  2407  
  2408  type fetchBlocksReqParam struct {
  2409  	id     ident.ID
  2410  	starts []xtime.UnixNano
  2411  }
  2412  
  2413  type testBlocksMetadata struct {
  2414  	id     ident.ID
  2415  	blocks []testBlockMetadata
  2416  }
  2417  
  2418  type testBlockMetadata struct {
  2419  	start    xtime.UnixNano
  2420  	size     *int64
  2421  	checksum *uint32
  2422  }
  2423  
  2424  type testBlocks struct {
  2425  	id     ident.ID
  2426  	blocks []testBlock
  2427  }
  2428  
  2429  type testBlock struct {
  2430  	start    xtime.UnixNano
  2431  	segments *testBlockSegments
  2432  	err      *testBlockError
  2433  }
  2434  
  2435  type testBlockError struct {
  2436  	errorType rpc.ErrorType
  2437  	message   string
  2438  }
  2439  
  2440  type testBlockSegments struct {
  2441  	merged   *testBlockSegment
  2442  	unmerged []*testBlockSegment
  2443  }
  2444  
  2445  type testBlockSegment struct {
  2446  	head []byte
  2447  	tail []byte
  2448  }
  2449  
  2450  func assertFetchBootstrapBlocksResult(
  2451  	t *testing.T,
  2452  	expected []testBlocks,
  2453  	actual result.ShardResult,
  2454  ) {
  2455  	ctx := context.NewBackground()
  2456  	defer ctx.Close()
  2457  
  2458  	series := actual.AllSeries()
  2459  	require.Equal(t, len(expected), series.Len())
  2460  
  2461  	for i := range expected {
  2462  		id := expected[i].id
  2463  		entry, ok := series.Get(id)
  2464  		if !ok {
  2465  			require.Fail(t, fmt.Sprintf("blocks for series '%s' not present", id.String()))
  2466  			continue
  2467  		}
  2468  
  2469  		expectedLen := 0
  2470  		for _, block := range expected[i].blocks {
  2471  			if block.err != nil {
  2472  				continue
  2473  			}
  2474  			expectedLen++
  2475  		}
  2476  		require.Equal(t, expectedLen, entry.Blocks.Len())
  2477  
  2478  		for _, block := range expected[i].blocks {
  2479  			actualBlock, ok := entry.Blocks.BlockAt(block.start)
  2480  			if !ok {
  2481  				require.Fail(t, fmt.Sprintf("block for series '%s' start %v not present", id.String(), block.start))
  2482  				continue
  2483  			}
  2484  
  2485  			if block.segments.merged != nil {
  2486  				expectedData := append(block.segments.merged.head, block.segments.merged.tail...)
  2487  				stream, err := actualBlock.Stream(ctx)
  2488  				require.NoError(t, err)
  2489  				seg, err := stream.Segment()
  2490  				require.NoError(t, err)
  2491  				actualData := append(bytesFor(seg.Head), bytesFor(seg.Tail)...)
  2492  				require.Equal(t, expectedData, actualData)
  2493  			} else if block.segments.unmerged != nil {
  2494  				require.Fail(t, "unmerged comparison not supported")
  2495  			}
  2496  		}
  2497  	}
  2498  }
  2499  
  2500  func bytesFor(data checked.Bytes) []byte {
  2501  	if data == nil {
  2502  		return nil
  2503  	}
  2504  	return data.Bytes()
  2505  }
  2506  
  2507  func assertEnqueueChannel(
  2508  	t *testing.T,
  2509  	expected []receivedBlockMetadata,
  2510  	ch enqueueChannel,
  2511  ) {
  2512  	enqueueCh, ok := ch.(*enqueueCh)
  2513  	require.True(t, ok)
  2514  
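	// Drain everything currently readable without blocking; each input is a
	// per-peer batch, whose first entry is taken as its representative.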
  2515  	var distinct []receivedBlockMetadata
  2516  	for {
  2517  		var perPeerBlocksMetadata []receivedBlockMetadata
  2518  		enqueueChInputs := enqueueCh.read()
  2519  
  2520  		select {
  2521  		case perPeerBlocksMetadata = <-enqueueChInputs:
  2522  		default:
  2523  		}
  2524  		if perPeerBlocksMetadata == nil {
  2525  			break
  2526  		}
  2527  
  2528  		elem := perPeerBlocksMetadata[0]
  2529  		distinct = append(distinct, elem)
  2530  	}
  2531  
  2532  	require.Equal(t, len(expected), len(distinct))
  2533  	matched := make([]bool, len(expected))
  2534  	for i, expected := range expected {
  2535  		for _, actual := range distinct {
  2536  			found := expected.id.Equal(actual.id) &&
  2537  				expected.block.start.Equal(actual.block.start) &&
  2538  				expected.block.size == actual.block.size
  2539  			if found {
  2540  				matched[i] = true
				break
  2542  			}
  2543  		}
  2544  	}
  2545  	for _, m := range matched {
  2546  		assert.True(t, m)
  2547  	}
  2548  
  2549  	close(enqueueCh.peersMetadataCh)
  2550  }