github.com/m3db/m3@v1.5.0/src/dbnode/storage/series/reader_test.go

// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package series

import (
	"errors"
	"strings"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/storage/block"
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/x/xio"
	"github.com/m3db/m3/src/x/ident"
	xtest "github.com/m3db/m3/src/x/test"
	xtime "github.com/m3db/m3/src/x/time"
)

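// TestReaderUsingRetrieverReadEncoded verifies that ReadEncoded streams one
// block per block start from the retriever over a two-block query range.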
func TestReaderUsingRetrieverReadEncoded(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	opts := newSeriesTestOptions()
	ropts := opts.RetentionOptions()

	end := xtime.ToUnixNano(opts.ClockOptions().NowFn()().Truncate(ropts.BlockSize()))
	start := end.Add(-2 * ropts.BlockSize())

	onRetrieveBlock := block.NewMockOnRetrieveBlock(ctrl)

	retriever := NewMockQueryableBlockRetriever(ctrl)
	retriever.EXPECT().IsBlockRetrievable(start).Return(true, nil)
	retriever.EXPECT().IsBlockRetrievable(start.Add(ropts.BlockSize())).Return(true, nil)

	var blockReaders []xio.BlockReader
	curStart := start
	for i := 0; i < 2; i++ {
		reader := xio.NewMockSegmentReader(ctrl)
		blockReaders = append(blockReaders, xio.BlockReader{
			SegmentReader: reader,
			Start:         curStart,
		})
		curStart = curStart.Add(ropts.BlockSize())
	}

	ctx := opts.ContextPool().Get()
	defer ctx.Close()

	retriever.EXPECT().
		Stream(ctx, ident.NewIDMatcher("foo"),
			start, onRetrieveBlock, gomock.Any()).
		Return(blockReaders[0], nil)
	retriever.EXPECT().
		Stream(ctx, ident.NewIDMatcher("foo"),
			start.Add(ropts.BlockSize()), onRetrieveBlock, gomock.Any()).
		Return(blockReaders[1], nil)

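	// Construct the reader under test, wired to the mock retriever.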
	reader := NewReaderUsingRetriever(
		ident.StringID("foo"), retriever, onRetrieveBlock, nil, opts)

	// Check that reads return the expected block readers.
	iter, err := reader.ReadEncoded(ctx, start, end, namespace.Context{})
	require.NoError(t, err)

	count := 0
	for iter.Next(ctx) {
		require.Equal(t, 1, len(iter.Current()))
		assert.Equal(t, blockReaders[count], iter.Current()[0])
		count++
	}
	require.NoError(t, iter.Err())
	require.Equal(t, 2, count)
}

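// readTestCase describes a single scenario for the robust reader tests: which
// block starts are queried, what the disk cache, disk, and buffer return for
// each, and the merged results expected by the caller.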
type readTestCase struct {
	title           string
	times           []xtime.UnixNano
	cachedBlocks    map[xtime.UnixNano]streamResponse
	diskBlocks      map[xtime.UnixNano]streamResponse
	bufferBlocks    map[xtime.UnixNano]block.FetchBlockResult
	expectedResults []block.FetchBlockResult
}

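// streamResponse is the outcome of a disk cache or disk read for a single
// block start: either a block reader or an error.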
type streamResponse struct {
	blockReader xio.BlockReader
	err         error
}

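// Shared fixtures for the robust reader test cases below.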
var (
	opts      = newSeriesTestOptions()
	ropts     = opts.RetentionOptions()
	blockSize = ropts.BlockSize()
	// Subtract a few block sizes to make sure the test cases don't try to
	// query into the future.
	start = xtime.ToUnixNano(opts.ClockOptions().NowFn()()).
		Truncate(blockSize).Add(-5 * blockSize)
)

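// robustReaderTestCases is shared by TestReaderFetchBlocksRobust and
// TestReaderReadEncodedRobust so that both read paths are exercised against
// the same scenarios.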
var robustReaderTestCases = []readTestCase{
	{
		// Should return an empty slice if there is no data.
		title: "Handle no data",
		times: []xtime.UnixNano{start},
	},
	{
		// Read one block from disk which should return an error.
		title: "Handles disk read errors",
		times: []xtime.UnixNano{start},
		diskBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				err: errors.New("some-error"),
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start: start,
				Err:   errors.New("some-error"),
			},
		},
	},
	{
		// Read one block from the disk cache which should return an error.
		title: "Handles disk cache read errors",
		times: []xtime.UnixNano{start},
		cachedBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				err: errors.New("some-error"),
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start: start,
				Err:   errors.New("some-error"),
			},
		},
	},
	{
		// Read one block from the buffer which should return an error.
		title: "Handles buffer read errors",
		times: []xtime.UnixNano{start},
		bufferBlocks: map[xtime.UnixNano]block.FetchBlockResult{
			start: {
				Start: start,
				Err:   errors.New("some-error"),
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start: start,
				Err:   errors.New("some-error"),
			},
		},
	},
	{
		// Read one block from the disk cache.
		title: "Handles disk cache reads (should not query disk)",
		times: []xtime.UnixNano{start},
		cachedBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start,
					BlockSize:     blockSize,
				},
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start:  start,
				Blocks: []xio.BlockReader{{Start: start, BlockSize: blockSize}},
			},
		},
	},
	{
		// Read two blocks, each of which should be returned from disk.
		title: "Handles multiple disk reads",
		times: []xtime.UnixNano{start, start.Add(blockSize)},
		diskBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start,
					BlockSize:     blockSize,
				},
			},
			start.Add(blockSize): {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start.Add(blockSize),
					BlockSize:     blockSize,
				},
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start:  start,
				Blocks: []xio.BlockReader{{Start: start, BlockSize: blockSize}},
			},
			{
				Start:  start.Add(blockSize),
				Blocks: []xio.BlockReader{{Start: start.Add(blockSize), BlockSize: blockSize}},
			},
		},
	},
	{
		// Read one block from the buffer.
		title: "Handles buffer reads",
		times: []xtime.UnixNano{start},
		bufferBlocks: map[xtime.UnixNano]block.FetchBlockResult{
			start: {
				Start:  start,
				Blocks: []xio.BlockReader{{Start: start, BlockSize: blockSize}},
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start:  start,
				Blocks: []xio.BlockReader{{Start: start, BlockSize: blockSize}},
			},
		},
	},
	{
		title: "Combines data from disk cache and buffer for same blockstart",
		times: []xtime.UnixNano{start},
		cachedBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start,
					BlockSize:     blockSize,
				},
			},
		},
		bufferBlocks: map[xtime.UnixNano]block.FetchBlockResult{
			start: {
				Start:  start,
				Blocks: []xio.BlockReader{{Start: start, BlockSize: blockSize}},
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start: start,
				Blocks: []xio.BlockReader{
					// One from disk cache.
					{Start: start, BlockSize: blockSize},
					// One from buffer.
					{Start: start, BlockSize: blockSize},
				},
			},
		},
	},
	{
		title: "Combines data from disk and buffer for same blockstart",
		times: []xtime.UnixNano{start},
		diskBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start,
					BlockSize:     blockSize,
				},
			},
		},
		bufferBlocks: map[xtime.UnixNano]block.FetchBlockResult{
			start: {
				Start:  start,
				Blocks: []xio.BlockReader{{Start: start, BlockSize: blockSize}},
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start: start,
				Blocks: []xio.BlockReader{
					// One from disk.
					{Start: start, BlockSize: blockSize},
					// One from buffer.
					{Start: start, BlockSize: blockSize},
				},
			},
		},
	},
	// Both disk cache and buffer have data for same blockstart but buffer has an
	// error. The error should be propagated to the caller (not masked by the
	// valid data from the disk cache).
	{
		title: "Handles buffer and disk cache merge with buffer error for same blockstart",
		times: []xtime.UnixNano{start},
		cachedBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start,
					BlockSize:     blockSize,
				},
			},
		},
		bufferBlocks: map[xtime.UnixNano]block.FetchBlockResult{
			start: {
				Start: start,
				Err:   errors.New("some-error"),
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start: start,
				Err:   errors.New("some-error"),
			},
		},
	},
	// Both disk and buffer have data for same blockstart but buffer has an
	// error. The error should be propagated to the caller (not masked by the
	// valid data from disk).
	{
		title: "Handles buffer and disk merge with buffer error for same blockstart",
		times: []xtime.UnixNano{start},
		diskBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start,
					BlockSize:     blockSize,
				},
			},
		},
		bufferBlocks: map[xtime.UnixNano]block.FetchBlockResult{
			start: {
				Start: start,
				Err:   errors.New("some-error"),
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start: start,
				Err:   errors.New("some-error"),
			},
		},
	},
	{
		title: "Combines data from all sources for different block starts and same block starts",
		times: []xtime.UnixNano{start, start.Add(blockSize), start.Add(2 * blockSize), start.Add(3 * blockSize)},
		// Blocks 1 and 3 from disk cache.
		cachedBlocks: map[xtime.UnixNano]streamResponse{
			start: {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start,
					BlockSize:     blockSize,
				},
			},
			start.Add(2 * blockSize): {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start.Add(2 * blockSize),
					BlockSize:     blockSize,
				},
			},
		},
		// Blocks 2 and 4 from disk.
		diskBlocks: map[xtime.UnixNano]streamResponse{
			start.Add(blockSize): {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start.Add(blockSize),
					BlockSize:     blockSize,
				},
			},
			start.Add(3 * blockSize): {
				blockReader: xio.BlockReader{
					SegmentReader: xio.NewSegmentReader(ts.Segment{}),
					Start:         start.Add(3 * blockSize),
					BlockSize:     blockSize,
				},
			},
		},
		// Blocks 1, 2, and 3 from buffer.
		bufferBlocks: map[xtime.UnixNano]block.FetchBlockResult{
			start: {
				Start:  start,
				Blocks: []xio.BlockReader{{Start: start, BlockSize: blockSize}},
			},
			start.Add(blockSize): {
				Start:  start.Add(blockSize),
				Blocks: []xio.BlockReader{{Start: start.Add(blockSize), BlockSize: blockSize}},
			},
			start.Add(2 * blockSize): {
				Start:  start.Add(2 * blockSize),
				Blocks: []xio.BlockReader{{Start: start.Add(2 * blockSize), BlockSize: blockSize}},
			},
		},
		expectedResults: []block.FetchBlockResult{
			{
				Start: start,
				Blocks: []xio.BlockReader{
					// One from disk cache.
					{Start: start, BlockSize: blockSize},
					// One from buffer.
					{Start: start, BlockSize: blockSize},
				},
			},
			{
				Start: start.Add(blockSize),
				Blocks: []xio.BlockReader{
					// One from disk.
					{Start: start.Add(blockSize), BlockSize: blockSize},
					// One from buffer.
					{Start: start.Add(blockSize), BlockSize: blockSize},
				},
			},
			{
				Start: start.Add(2 * blockSize),
				Blocks: []xio.BlockReader{
					// One from disk cache.
					{Start: start.Add(2 * blockSize), BlockSize: blockSize},
					// One from buffer.
					{Start: start.Add(2 * blockSize), BlockSize: blockSize},
				},
			},
			{
				Start: start.Add(3 * blockSize),
				Blocks: []xio.BlockReader{
					// One from disk.
					{Start: start.Add(3 * blockSize), BlockSize: blockSize},
				},
			},
		},
	},
}

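// TestReaderFetchBlocksRobust exercises fetchBlocksWithBlocksMapAndBuffer
// against every combination of disk cache, disk, and buffer responses in
// robustReaderTestCases.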
func TestReaderFetchBlocksRobust(t *testing.T) {
	for _, tc := range robustReaderTestCases {
		tc := tc
		t.Run(tc.title, func(t *testing.T) {
			ctrl := xtest.NewController(t)
			defer ctrl.Finish()

			var (
				onRetrieveBlock = block.NewMockOnRetrieveBlock(ctrl)
				retriever       = NewMockQueryableBlockRetriever(ctrl)
				diskCache       = block.NewMockDatabaseSeriesBlocks(ctrl)
				buffer          = NewMockdatabaseBuffer(ctrl)
				bufferReturn    []block.FetchBlockResult
			)

			ctx := opts.ContextPool().Get()
			defer ctx.Close()

			// Set up mocks.
			for _, currTime := range tc.times {
				cachedBlocks, wasInDiskCache := tc.cachedBlocks[currTime]
				if wasInDiskCache {
					// If the data was in the disk cache then expect a read from it but don't expect
					// disk reads.
					b := block.NewMockDatabaseBlock(ctrl)
					if cachedBlocks.err != nil {
						b.EXPECT().Stream(ctx).Return(xio.BlockReader{}, cachedBlocks.err)
					} else {
						b.EXPECT().Stream(ctx).Return(cachedBlocks.blockReader, nil)
					}
					diskCache.EXPECT().BlockAt(currTime).Return(b, true)
				} else {
					// If the data was not in the disk cache then expect that and set up a query
					// for disk.
					diskCache.EXPECT().BlockAt(currTime).Return(nil, false)
					diskBlocks, ok := tc.diskBlocks[currTime]
					if !ok {
						retriever.EXPECT().IsBlockRetrievable(currTime).Return(false, nil)
					} else {
						retriever.EXPECT().IsBlockRetrievable(currTime).Return(true, nil)
						if diskBlocks.err != nil {
							retriever.EXPECT().
								Stream(ctx, ident.NewIDMatcher("foo"), currTime, nil, gomock.Any()).
								Return(xio.BlockReader{}, diskBlocks.err)
						} else {
							retriever.EXPECT().
								Stream(ctx, ident.NewIDMatcher("foo"), currTime, nil, gomock.Any()).
								Return(diskBlocks.blockReader, nil)
						}
					}
				}

				// Prepare the buffer response one block at a time.
				bufferBlocks, wasInBuffer := tc.bufferBlocks[currTime]
				if wasInBuffer {
					bufferReturn = append(bufferReturn, bufferBlocks)
				}
			}

			// Expect the final buffer result (batched function call).
			if len(tc.bufferBlocks) == 0 {
				buffer.EXPECT().IsEmpty().Return(true)
			} else {
				buffer.EXPECT().IsEmpty().Return(false)
				buffer.EXPECT().
					FetchBlocks(ctx, tc.times, namespace.Context{}).
					Return(bufferReturn)
			}

			reader := NewReaderUsingRetriever(
				ident.StringID("foo"), retriever, onRetrieveBlock, nil, opts)

			r, err := reader.fetchBlocksWithBlocksMapAndBuffer(ctx, tc.times, diskCache, buffer, namespace.Context{})
			require.NoError(t, err)
			require.Equal(t, len(tc.expectedResults), len(r))

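			// Verify each result: errors are matched by substring, successful
			// results by block count and start time.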
			for i, result := range r {
				expectedResult := tc.expectedResults[i]
				assert.Equal(t, expectedResult.Start, result.Start)

				if expectedResult.Err != nil {
					require.True(t, strings.Contains(result.Err.Error(), expectedResult.Err.Error()))
				} else {
					require.Equal(t, len(expectedResult.Blocks), len(result.Blocks))
					for _, b := range result.Blocks {
						require.Equal(t, expectedResult.Start, b.Start)
					}
				}
			}
		})
	}
}

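// TestReaderReadEncodedRobust exercises readersWithBlocksMapAndBuffer against
// the same scenarios, verifying that in-memory (disk cache and buffer) errors
// surface immediately while disk errors surface when the iterator is drained.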
//nolint:scopelint
func TestReaderReadEncodedRobust(t *testing.T) {
	for _, tc := range robustReaderTestCases {
		tc := tc
		t.Run(tc.title, func(t *testing.T) {
			ctrl := xtest.NewController(t)
			defer ctrl.Finish()

			var (
				onRetrieveBlock = block.NewMockOnRetrieveBlock(ctrl)
				retriever       = NewMockQueryableBlockRetriever(ctrl)
				diskCache       = block.NewMockDatabaseSeriesBlocks(ctrl)
				buffer          = NewMockdatabaseBuffer(ctrl)
			)

			ctx := opts.ContextPool().Get()
			defer ctx.Close()

			// Set up mocks.
			for _, currTime := range tc.times {
				cachedBlocks, wasInDiskCache := tc.cachedBlocks[currTime]
				if wasInDiskCache {
					// If the data was in the disk cache then expect a read from it but don't expect
					// disk reads.
					b := block.NewMockDatabaseBlock(ctrl)
					b.EXPECT().SetLastReadTime(gomock.Any()).AnyTimes()
					if cachedBlocks.err != nil {
						b.EXPECT().Stream(ctx).Return(xio.BlockReader{}, cachedBlocks.err)
					} else {
						b.EXPECT().Stream(ctx).Return(cachedBlocks.blockReader, nil)
					}
					diskCache.EXPECT().BlockAt(currTime).Return(b, true)
					if cachedBlocks.err != nil {
						// Stop setting up mocks since the function will return early.
						break
					}
				} else {
					diskCache.EXPECT().BlockAt(currTime).Return(nil, false)
				}

				// Set up buffer mocks.
				bufferBlocks, wasInBuffer := tc.bufferBlocks[currTime]
				if wasInBuffer {
					if bufferBlocks.Err != nil {
						buffer.EXPECT().
							ReadEncoded(ctx, currTime, currTime.Add(blockSize), namespace.Context{}).
							Return(nil, bufferBlocks.Err)
						// Stop setting up mocks since the function will return early.
						break
					} else {
						buffer.EXPECT().
							ReadEncoded(ctx, currTime, currTime.Add(blockSize), namespace.Context{}).
							Return([][]xio.BlockReader{bufferBlocks.Blocks}, nil)
					}
				} else {
					buffer.EXPECT().
						ReadEncoded(ctx, currTime, currTime.Add(blockSize), namespace.Context{}).
						Return(nil, nil)
				}

				if !wasInDiskCache {
					// If the data was not in the disk cache then set up a query for disk.
					diskBlocks, ok := tc.diskBlocks[currTime]
					if !ok {
						retriever.EXPECT().IsBlockRetrievable(currTime).Return(false, nil)
					} else {
						retriever.EXPECT().IsBlockRetrievable(currTime).Return(true, nil)
						if diskBlocks.err != nil {
							retriever.EXPECT().
								Stream(ctx, ident.NewIDMatcher("foo"), currTime, onRetrieveBlock, gomock.Any()).
								Return(xio.BlockReader{}, diskBlocks.err)
						} else {
							retriever.EXPECT().
								Stream(ctx, ident.NewIDMatcher("foo"), currTime, onRetrieveBlock, gomock.Any()).
								Return(diskBlocks.blockReader, nil)
						}
					}
				}
			}

			var (
				reader = NewReaderUsingRetriever(
					ident.StringID("foo"), retriever, onRetrieveBlock, nil, opts)
				start = tc.times[0]
				// End is not inclusive, so add a block size to the last time.
				end = tc.times[len(tc.times)-1].Add(blockSize)
			)
			iter, err := reader.readersWithBlocksMapAndBuffer(ctx, start, end, diskCache, buffer, namespace.Context{})

			// In-memory (disk cache or buffer) errors are returned immediately.
			anyInMemErr := false
			for _, sr := range tc.cachedBlocks {
				if sr.err != nil {
					anyInMemErr = true
					break
				}
			}

			for _, br := range tc.bufferBlocks {
				if br.Err != nil {
					anyInMemErr = true
					break
				}
			}

			if anyInMemErr {
				require.Error(t, err)
				return
			}

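			// Drain the iterator; disk read errors only surface here.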
			r, err := iter.ToSlices(ctx)

			for _, sr := range tc.diskBlocks {
				if sr.err != nil {
					require.Error(t, err)
					return
				}
			}
			require.NoError(t, err)
			require.Equal(t, len(tc.expectedResults), len(r))

			for i, result := range r {
				expectedResult := tc.expectedResults[i]
				for _, br := range result {
					assert.Equal(t, expectedResult.Start, br.Start)
				}
				require.Equal(t, len(expectedResult.Blocks), len(result))
			}
		})
	}
}