github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/dbnode/integration/admin_session_fetch_blocks_test.go

// +build integration

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package integration

import (
	"testing"

	"github.com/m3db/m3/src/dbnode/integration/generate"
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/storage/block"
	"github.com/m3db/m3/src/dbnode/topology"
	"github.com/m3db/m3/src/x/context"
	"github.com/m3db/m3/src/x/ident"
	xtime "github.com/m3db/m3/src/x/time"

	"github.com/stretchr/testify/require"
)

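// TestAdminSessionFetchBlocksFromPeers runs the scenario against the default
// (non-protobuf) test namespace configuration.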
func TestAdminSessionFetchBlocksFromPeers(t *testing.T) {
	testAdminSessionFetchBlocksFromPeers(t, nil, nil)
}

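// TestProtoAdminSessionFetchBlocksFromPeers runs the same scenario against a
// namespace configured with protobuf-encoded test data.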
func TestProtoAdminSessionFetchBlocksFromPeers(t *testing.T) {
	testAdminSessionFetchBlocksFromPeers(t, setProtoTestOptions, setProtoTestInputConfig)
}

// This test writes data and then retrieves it using the AdminSession
// FetchBlocksMetadataFromPeers and FetchBlocksFromPeers endpoints, verifying
// that the retrieved values match what was written.
func testAdminSessionFetchBlocksFromPeers(t *testing.T, setTestOpts setTestOptions, updateInputConfig generate.UpdateBlockConfig) {
	if testing.Short() {
		t.SkipNow() // Just skip if we're doing a short run
	}

	// Test setup
	testOpts := NewTestOptions(t)
	if setTestOpts != nil {
		testOpts = setTestOpts(t, testOpts)
	}
	testSetup, err := NewTestSetup(t, testOpts, nil)
	require.NoError(t, err)
	defer testSetup.Close()

	md := testSetup.NamespaceMetadataOrFail(testNamespaces[0])
	blockSize := md.Options().RetentionOptions().BlockSize()

	// Start the server
	log := testSetup.StorageOpts().InstrumentOptions().Logger()
	require.NoError(t, testSetup.StartServer())

	// Stop the server
	defer func() {
		require.NoError(t, testSetup.StopServer())
		log.Debug("server is now down")
	}()

	// Write test data
	now := testSetup.NowFn()()
	seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
	inputData := []generate.BlockConfig{
		{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now},
		{IDs: []string{"foo", "baz"}, NumPoints: 50, Start: now.Add(blockSize)},
	}
	if updateInputConfig != nil {
		updateInputConfig(inputData)
	}
	for _, input := range inputData {
		start := input.Start
		testSetup.SetNowFn(start)
		testData := generate.Block(input)
		seriesMaps[start] = testData
		require.NoError(t, testSetup.WriteBatch(testNamespaces[0], testData))
	}
	log.Debug("test data is now written")

	// Advance time and sleep long enough for data blocks to be sealed during ticking
	testSetup.SetNowFn(testSetup.NowFn()().Add(blockSize * 2))
	later := testSetup.NowFn()()
	testSetup.SleepFor10xTickMinimumInterval()

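	// Fetch block metadata from peers for the written range, then stream the
	// blocks themselves and rebuild the observed series maps.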
	metadatasByShard := testSetupMetadatas(t, testSetup, testNamespaces[0], now, later)
	observedSeriesMaps := testSetupToSeriesMaps(t, testSetup, md, metadatasByShard)

	// Verify retrieved data matches what we've written
	verifySeriesMapsEqual(t, seriesMaps, observedSeriesMaps)
}

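// testSetupMetadatas fetches block metadata from peers for every shard in the
// test setup's shard set between start and end.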
func testSetupMetadatas(
	t *testing.T,
	testSetup TestSetup,
	namespace ident.ID,
	start xtime.UnixNano,
	end xtime.UnixNano,
) map[uint32][]block.ReplicaMetadata {
	// Retrieve block metadata for the written data using the AdminSession
	// FetchBlocksMetadataFromPeers API
	adminClient := testSetup.M3DBVerificationAdminClient()
	level := topology.ReadConsistencyLevelMajority
	metadatasByShard, err := m3dbClientFetchBlocksMetadata(adminClient,
		namespace, testSetup.ShardSet().AllIDs(), start, end, level)
	require.NoError(t, err)
	return metadatasByShard
}

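// filterSeriesByShard returns the subset of seriesMap containing only series
// whose IDs map to one of desiredShards in the test setup's shard set.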
func filterSeriesByShard(
	testSetup TestSetup,
	seriesMap map[xtime.UnixNano]generate.SeriesBlock,
	desiredShards []uint32,
) map[xtime.UnixNano]generate.SeriesBlock {
	filteredMap := make(map[xtime.UnixNano]generate.SeriesBlock)
	for blockStart, series := range seriesMap {
		filteredSeries := make([]generate.Series, 0, len(series))
		for _, serie := range series {
			shard := testSetup.ShardSet().Lookup(serie.ID)
			for _, ss := range desiredShards {
				if ss == shard {
					filteredSeries = append(filteredSeries, serie)
					break
				}
			}
		}

		if len(filteredSeries) > 0 {
			filteredMap[blockStart] = filteredSeries
		}
	}

	return filteredMap
}

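// verifySeriesMapsEqual asserts that the observed series maps contain exactly
// the same block starts, series IDs, timestamps and values as the expected maps.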
func verifySeriesMapsEqual(
	t *testing.T,
	expectedSeriesMap map[xtime.UnixNano]generate.SeriesBlock,
	observedSeriesMap map[xtime.UnixNano]generate.SeriesBlock,
) {
	// ensure same length
	require.Equal(t, len(expectedSeriesMap), len(observedSeriesMap))

	// ensure same set of keys
	for i := range expectedSeriesMap {
		_, ok := observedSeriesMap[i]
		require.True(t, ok, "%v is expected but not observed", i.ToTime().String())
	}

	// given same set of keys, ensure same values too
	for i := range expectedSeriesMap {
		expectedSeries := expectedSeriesMap[i]
		observedSeries := observedSeriesMap[i]
		require.Equal(t, len(expectedSeries), len(observedSeries))
		for _, es := range expectedSeries {
			found := false

			for _, os := range observedSeries {
				if !es.ID.Equal(os.ID) {
					continue
				}
				found = true

				// compare all the values in the series
				require.Equal(t, len(es.Data), len(os.Data),
					"data length mismatch for series - [time: %v, seriesID: %v]", i.ToTime().String(), es.ID.String())
				for idx := range es.Data {
					expectedData := es.Data[idx]
					observedData := os.Data[idx]
					require.Equal(t, expectedData.TimestampNanos, observedData.TimestampNanos,
						"data mismatch for series - [time: %v, seriesID: %v, idx: %v]",
						i.ToTime().String(), es.ID.String(), idx)
					require.Equal(t, expectedData.Value, observedData.Value,
						"data mismatch for series - [time: %v, seriesID: %v, idx: %v]",
						i.ToTime().String(), es.ID.String(), idx)
				}
			}

			require.True(t, found, "unable to find expected series - [time: %v, seriesID: %v]",
				i.ToTime().String(), es.ID.String())
		}
	}
}

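// testSetupToSeriesMaps streams the blocks described by metadatasByShard from
// peers and decodes them back into series maps keyed by block start time.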
func testSetupToSeriesMaps(
	t *testing.T,
	testSetup TestSetup,
	nsMetadata namespace.Metadata,
	metadatasByShard map[uint32][]block.ReplicaMetadata,
) map[xtime.UnixNano]generate.SeriesBlock {
	blockSize := nsMetadata.Options().RetentionOptions().BlockSize()
	seriesMap := make(map[xtime.UnixNano]generate.SeriesBlock)
	resultOpts := newDefaulTestResultOptions(testSetup.StorageOpts())
	consistencyLevel := testSetup.StorageOpts().RepairOptions().RepairConsistencyLevel()
	iterPool := testSetup.StorageOpts().ReaderIteratorPool()
	session, err := testSetup.M3DBVerificationAdminClient().DefaultAdminSession()
	require.NoError(t, err)
	require.NotNil(t, session)
	nsCtx := namespace.NewContextFrom(nsMetadata)

	for shardID, metadatas := range metadatasByShard {
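		// Stream the blocks for this shard from peers using the metadata
		// fetched earlier.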
		blocksIter, err := session.FetchBlocksFromPeers(nsMetadata, shardID,
			consistencyLevel, metadatas, resultOpts)
		require.NoError(t, err)
		require.NotNil(t, blocksIter)

		for blocksIter.Next() {
			_, id, tags, blk := blocksIter.Current()
			ctx := context.NewBackground()
			reader, err := blk.Stream(ctx)
			require.NoError(t, err)
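			// Decode the block's byte stream back into datapoints using a
			// pooled reader iterator.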
			readerIter := iterPool.Get()
			readerIter.Reset(reader, nsCtx.Schema)

			var datapoints []generate.TestValue
			for readerIter.Next() {
				datapoint, _, ann := readerIter.Current()
				datapoints = append(datapoints, generate.TestValue{Datapoint: datapoint, Annotation: ann})
			}
			require.NoError(t, readerIter.Err())
			require.NotEmpty(t, datapoints)

			readerIter.Close()
			ctx.Close()

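			// Group the decoded series under the block start containing its
			// first datapoint.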
			firstTS := datapoints[0].TimestampNanos
			seriesMapList := seriesMap[firstTS.Truncate(blockSize)]
			seriesMapList = append(seriesMapList, generate.Series{
				ID:   id,
				Tags: tags,
				Data: datapoints,
			})
			seriesMap[firstTS.Truncate(blockSize)] = seriesMapList
		}
		require.NoError(t, blocksIter.Err())
	}
	return seriesMap
}