github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/cmd/services/m3coordinator/downsample/flush_handler_test.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package downsample

import (
	"bytes"
	"sync"
	"testing"

	"github.com/m3db/m3/src/metrics/metric/aggregated"
	"github.com/m3db/m3/src/metrics/metric/id"
	"github.com/m3db/m3/src/metrics/policy"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage/mock"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/instrument"
	"github.com/m3db/m3/src/x/pool"
	"github.com/m3db/m3/src/x/serialize"
	xsync "github.com/m3db/m3/src/x/sync"
	xtest "github.com/m3db/m3/src/x/test"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/uber-go/tally"
)
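
// TestDownsamplerFlushHandlerCopiesTags verifies that the flush handler
// decodes tags from the chunked metric ID and writes copies of the tag name
// and value bytes to storage (with the annotation passed through), rather
// than retaining references to the iterator-owned buffers.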
func TestDownsamplerFlushHandlerCopiesTags(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	store := mock.NewMockStorage()
	pool := serialize.NewMockMetricTagsIteratorPool(ctrl)

	workers := xsync.NewWorkerPool(1)
	workers.Init()

	instrumentOpts := instrument.NewOptions()

	handler := newDownsamplerFlushHandler(store, pool,
		workers, models.NewTagOptions(), instrumentOpts)
	writer, err := handler.NewWriter(tally.NoopScope)
	require.NoError(t, err)

	var (
		expectedID = []byte("foo")
		tagName    = []byte("name")
		tagValue   = []byte("value")
		annotation = []byte("annotation")
	)
	iter := serialize.NewMockMetricTagsIterator(ctrl)
	gomock.InOrder(
		iter.EXPECT().Reset(expectedID),
		iter.EXPECT().NumTags().Return(1),
		iter.EXPECT().Next().Return(true),
		iter.EXPECT().Current().Return(tagName, tagValue),
		iter.EXPECT().Next().Return(false),
		iter.EXPECT().Err().Return(nil),
		iter.EXPECT().Close(),
	)

	pool.EXPECT().Get().Return(iter)

	// Write metric
	err = writer.Write(aggregated.ChunkedMetricWithStoragePolicy{
		ChunkedMetric: aggregated.ChunkedMetric{
			ChunkedID:  id.ChunkedID{Data: expectedID},
			TimeNanos:  123,
			Value:      42.42,
			Annotation: annotation,
		},
		StoragePolicy: policy.MustParseStoragePolicy("1s:1d"),
	})
	require.NoError(t, err)

	// Wait for flush
	err = writer.Flush()
	require.NoError(t, err)

	// Inspect the write
	writes := store.Writes()
	require.Equal(t, 1, len(writes))

	// Ensure the written tag name/value byte slices do _not_ alias the
	// iterator-returned slices but hold equal content
	tags := writes[0].Tags().Tags
	require.Equal(t, 1, len(tags))

	tag := tags[0]
	assert.True(t, bytes.Equal(tagName, tag.Name))
	assert.True(t, bytes.Equal(tagValue, tag.Value))
	assert.False(t, xtest.ByteSlicesBackedBySameData(tagName, tag.Name))
	assert.False(t, xtest.ByteSlicesBackedBySameData(tagValue, tag.Value))

	assert.Equal(t, annotation, writes[0].Annotation())
}
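
// sameBackingData is an illustrative sketch only (a hypothetical helper, not
// used by these tests) of the aliasing property asserted above via
// xtest.ByteSlicesBackedBySameData: as a simple sufficient check, two
// non-empty slices that start at the same address share a backing array.
func sameBackingData(a, b []byte) bool {
	if len(a) == 0 || len(b) == 0 {
		return false
	}
	return &a[0] == &b[0]
}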

// graphiteTags encodes a small graphite-style tag set (__g0__ set to first,
// fixed __g1__/__g2__ values, and the graphite ID scheme tag) and returns a
// copy of the encoded bytes, taken before the deferred Finalize releases the
// encoder's buffer.
func graphiteTags(
	t *testing.T, first string, encPool serialize.TagEncoderPool) []byte {
	enc := encPool.Get()
	defer enc.Finalize()

	err := enc.Encode(ident.MustNewTagStringsIterator(
		"__g0__", first,
		"__g1__", "y",
		"__g2__", "z",
		string(MetricsOptionIDSchemeTagName), string(GraphiteIDSchemeTagValue),
	))

	require.NoError(t, err)
	data, ok := enc.Data()
	require.True(t, ok)
	return append(make([]byte, 0, data.Len()), data.Bytes()...)
}
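
// TestDownsamplerFlushHandlerHighConcurrencyNoTagMixing writes two differently
// tagged graphite metrics from many concurrent goroutines and verifies that
// the decoded tag sets never bleed into one another: each of the two tag
// combinations must be seen exactly 100 times.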
func TestDownsamplerFlushHandlerHighConcurrencyNoTagMixing(t *testing.T) {
	ctrl := xtest.NewController(t)
	defer ctrl.Finish()

	store := mock.NewMockStorage()

	size := 10
	decodeOpts := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{
		CheckBytesWrapperPoolSize: &size,
	})

	poolOpts := pool.NewObjectPoolOptions()
	tagDecoderPool := serialize.NewTagDecoderPool(decodeOpts, poolOpts)
	tagDecoderPool.Init()

	pool := serialize.NewMetricTagsIteratorPool(tagDecoderPool, poolOpts)
	pool.Init()

	workers := xsync.NewWorkerPool(1)
	workers.Init()

	instrumentOpts := instrument.NewOptions()

	handler := newDownsamplerFlushHandler(store, pool,
		workers, models.NewTagOptions(), instrumentOpts)
	writer, err := handler.NewWriter(tally.NoopScope)
	require.NoError(t, err)

	encodeOpts := serialize.NewTagEncoderOptions()
	encPool := serialize.NewTagEncoderPool(encodeOpts, poolOpts)
	encPool.Init()

	xBytes := graphiteTags(t, "x", encPool)
	fooBytes := graphiteTags(t, "foo", encPool)

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		xData := append(make([]byte, 0, len(xBytes)), xBytes...)
		fooData := append(make([]byte, 0, len(fooBytes)), fooBytes...)
		go func() {
			defer wg.Done()
			err := writer.Write(aggregated.ChunkedMetricWithStoragePolicy{
				ChunkedMetric: aggregated.ChunkedMetric{
					ChunkedID: id.ChunkedID{Data: xData},
					TimeNanos: 123,
					Value:     42.42,
				},
				StoragePolicy: policy.MustParseStoragePolicy("1s:1d"),
			})
			require.NoError(t, err)

			err = writer.Write(aggregated.ChunkedMetricWithStoragePolicy{
				ChunkedMetric: aggregated.ChunkedMetric{
					ChunkedID: id.ChunkedID{Data: fooData},
					TimeNanos: 123,
					Value:     42.42,
				},
				StoragePolicy: policy.MustParseStoragePolicy("1s:1d"),
			})
			require.NoError(t, err)
		}()
	}

	wg.Wait()
	// Wait for flush
	err = writer.Flush()
	require.NoError(t, err)

	// Inspect the writes
	writes := store.Writes()
	require.Equal(t, 200, len(writes))

	seenMap := make(map[string]int, 10)
	for _, w := range writes {
		str := w.Tags().String()
		seenMap[str] = seenMap[str] + 1
	}

	assert.Equal(t, map[string]int{
		"__g0__: foo, __g1__: y, __g2__: z": 100,
		"__g0__: x, __g1__: y, __g2__: z":   100,
	}, seenMap)
}
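
// Both tests can be run with the standard Go tooling from the repository root,
// e.g. (the -race flag is a suggestion, useful for the concurrency test):
//
//	go test -race -run TestDownsamplerFlushHandler ./src/cmd/services/m3coordinator/downsample/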