github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/aggregator/client/writer_test.go

     1  // Copyright (c) 2018 Uber Technologies, Inc.
     2  //
     3  // Permission is hereby granted, free of charge, to any person obtaining a copy
     4  // of this software and associated documentation files (the "Software"), to deal
     5  // in the Software without restriction, including without limitation the rights
     6  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     7  // copies of the Software, and to permit persons to whom the Software is
     8  // furnished to do so, subject to the following conditions:
     9  //
    10  // The above copyright notice and this permission notice shall be included in
    11  // all copies or substantial portions of the Software.
    12  //
    13  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    14  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    15  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    16  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    17  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    18  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    19  // THE SOFTWARE.
    20  
    21  package client
    22  
    23  import (
    24  	"bytes"
    25  	"errors"
    26  	"fmt"
    27  	"io"
    28  	"math"
    29  	"sort"
    30  	"strings"
    31  	"sync"
    32  	"testing"
    33  	"time"
    34  
    35  	"github.com/m3db/m3/src/metrics/encoding"
    36  	"github.com/m3db/m3/src/metrics/encoding/protobuf"
    37  	"github.com/m3db/m3/src/metrics/metadata"
    38  	"github.com/m3db/m3/src/metrics/metric"
    39  	"github.com/m3db/m3/src/metrics/metric/aggregated"
    40  	"github.com/m3db/m3/src/metrics/metric/id"
    41  	"github.com/m3db/m3/src/metrics/metric/unaggregated"
    42  	"github.com/m3db/m3/src/x/clock"
    43  	"github.com/m3db/m3/src/x/instrument"
    44  
    45  	"github.com/golang/mock/gomock"
    46  	"github.com/stretchr/testify/require"
    47  	"github.com/uber-go/tally"
    48  )
    49  
    50  func TestWriterWriteClosed(t *testing.T) {
    51  	payload := payloadUnion{
    52  		payloadType: untimedType,
    53  		untimed: untimedPayload{
    54  			metric:    testCounter,
    55  			metadatas: testStagedMetadatas,
    56  		},
    57  	}
    58  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
    59  	w.closed = true
    60  	require.Equal(t, errInstanceWriterClosed, w.Write(0, payload))
    61  }
    62  
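        // Verifies that an encode failure is returned to the caller and that the
        // encoder is truncated back to its previous length (0 here) so the failed
        // write leaves no partial data behind.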
    63  func TestWriterWriteUntimedCounterEncodeError(t *testing.T) {
    64  	ctrl := gomock.NewController(t)
    65  	defer ctrl.Finish()
    66  
    67  	errTestEncodeMetric := errors.New("error encoding metrics")
    68  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
    69  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
    70  		encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
    71  		encoder.EXPECT().Len().Return(0)
    72  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
    73  			Type: encoding.CounterWithMetadatasType,
    74  			CounterWithMetadatas: unaggregated.CounterWithMetadatas{
    75  				Counter:         testCounter.Counter(),
    76  				StagedMetadatas: testStagedMetadatas,
    77  			},
    78  		}).Return(errTestEncodeMetric)
    79  		encoder.EXPECT().Truncate(0).Return(nil)
    80  		return &lockedEncoder{UnaggregatedEncoder: encoder}
    81  	}
    82  
    83  	payload := payloadUnion{
    84  		payloadType: untimedType,
    85  		untimed: untimedPayload{
    86  			metric:    testCounter,
    87  			metadatas: testStagedMetadatas,
    88  		},
    89  	}
    90  	require.Equal(t, errTestEncodeMetric, w.Write(0, payload))
    91  }
    92  
    93  func TestWriterWriteUntimedCounterEncoderExists(t *testing.T) {
    94  	ctrl := gomock.NewController(t)
    95  	defer ctrl.Finish()
    96  
    97  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
    98  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
    99  	gomock.InOrder(
   100  		encoder.EXPECT().Len().Return(0),
   101  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   102  			Type: encoding.CounterWithMetadatasType,
   103  			CounterWithMetadatas: unaggregated.CounterWithMetadatas{
   104  				Counter:         testCounter.Counter(),
   105  				StagedMetadatas: testStagedMetadatas,
   106  			},
   107  		}).Return(nil),
   108  		encoder.EXPECT().Len().Return(4),
   109  	)
   110  	w.encodersByShard[0] = &lockedEncoder{UnaggregatedEncoder: encoder}
   111  
   112  	payload := payloadUnion{
   113  		payloadType: untimedType,
   114  		untimed: untimedPayload{
   115  			metric:    testCounter,
   116  			metadatas: testStagedMetadatas,
   117  		},
   118  	}
   119  	require.NoError(t, w.Write(0, payload))
   120  	require.Equal(t, 1, len(w.encodersByShard))
   121  }
   122  
   123  func TestWriterWriteUntimedCounterEncoderDoesNotExist(t *testing.T) {
   124  	ctrl := gomock.NewController(t)
   125  	defer ctrl.Finish()
   126  
   127  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   128  	gomock.InOrder(
   129  		encoder.EXPECT().Len().Return(3),
   130  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   131  			Type: encoding.CounterWithMetadatasType,
   132  			CounterWithMetadatas: unaggregated.CounterWithMetadatas{
   133  				Counter:         testCounter.Counter(),
   134  				StagedMetadatas: testStagedMetadatas,
   135  			},
   136  		}).Return(nil),
   137  		encoder.EXPECT().Len().Return(7),
   138  	)
   139  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
   140  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   141  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   142  	}
   143  
   144  	payload := payloadUnion{
   145  		payloadType: untimedType,
   146  		untimed: untimedPayload{
   147  			metric:    testCounter,
   148  			metadatas: testStagedMetadatas,
   149  		},
   150  	}
   151  	require.NoError(t, w.Write(0, payload))
   152  }
   153  
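        // With MaxBatchSize(3) and an empty encoder before the write, encoding the
        // counter pushes the buffered size past the batch limit, so the encoded data
        // is relinquished and enqueued immediately while the encoder stays registered
        // for the shard.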
   154  func TestWriterWriteUntimedCounterWithFlushingZeroSizeBefore(t *testing.T) {
   155  	ctrl := gomock.NewController(t)
   156  	defer ctrl.Finish()
   157  
   158  	var (
   159  		stream      = protobuf.NewBuffer([]byte{1, 2, 3, 4, 5, 6, 7}, nil)
   160  		enqueuedBuf protobuf.Buffer
   161  	)
   162  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   163  	gomock.InOrder(
   164  		encoder.EXPECT().Len().Return(0),
   165  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   166  			Type: encoding.CounterWithMetadatasType,
   167  			CounterWithMetadatas: unaggregated.CounterWithMetadatas{
   168  				Counter:         testCounter.Counter(),
   169  				StagedMetadatas: testStagedMetadatas,
   170  			},
   171  		}).Return(nil),
   172  		encoder.EXPECT().Len().Return(7),
   173  		encoder.EXPECT().Relinquish().Return(stream),
   174  	)
   175  	queue := NewMockinstanceQueue(ctrl)
   176  	queue.EXPECT().
   177  		Enqueue(gomock.Any()).
   178  		DoAndReturn(func(buf protobuf.Buffer) error {
   179  			enqueuedBuf = buf
   180  			return nil
   181  		})
   182  	w := newInstanceWriter(testPlacementInstance, testOptions().SetMaxBatchSize(3)).(*writer)
   183  	w.queue = queue
   184  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   185  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   186  	}
   187  
   188  	payload := payloadUnion{
   189  		payloadType: untimedType,
   190  		untimed: untimedPayload{
   191  			metric:    testCounter,
   192  			metadatas: testStagedMetadatas,
   193  		},
   194  	}
   195  	require.NoError(t, w.Write(0, payload))
   196  
   197  	enc, exists := w.encodersByShard[0]
   198  	require.True(t, exists)
   199  	require.NotNil(t, enc)
   200  	require.Equal(t, 1, len(w.encodersByShard))
   201  	require.Equal(t, []byte{1, 2, 3, 4, 5, 6, 7}, enqueuedBuf.Bytes())
   202  }
   203  
   204  func TestWriterWriteUntimedCounterWithFlushingPositiveSizeBefore(t *testing.T) {
   205  	ctrl := gomock.NewController(t)
   206  	defer ctrl.Finish()
   207  
   208  	var (
   209  		stream      = protobuf.NewBuffer([]byte{1, 2, 3, 4, 5, 6, 7}, nil)
   210  		enqueuedBuf protobuf.Buffer
   211  	)
   212  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   213  	gomock.InOrder(
   214  		encoder.EXPECT().Len().Return(3),
   215  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   216  			Type: encoding.CounterWithMetadatasType,
   217  			CounterWithMetadatas: unaggregated.CounterWithMetadatas{
   218  				Counter:         testCounter.Counter(),
   219  				StagedMetadatas: testStagedMetadatas,
   220  			},
   221  		}).Return(nil),
   222  		encoder.EXPECT().Len().Return(7),
   223  		encoder.EXPECT().Relinquish().Return(stream),
   224  	)
   225  	queue := NewMockinstanceQueue(ctrl)
   226  	queue.EXPECT().
   227  		Enqueue(gomock.Any()).
   228  		DoAndReturn(func(buf protobuf.Buffer) error {
   229  			enqueuedBuf = buf
   230  			return nil
   231  		})
   232  	w := newInstanceWriter(testPlacementInstance, testOptions().SetMaxBatchSize(3)).(*writer)
   233  	w.queue = queue
   234  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   235  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   236  	}
   237  
   238  	payload := payloadUnion{
   239  		payloadType: untimedType,
   240  		untimed: untimedPayload{
   241  			metric:    testCounter,
   242  			metadatas: testStagedMetadatas,
   243  		},
   244  	}
   245  	require.NoError(t, w.Write(0, payload))
   246  
   247  	enc, exists := w.encodersByShard[0]
   248  	require.True(t, exists)
   249  	require.NotNil(t, enc)
   250  	require.Equal(t, 1, len(w.encodersByShard))
   251  	require.Equal(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7}, enqueuedBuf.Bytes())
   252  }
   253  
   254  func TestWriterWriteUntimedBatchTimerNoBatchSizeLimit(t *testing.T) {
   255  	ctrl := gomock.NewController(t)
   256  	defer ctrl.Finish()
   257  
   258  	numValues := 65536
   259  	timerValues := make([]float64, numValues)
   260  	for i := 0; i < numValues; i++ {
   261  		timerValues[i] = float64(i)
   262  	}
   263  	testLargeBatchTimer := unaggregated.MetricUnion{
   264  		Type:          metric.TimerType,
   265  		ID:            []byte("testLargeBatchTimer"),
   266  		BatchTimerVal: timerValues,
   267  	}
   268  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   269  	gomock.InOrder(
   270  		encoder.EXPECT().Len().Return(3),
   271  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   272  			Type: encoding.BatchTimerWithMetadatasType,
   273  			BatchTimerWithMetadatas: unaggregated.BatchTimerWithMetadatas{
   274  				BatchTimer:      testLargeBatchTimer.BatchTimer(),
   275  				StagedMetadatas: testStagedMetadatas,
   276  			},
   277  		}).Return(nil),
   278  		encoder.EXPECT().Len().Return(7),
   279  	)
   280  	opts := testOptions().SetMaxTimerBatchSize(0)
   281  	w := newInstanceWriter(testPlacementInstance, opts).(*writer)
   282  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   283  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   284  	}
   285  
   286  	payload := payloadUnion{
   287  		payloadType: untimedType,
   288  		untimed: untimedPayload{
   289  			metric:    testLargeBatchTimer,
   290  			metadatas: testStagedMetadatas,
   291  		},
   292  	}
   293  	require.NoError(t, w.Write(0, payload))
   294  }
   295  
   296  func TestWriterWriteUntimedBatchTimerSmallBatchSize(t *testing.T) {
   297  	ctrl := gomock.NewController(t)
   298  	defer ctrl.Finish()
   299  
   300  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   301  	gomock.InOrder(
   302  		encoder.EXPECT().Len().Return(3),
   303  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   304  			Type: encoding.BatchTimerWithMetadatasType,
   305  			BatchTimerWithMetadatas: unaggregated.BatchTimerWithMetadatas{
   306  				BatchTimer:      testBatchTimer.BatchTimer(),
   307  				StagedMetadatas: testStagedMetadatas,
   308  			},
   309  		}).Return(nil),
   310  		encoder.EXPECT().Len().Return(7),
   311  	)
   312  	opts := testOptions().SetMaxTimerBatchSize(140)
   313  	w := newInstanceWriter(testPlacementInstance, opts).(*writer)
   314  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   315  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   316  	}
   317  
   318  	payload := payloadUnion{
   319  		payloadType: untimedType,
   320  		untimed: untimedPayload{
   321  			metric:    testBatchTimer,
   322  			metadatas: testStagedMetadatas,
   323  		},
   324  	}
   325  	require.NoError(t, w.Write(0, payload))
   326  }
   327  
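        // A batch timer larger than the max timer batch size should be encoded as
        // ceil(numValues/maxBatchSize) separate messages, each carrying the same ID
        // and staged metadatas and a contiguous slice of the original values.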
   328  func TestWriterWriteUntimedBatchTimerLargeBatchSize(t *testing.T) {
   329  	ctrl := gomock.NewController(t)
   330  	defer ctrl.Finish()
   331  
   332  	numValues := 65536
   333  	timerValues := make([]float64, numValues)
   334  	for i := 0; i < numValues; i++ {
   335  		timerValues[i] = float64(i)
   336  	}
   337  	testLargeBatchTimer := unaggregated.MetricUnion{
   338  		Type:          metric.TimerType,
   339  		ID:            []byte("testLargeBatchTimer"),
   340  		BatchTimerVal: timerValues,
   341  	}
   342  
   343  	var (
   344  		msgTypeRes         []encoding.UnaggregatedMessageType
   345  		idRes              []id.RawID
   346  		valueRes           [][]float64
   347  		metadataRes        []metadata.StagedMetadatas
   348  		maxBatchSize       = 140
   349  		expectedNumBatches = int(math.Ceil(float64(numValues) / float64(maxBatchSize)))
   350  	)
   351  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   352  	encoder.EXPECT().Len().Return(7).MinTimes(2)
   353  	encoder.EXPECT().
   354  		EncodeMessage(gomock.Any()).
   355  		DoAndReturn(func(msg encoding.UnaggregatedMessageUnion) error {
   356  			msgTypeRes = append(msgTypeRes, msg.Type)
   357  			idRes = append(idRes, msg.BatchTimerWithMetadatas.ID)
   358  			valueRes = append(valueRes, msg.BatchTimerWithMetadatas.Values)
   359  			metadataRes = append(metadataRes, msg.BatchTimerWithMetadatas.StagedMetadatas)
   360  			return nil
   361  		}).Times(expectedNumBatches)
   362  	encoder.EXPECT().Relinquish()
   363  
   364  	opts := testOptions().SetMaxTimerBatchSize(maxBatchSize)
   365  	w := newInstanceWriter(testPlacementInstance, opts).(*writer)
   366  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   367  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   368  	}
   369  
   370  	payload := payloadUnion{
   371  		payloadType: untimedType,
   372  		untimed: untimedPayload{
   373  			metric:    testLargeBatchTimer,
   374  			metadatas: testStagedMetadatas,
   375  		},
   376  	}
   377  	require.NoError(t, w.Write(0, payload))
   378  	require.NoError(t, w.Flush())
   379  
   380  	var (
   381  		expectedMsgTypes  []encoding.UnaggregatedMessageType
   382  		expectedIDs       []id.RawID
   383  		expectedValues    [][]float64
   384  		expectedMetadatas []metadata.StagedMetadatas
   385  	)
   386  	for i := 0; i < expectedNumBatches; i++ {
   387  		start := i * maxBatchSize
   388  		end := start + maxBatchSize
   389  		if end > numValues {
   390  			end = numValues
   391  		}
   392  		expectedMsgTypes = append(expectedMsgTypes, encoding.BatchTimerWithMetadatasType)
   393  		expectedValues = append(expectedValues, timerValues[start:end])
   394  		expectedIDs = append(expectedIDs, id.RawID("testLargeBatchTimer"))
   395  		expectedMetadatas = append(expectedMetadatas, testStagedMetadatas)
   396  	}
   397  	require.Equal(t, expectedMsgTypes, msgTypeRes)
   398  	require.Equal(t, expectedIDs, idRes)
   399  	require.Equal(t, expectedValues, valueRes)
   400  	require.Equal(t, expectedMetadatas, metadataRes)
   401  }
   402  
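        // Uses the real queue with a stubbed writeFn to count how many protobuf
        // payloads contain the timer ID: 1400 values with MaxTimerBatchSize(10) yield
        // 140 batches, yet only a single buffer is expected to be enqueued for the
        // network write.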
   403  func TestWriterWriteUntimedLargeBatchTimerUsesMultipleBuffers(t *testing.T) {
   404  	const (
   405  		numValues  = 1400
   406  		testIDName = "testLargeBatchTimer"
   407  	)
   408  
   409  	timerValues := make([]float64, numValues)
   410  	for i := 0; i < numValues; i++ {
   411  		timerValues[i] = float64(i)
   412  	}
   413  
   414  	var (
   415  		testLargeBatchTimer = unaggregated.MetricUnion{
   416  			Type:          metric.TimerType,
   417  			ID:            []byte(testIDName),
   418  			BatchTimerVal: timerValues,
   419  		}
   420  		payload = payloadUnion{
   421  			payloadType: untimedType,
   422  			untimed: untimedPayload{
   423  				metric:    testLargeBatchTimer,
   424  				metadatas: testStagedMetadatas,
   425  			},
   426  		}
   427  		testScope = tally.NewTestScope("", nil)
   428  		iOpts     = instrument.NewOptions().SetMetricsScope(testScope)
   429  		opts      = testOptions().
   430  				SetMaxBatchSize(1000).
   431  				SetMaxTimerBatchSize(10).
   432  				SetInstrumentOptions(iOpts)
   433  
   434  		w            = newInstanceWriter(testPlacementInstance, opts).(*writer)
   435  		q            = w.queue.(*queue)
   436  		payloadCount int
   437  	)
   438  
   439  	q.writeFn = func(payload []byte) error {
   440  		payloadCount += strings.Count(string(payload), testIDName)
   441  		return nil
   442  	}
   443  
   444  	require.NoError(t, w.Write(0, payload))
   445  	require.NoError(t, w.Flush())
   446  	time.Sleep(1 * time.Second) // TODO: remove once queue is sync
   447  	require.NoError(t, w.Close())
   448  
   449  	enqueuedCounter := testScope.Snapshot().Counters()["buffers+action=enqueued"]
   450  	require.NotNil(t, enqueuedCounter)
    451  	// Expect a single byte buffer to be enqueued for the network write,
    452  	// but the timer itself should be split across multiple protobuf payloads.
   453  	require.Equal(t, int64(1), enqueuedCounter.Value())
   454  	require.Equal(t, 140, payloadCount)
   455  }
   456  
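        // When encoding one of the timer batches fails, the encoder should be
        // truncated back to its pre-write length (3 here) and the error returned.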
   457  func TestWriterWriteUntimedBatchTimerWriteError(t *testing.T) {
   458  	ctrl := gomock.NewController(t)
   459  	defer ctrl.Finish()
   460  
   461  	numValues := 7
   462  	timerValues := make([]float64, numValues)
   463  	for i := 0; i < numValues; i++ {
   464  		timerValues[i] = float64(i)
   465  	}
   466  	testLargeBatchTimer := unaggregated.MetricUnion{
   467  		Type:          metric.TimerType,
   468  		ID:            []byte("testLargeBatchTimer"),
   469  		BatchTimerVal: timerValues,
   470  	}
   471  
    472  	errTestWrite := errors.New("test write error")
   473  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   474  	gomock.InOrder(
   475  		encoder.EXPECT().Len().Return(3),
   476  		encoder.EXPECT().
   477  			EncodeMessage(gomock.Any()).
   478  			Return(nil),
   479  		encoder.EXPECT().
   480  			EncodeMessage(gomock.Any()).
   481  			Return(errTestWrite),
   482  
   483  		encoder.EXPECT().Truncate(3).Return(nil),
   484  	)
   485  	opts := testOptions().SetMaxTimerBatchSize(3)
   486  	w := newInstanceWriter(testPlacementInstance, opts).(*writer)
   487  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   488  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   489  	}
   490  
   491  	payload := payloadUnion{
   492  		payloadType: untimedType,
   493  		untimed: untimedPayload{
   494  			metric:    testLargeBatchTimer,
   495  			metadatas: testStagedMetadatas,
   496  		},
   497  	}
   498  	require.Equal(t, errTestWrite, w.Write(0, payload))
   499  }
   500  
   501  func TestWriterWriteUntimedBatchTimerEnqueueError(t *testing.T) {
   502  	ctrl := gomock.NewController(t)
   503  	defer ctrl.Finish()
   504  
   505  	errTestEnqueue := errors.New("test enqueue error")
   506  	queue := NewMockinstanceQueue(ctrl)
   507  	queue.EXPECT().Enqueue(gomock.Any()).Return(errTestEnqueue)
   508  	opts := testOptions().
   509  		SetMaxTimerBatchSize(1).
   510  		SetMaxBatchSize(1)
   511  	w := newInstanceWriter(testPlacementInstance, opts).(*writer)
   512  	w.queue = queue
   513  
   514  	payload := payloadUnion{
   515  		payloadType: untimedType,
   516  		untimed: untimedPayload{
   517  			metric:    testBatchTimer,
   518  			metadatas: testStagedMetadatas,
   519  		},
   520  	}
   521  	require.Equal(t, errTestEnqueue, w.Write(0, payload))
   522  }
   523  
   524  func TestWriterWriteUntimedGauge(t *testing.T) {
   525  	ctrl := gomock.NewController(t)
   526  	defer ctrl.Finish()
   527  
   528  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   529  	gomock.InOrder(
   530  		encoder.EXPECT().Len().Return(3),
   531  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   532  			Type: encoding.GaugeWithMetadatasType,
   533  			GaugeWithMetadatas: unaggregated.GaugeWithMetadatas{
   534  				Gauge:           testGauge.Gauge(),
   535  				StagedMetadatas: testStagedMetadatas,
   536  			},
   537  		}).Return(nil),
   538  		encoder.EXPECT().Len().Return(7),
   539  	)
   540  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
   541  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   542  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   543  	}
   544  
   545  	payload := payloadUnion{
   546  		payloadType: untimedType,
   547  		untimed: untimedPayload{
   548  			metric:    testGauge,
   549  			metadatas: testStagedMetadatas,
   550  		},
   551  	}
   552  	require.NoError(t, w.Write(0, payload))
   553  }
   554  
   555  func TestWriterWriteForwardedWithFlushingZeroSizeBefore(t *testing.T) {
   556  	ctrl := gomock.NewController(t)
   557  	defer ctrl.Finish()
   558  
   559  	var (
   560  		stream      = protobuf.NewBuffer([]byte{1, 2, 3, 4, 5, 6, 7}, nil)
   561  		enqueuedBuf protobuf.Buffer
   562  	)
   563  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   564  	gomock.InOrder(
   565  		encoder.EXPECT().Len().Return(0),
   566  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   567  			Type: encoding.ForwardedMetricWithMetadataType,
   568  			ForwardedMetricWithMetadata: aggregated.ForwardedMetricWithMetadata{
   569  				ForwardedMetric: testForwarded,
   570  				ForwardMetadata: testForwardMetadata,
   571  			},
   572  		}).Return(nil),
   573  		encoder.EXPECT().Len().Return(7),
   574  		encoder.EXPECT().Relinquish().Return(stream),
   575  	)
   576  	queue := NewMockinstanceQueue(ctrl)
   577  	queue.EXPECT().
   578  		Enqueue(gomock.Any()).
   579  		DoAndReturn(func(buf protobuf.Buffer) error {
   580  			enqueuedBuf = buf
   581  			return nil
   582  		})
   583  	w := newInstanceWriter(testPlacementInstance, testOptions().SetMaxBatchSize(3)).(*writer)
   584  	w.queue = queue
   585  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   586  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   587  	}
   588  
   589  	payload := payloadUnion{
   590  		payloadType: forwardedType,
   591  		forwarded: forwardedPayload{
   592  			metric:   testForwarded,
   593  			metadata: testForwardMetadata,
   594  		},
   595  	}
   596  	require.NoError(t, w.Write(0, payload))
   597  
   598  	enc, exists := w.encodersByShard[0]
   599  	require.True(t, exists)
   600  	require.NotNil(t, enc)
   601  	require.Equal(t, 1, len(w.encodersByShard))
   602  	require.Equal(t, []byte{1, 2, 3, 4, 5, 6, 7}, enqueuedBuf.Bytes())
   603  }
   604  
   605  func TestWriterWriteForwardedWithFlushingPositiveSizeBefore(t *testing.T) {
   606  	ctrl := gomock.NewController(t)
   607  	defer ctrl.Finish()
   608  
   609  	var (
   610  		stream      = protobuf.NewBuffer([]byte{1, 2, 3, 4, 5, 6, 7}, nil)
   611  		enqueuedBuf protobuf.Buffer
   612  	)
   613  	encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   614  	gomock.InOrder(
   615  		encoder.EXPECT().Len().Return(3),
   616  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   617  			Type: encoding.ForwardedMetricWithMetadataType,
   618  			ForwardedMetricWithMetadata: aggregated.ForwardedMetricWithMetadata{
   619  				ForwardedMetric: testForwarded,
   620  				ForwardMetadata: testForwardMetadata,
   621  			},
   622  		}).Return(nil),
   623  		encoder.EXPECT().Len().Return(7),
   624  		encoder.EXPECT().Relinquish().Return(stream),
   625  	)
   626  	queue := NewMockinstanceQueue(ctrl)
   627  	queue.EXPECT().
   628  		Enqueue(gomock.Any()).
   629  		DoAndReturn(func(buf protobuf.Buffer) error {
   630  			enqueuedBuf = buf
   631  			return nil
   632  		})
   633  	w := newInstanceWriter(testPlacementInstance, testOptions().SetMaxBatchSize(3)).(*writer)
   634  	w.queue = queue
   635  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   636  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   637  	}
   638  
   639  	payload := payloadUnion{
   640  		payloadType: forwardedType,
   641  		forwarded: forwardedPayload{
   642  			metric:   testForwarded,
   643  			metadata: testForwardMetadata,
   644  		},
   645  	}
   646  	require.NoError(t, w.Write(0, payload))
   647  
   648  	enc, exists := w.encodersByShard[0]
   649  	require.True(t, exists)
   650  	require.NotNil(t, enc)
   651  	require.Equal(t, 1, len(w.encodersByShard))
   652  	require.Equal(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7}, enqueuedBuf.Bytes())
   653  }
   654  
   655  func TestWriterWriteForwardedEncodeError(t *testing.T) {
   656  	ctrl := gomock.NewController(t)
   657  	defer ctrl.Finish()
   658  
   659  	errTestEncodeMetric := errors.New("error encoding metrics")
   660  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
   661  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   662  		encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   663  		encoder.EXPECT().Len().Return(0)
   664  		encoder.EXPECT().EncodeMessage(encoding.UnaggregatedMessageUnion{
   665  			Type: encoding.ForwardedMetricWithMetadataType,
   666  			ForwardedMetricWithMetadata: aggregated.ForwardedMetricWithMetadata{
   667  				ForwardedMetric: testForwarded,
   668  				ForwardMetadata: testForwardMetadata,
   669  			},
   670  		}).Return(errTestEncodeMetric)
   671  		encoder.EXPECT().Truncate(0).Return(nil)
   672  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   673  	}
   674  
   675  	payload := payloadUnion{
   676  		payloadType: forwardedType,
   677  		forwarded: forwardedPayload{
   678  			metric:   testForwarded,
   679  			metadata: testForwardMetadata,
   680  		},
   681  	}
   682  	require.Equal(t, errTestEncodeMetric, w.Write(0, payload))
   683  }
   684  
   685  func TestWriterWriteForwardedEnqueueError(t *testing.T) {
   686  	ctrl := gomock.NewController(t)
   687  	defer ctrl.Finish()
   688  
   689  	errTestEnqueue := errors.New("test enqueue error")
   690  	queue := NewMockinstanceQueue(ctrl)
   691  	queue.EXPECT().Enqueue(gomock.Any()).Return(errTestEnqueue)
   692  	opts := testOptions().
   693  		SetMaxTimerBatchSize(1).
   694  		SetMaxBatchSize(1)
   695  	w := newInstanceWriter(testPlacementInstance, opts).(*writer)
   696  	w.queue = queue
   697  
   698  	payload := payloadUnion{
   699  		payloadType: forwardedType,
   700  		forwarded: forwardedPayload{
   701  			metric:   testForwarded,
   702  			metadata: testForwardMetadata,
   703  		},
   704  	}
   705  	require.Equal(t, errTestEnqueue, w.Write(0, payload))
   706  }
   707  
   708  func TestWriterFlushClosed(t *testing.T) {
   709  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
   710  	w.closed = true
   711  	require.Equal(t, errInstanceWriterClosed, w.Flush())
   712  }
   713  
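        // Flush should attempt to enqueue every shard's buffer even when an earlier
        // enqueue fails, and the returned error should include the enqueue failure.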
   714  func TestWriterFlushPartialError(t *testing.T) {
   715  	ctrl := gomock.NewController(t)
   716  	defer ctrl.Finish()
   717  
   718  	var (
   719  		enqueueIdx   int
   720  		enqueued     []byte
   721  		errTestFlush = errors.New("test flush error")
   722  	)
   723  	queue := NewMockinstanceQueue(ctrl)
   724  	queue.EXPECT().
   725  		Enqueue(gomock.Any()).
   726  		DoAndReturn(func(buf protobuf.Buffer) error {
   727  			enqueued = append(enqueued, buf.Bytes()...)
   728  			enqueueIdx++
   729  			if enqueueIdx == 1 {
   730  				return errTestFlush
   731  			}
   732  			return nil
   733  		}).
   734  		Times(2)
   735  	queue.EXPECT().Flush().MinTimes(1)
   736  	opts := testOptions()
   737  	w := newInstanceWriter(testPlacementInstance, opts).(*writer)
   738  	w.queue = queue
   739  
   740  	encoderIdx := 0
   741  	w.newLockedEncoderFn = func(protobuf.UnaggregatedOptions) *lockedEncoder {
   742  		encoder := protobuf.NewMockUnaggregatedEncoder(ctrl)
   743  		switch encoderIdx {
   744  		case 0:
   745  			encoder.EXPECT().Len().Return(0)
   746  		case 1:
   747  			encoder.EXPECT().Len().Return(2)
   748  			encoder.EXPECT().Relinquish().Return(protobuf.NewBuffer([]byte{1, 2}, nil))
   749  		case 2:
   750  			encoder.EXPECT().Len().Return(4)
   751  			encoder.EXPECT().Relinquish().Return(protobuf.NewBuffer([]byte{3, 4, 5, 6}, nil))
   752  		}
   753  		encoderIdx++
   754  		return &lockedEncoder{UnaggregatedEncoder: encoder}
   755  	}
   756  	for i := 0; i < 3; i++ {
   757  		w.encodersByShard[uint32(i)] = w.newLockedEncoderFn(opts.EncoderOptions())
   758  	}
   759  	err := w.Flush()
   760  	require.Error(t, err)
   761  	require.True(t, strings.Contains(err.Error(), errTestFlush.Error()))
   762  	sort.Slice(enqueued, func(i, j int) bool { return enqueued[i] < enqueued[j] })
   763  	require.Equal(t, []byte{1, 2, 3, 4, 5, 6}, enqueued)
   764  }
   765  
   766  func TestWriterCloseAlreadyClosed(t *testing.T) {
   767  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
   768  	w.closed = true
   769  	require.Equal(t, errInstanceWriterClosed, w.Close())
   770  }
   771  
   772  func TestWriterCloseSuccess(t *testing.T) {
   773  	w := newInstanceWriter(testPlacementInstance, testOptions()).(*writer)
   774  	require.NoError(t, w.Close())
   775  }
   776  
   777  func TestWriterConcurrentWriteStress(t *testing.T) {
   778  	params := []struct {
   779  		maxInputBatchSize int
   780  		maxTimerBatchSize int
   781  		maxBatchSize      int
   782  	}{
   783  		// High likelihood of counter/gauge encoding triggering a flush in between
   784  		// releasing and re-acquiring locks when encoding large timer batches.
   785  		{
   786  			maxInputBatchSize: 150,
   787  			maxTimerBatchSize: 150,
   788  			maxBatchSize:      1000,
   789  		},
   790  		// Large timer batches.
   791  		{
   792  			maxInputBatchSize: 1000,
   793  			maxTimerBatchSize: 140,
   794  			maxBatchSize:      1440,
   795  		},
   796  	}
   797  
   798  	for _, param := range params {
   799  		testWriterConcurrentWriteStress(
   800  			t,
   801  			param.maxInputBatchSize,
   802  			param.maxTimerBatchSize,
   803  			param.maxBatchSize,
   804  		)
   805  	}
   806  }
   807  
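        // testWriterConcurrentWriteStress writes counters, batch timers, gauges,
        // forwarded and passthrough metrics to the same shard from five concurrent
        // goroutines, then decodes every enqueued buffer and checks that the counters,
        // timers, gauges and forwarded metrics all round-trip intact (timer batches
        // may be split on the wire and are merged before comparison).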
   808  func testWriterConcurrentWriteStress(
   809  	t *testing.T,
   810  	maxInputBatchSize int,
   811  	maxTimerBatchSize int,
   812  	maxBatchSize int,
   813  ) {
   814  	ctrl := gomock.NewController(t)
   815  	defer ctrl.Finish()
   816  
   817  	var (
   818  		numIter       = 3000
   819  		shard         = uint32(0)
   820  		counters      = make([]unaggregated.Counter, numIter)
   821  		timers        = make([]unaggregated.BatchTimer, numIter)
   822  		gauges        = make([]unaggregated.Gauge, numIter)
   823  		forwarded     = make([]aggregated.ForwardedMetric, numIter)
   824  		passthroughed = make([]aggregated.Metric, numIter)
   825  		resultsLock   sync.Mutex
   826  		results       [][]byte
   827  	)
   828  
   829  	// Construct metrics input.
   830  	for i := 0; i < numIter; i++ {
   831  		counters[i] = unaggregated.Counter{
   832  			ID:    []byte(fmt.Sprintf("counter%d", i)),
   833  			Value: int64(i),
   834  		}
   835  		gauges[i] = unaggregated.Gauge{
   836  			ID:    []byte(fmt.Sprintf("gauge%d", i)),
   837  			Value: float64(i),
   838  		}
   839  		batchSize := numIter - i
   840  		if batchSize > maxInputBatchSize {
   841  			batchSize = maxInputBatchSize
   842  		}
   843  		timerVals := make([]float64, batchSize)
   844  		for j := i; j < i+batchSize; j++ {
   845  			timerVals[j-i] = float64(j)
   846  		}
   847  		timers[i] = unaggregated.BatchTimer{
   848  			ID:     []byte(fmt.Sprintf("timer%d", i)),
   849  			Values: timerVals,
   850  		}
   851  		forwardedVals := []float64{float64(i) - 0.5, float64(i), float64(i) + 0.5}
   852  		forwarded[i] = aggregated.ForwardedMetric{
   853  			Type:      metric.GaugeType,
   854  			ID:        []byte(fmt.Sprintf("forwarded%d", i)),
   855  			TimeNanos: int64(i),
   856  			Values:    forwardedVals,
   857  		}
   858  		passthroughed[i] = aggregated.Metric{
   859  			Type:      metric.GaugeType,
   860  			ID:        []byte(fmt.Sprintf("passthroughed%d", i)),
   861  			TimeNanos: int64(i),
   862  			Value:     float64(i),
   863  		}
   864  	}
   865  
   866  	queue := NewMockinstanceQueue(ctrl)
   867  	queue.EXPECT().
   868  		Enqueue(gomock.Any()).
   869  		DoAndReturn(func(buf protobuf.Buffer) error {
   870  			bytes := buf.Bytes()
   871  			cloned := make([]byte, len(bytes))
   872  			copy(cloned, bytes)
   873  			resultsLock.Lock()
   874  			results = append(results, cloned)
   875  			resultsLock.Unlock()
   876  			return nil
   877  		}).
   878  		AnyTimes()
   879  	queue.EXPECT().Flush().MinTimes(1)
   880  	opts := testOptions().
   881  		SetMaxTimerBatchSize(maxTimerBatchSize).
   882  		SetMaxBatchSize(maxBatchSize)
   883  	w := newInstanceWriter(testPlacementInstance, opts).(*writer)
   884  	w.queue = queue
   885  
   886  	var wg sync.WaitGroup
   887  	wg.Add(5)
   888  
   889  	go func() {
   890  		defer wg.Done()
   891  
   892  		for i := 0; i < numIter; i++ {
   893  			mu := unaggregated.MetricUnion{
   894  				Type:       metric.CounterType,
   895  				ID:         counters[i].ID,
   896  				CounterVal: counters[i].Value,
   897  			}
   898  			payload := payloadUnion{
   899  				payloadType: untimedType,
   900  				untimed: untimedPayload{
   901  					metric:    mu,
   902  					metadatas: testStagedMetadatas,
   903  				},
   904  			}
   905  			require.NoError(t, w.Write(shard, payload))
   906  		}
   907  	}()
   908  
   909  	go func() {
   910  		defer wg.Done()
   911  
   912  		for i := 0; i < numIter; i++ {
   913  			mu := unaggregated.MetricUnion{
   914  				Type:          metric.TimerType,
   915  				ID:            timers[i].ID,
   916  				BatchTimerVal: timers[i].Values,
   917  			}
   918  			payload := payloadUnion{
   919  				payloadType: untimedType,
   920  				untimed: untimedPayload{
   921  					metric:    mu,
   922  					metadatas: testStagedMetadatas,
   923  				},
   924  			}
   925  			require.NoError(t, w.Write(shard, payload))
   926  		}
   927  	}()
   928  
   929  	go func() {
   930  		defer wg.Done()
   931  
   932  		for i := 0; i < numIter; i++ {
   933  			mu := unaggregated.MetricUnion{
   934  				Type:     metric.GaugeType,
   935  				ID:       gauges[i].ID,
   936  				GaugeVal: gauges[i].Value,
   937  			}
   938  			payload := payloadUnion{
   939  				payloadType: untimedType,
   940  				untimed: untimedPayload{
   941  					metric:    mu,
   942  					metadatas: testStagedMetadatas,
   943  				},
   944  			}
   945  			require.NoError(t, w.Write(shard, payload))
   946  		}
   947  	}()
   948  
   949  	go func() {
   950  		defer wg.Done()
   951  
   952  		for i := 0; i < numIter; i++ {
   953  			payload := payloadUnion{
   954  				payloadType: forwardedType,
   955  				forwarded: forwardedPayload{
   956  					metric:   forwarded[i],
   957  					metadata: testForwardMetadata,
   958  				},
   959  			}
   960  			require.NoError(t, w.Write(shard, payload))
   961  		}
   962  	}()
   963  
   964  	go func() {
   965  		defer wg.Done()
   966  
   967  		for i := 0; i < numIter; i++ {
   968  			payload := payloadUnion{
   969  				payloadType: passthroughType,
   970  				passthrough: passthroughPayload{
   971  					metric:        passthroughed[i],
   972  					storagePolicy: testPassthroughMetadata,
   973  				},
   974  			}
   975  			require.NoError(t, w.Write(shard, payload))
   976  		}
   977  	}()
   978  
   979  	wg.Wait()
    980  	require.NoError(t, w.Flush())
   981  
   982  	var (
   983  		resCounters      = make([]unaggregated.Counter, 0, numIter)
   984  		resTimers        = make([]unaggregated.BatchTimer, 0, numIter)
   985  		resGauges        = make([]unaggregated.Gauge, 0, numIter)
   986  		resForwarded     = make([]aggregated.ForwardedMetric, 0, numIter)
   987  		resPassthroughed = make([]aggregated.Metric, 0, numIter)
   988  	)
   989  	for i := 0; i < len(results); i++ {
   990  		buf := bytes.NewBuffer(results[i])
   991  		iter := protobuf.NewUnaggregatedIterator(buf, protobuf.NewUnaggregatedOptions())
   992  		for iter.Next() {
   993  			msgResult := iter.Current()
   994  			switch msgResult.Type {
   995  			case encoding.CounterWithMetadatasType:
   996  				require.Equal(t, testStagedMetadatas, msgResult.CounterWithMetadatas.StagedMetadatas)
   997  				metric := cloneMetric(msgResult.CounterWithMetadatas.Counter.ToUnion())
   998  				resCounters = append(resCounters, metric.Counter())
   999  			case encoding.BatchTimerWithMetadatasType:
  1000  				require.Equal(t, testStagedMetadatas, msgResult.BatchTimerWithMetadatas.StagedMetadatas)
  1001  				metric := cloneMetric(msgResult.BatchTimerWithMetadatas.BatchTimer.ToUnion())
  1002  				resTimers = append(resTimers, metric.BatchTimer())
  1003  			case encoding.GaugeWithMetadatasType:
  1004  				require.Equal(t, testStagedMetadatas, msgResult.GaugeWithMetadatas.StagedMetadatas)
  1005  				metric := cloneMetric(msgResult.GaugeWithMetadatas.Gauge.ToUnion())
  1006  				resGauges = append(resGauges, metric.Gauge())
  1007  			case encoding.ForwardedMetricWithMetadataType:
  1008  				require.Equal(t, testForwardMetadata, msgResult.ForwardedMetricWithMetadata.ForwardMetadata)
  1009  				metric := cloneForwardedMetric(msgResult.ForwardedMetricWithMetadata.ForwardedMetric)
  1010  				resForwarded = append(resForwarded, metric)
  1011  			case encoding.PassthroughMetricWithMetadataType:
  1012  				require.Equal(t, testPassthroughMetadata, msgResult.PassthroughMetricWithMetadata.StoragePolicy)
  1013  				metric := clonePassthroughedMetric(msgResult.PassthroughMetricWithMetadata.Metric)
  1014  				resPassthroughed = append(resPassthroughed, metric)
  1015  			default:
   1016  				require.Fail(t, fmt.Sprintf("unrecognized message type %v", msgResult.Type))
  1017  			}
  1018  		}
  1019  		require.Equal(t, io.EOF, iter.Err())
  1020  	}
  1021  
  1022  	// Sort counters for comparison purposes.
  1023  	sort.Slice(counters, func(i, j int) bool {
  1024  		return bytes.Compare(counters[i].ID, counters[j].ID) < 0
  1025  	})
  1026  	sort.Slice(resCounters, func(i, j int) bool {
  1027  		return bytes.Compare(resCounters[i].ID, resCounters[j].ID) < 0
  1028  	})
  1029  	require.Equal(t, counters, resCounters)
  1030  
  1031  	// Sort timers for comparison purposes.
  1032  	sort.Slice(timers, func(i, j int) bool {
  1033  		return bytes.Compare(timers[i].ID, timers[j].ID) < 0
  1034  	})
  1035  	sort.Slice(resTimers, func(i, j int) bool {
  1036  		return bytes.Compare(resTimers[i].ID, resTimers[j].ID) < 0
  1037  	})
  1038  	// Merge timers if necessary for comparison since they may be split into multiple batches.
  1039  	mergedResTimers := make([]unaggregated.BatchTimer, 0, numIter)
  1040  	curr := 0
  1041  	for i := 0; i < len(resTimers); i++ {
  1042  		if bytes.Equal(resTimers[curr].ID, resTimers[i].ID) {
  1043  			continue
  1044  		}
  1045  		var mergedValues []float64
  1046  		for j := curr; j < i; j++ {
  1047  			mergedValues = append(mergedValues, resTimers[j].Values...)
  1048  		}
  1049  		sort.Float64s(mergedValues)
  1050  		mergedResTimers = append(mergedResTimers, unaggregated.BatchTimer{
  1051  			ID:     resTimers[curr].ID,
  1052  			Values: mergedValues,
  1053  		})
  1054  		curr = i
  1055  	}
  1056  	if curr < len(resTimers) {
  1057  		var mergedValues []float64
  1058  		for j := curr; j < len(resTimers); j++ {
  1059  			mergedValues = append(mergedValues, resTimers[j].Values...)
  1060  		}
  1061  		sort.Float64s(mergedValues)
  1062  		mergedResTimers = append(mergedResTimers, unaggregated.BatchTimer{
  1063  			ID:     resTimers[curr].ID,
  1064  			Values: mergedValues,
  1065  		})
  1066  	}
  1067  	require.Equal(t, timers, mergedResTimers)
  1068  
  1069  	// Sort gauges for comparison purposes.
  1070  	sort.Slice(gauges, func(i, j int) bool {
  1071  		return bytes.Compare(gauges[i].ID, gauges[j].ID) < 0
  1072  	})
  1073  	sort.Slice(resGauges, func(i, j int) bool {
  1074  		return bytes.Compare(resGauges[i].ID, resGauges[j].ID) < 0
  1075  	})
  1076  	require.Equal(t, gauges, resGauges)
  1077  
  1078  	// Sort forwarded for comparison purposes.
  1079  	sort.Slice(forwarded, func(i, j int) bool {
  1080  		return bytes.Compare(forwarded[i].ID, forwarded[j].ID) < 0
  1081  	})
  1082  	sort.Slice(resForwarded, func(i, j int) bool {
  1083  		return bytes.Compare(resForwarded[i].ID, resForwarded[j].ID) < 0
  1084  	})
  1085  	require.Equal(t, forwarded, resForwarded)
  1086  }
  1087  
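         // Dropping the last reference should close the underlying writer; the close
         // happens asynchronously, hence the clock.WaitUntil polling.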
  1088  func TestRefCountedWriter(t *testing.T) {
  1089  	opts := testOptions()
  1090  	w := newRefCountedWriter(testPlacementInstance, opts)
  1091  	w.IncRef()
  1092  
  1093  	require.False(t, w.instanceWriter.(*writer).closed)
  1094  	w.DecRef()
  1095  	require.True(t, clock.WaitUntil(func() bool {
  1096  		wr := w.instanceWriter.(*writer)
  1097  		wr.Lock()
  1098  		defer wr.Unlock()
  1099  		return wr.closed
  1100  	}, 3*time.Second))
  1101  }
  1102  
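         // cloneMetric deep-copies the metric ID (and the timer values for batch
         // timers) so the stored result does not alias buffers the iterator may reuse.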
  1103  func cloneMetric(m unaggregated.MetricUnion) unaggregated.MetricUnion {
  1104  	mu := m
  1105  	clonedID := make(id.RawID, len(m.ID))
  1106  	copy(clonedID, m.ID)
  1107  	mu.ID = clonedID
  1108  	if m.Type == metric.TimerType {
  1109  		clonedTimerVal := make([]float64, len(m.BatchTimerVal))
  1110  		copy(clonedTimerVal, m.BatchTimerVal)
  1111  		mu.BatchTimerVal = clonedTimerVal
  1112  	}
  1113  	return mu
  1114  }
  1115  
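         // cloneForwardedMetric deep-copies the ID and values for the same reason as
         // cloneMetric above.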
  1116  func cloneForwardedMetric(m aggregated.ForwardedMetric) aggregated.ForwardedMetric {
  1117  	cloned := m
  1118  	cloned.ID = append([]byte(nil), m.ID...)
  1119  	cloned.Values = append([]float64(nil), m.Values...)
  1120  	return cloned
  1121  }
  1122  
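         // clonePassthroughedMetric deep-copies the metric ID; the scalar fields are
         // copied by the struct assignment.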
  1123  func clonePassthroughedMetric(m aggregated.Metric) aggregated.Metric {
  1124  	cloned := m
  1125  	cloned.ID = append([]byte(nil), m.ID...)
  1126  	cloned.Value = m.Value
  1127  	return cloned
  1128  }