github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/query/server/query_test.go

// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package server

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"sync"
	"testing"
	"time"

	clusterclient "github.com/m3db/m3/src/cluster/client"
	"github.com/m3db/m3/src/cluster/kv/mem"
	"github.com/m3db/m3/src/cmd/services/m3query/config"
	"github.com/m3db/m3/src/dbnode/client"
	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
	"github.com/m3db/m3/src/metrics/policy"
	"github.com/m3db/m3/src/msg/generated/proto/msgpb"
	m3msgproto "github.com/m3db/m3/src/msg/protocol/proto"
	"github.com/m3db/m3/src/query/api/v1/handler/prometheus/remote"
	"github.com/m3db/m3/src/query/api/v1/handler/prometheus/remote/test"
	rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
	"github.com/m3db/m3/src/query/storage/m3"
	"github.com/m3db/m3/src/query/storage/promremote/promremotetest"
	xclock "github.com/m3db/m3/src/x/clock"
	xconfig "github.com/m3db/m3/src/x/config"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/instrument"
	"github.com/m3db/m3/src/x/serialize"
	xtest "github.com/m3db/m3/src/x/test"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/mock/gomock"
	"github.com/prometheus/prometheus/promql"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
	"google.golang.org/grpc"
)

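// configYAML is the base coordinator configuration shared by the tests below:
// one unaggregated and one aggregated cluster namespace, an M3Msg ingest
// server on an ephemeral port, and tag options that use "_new" as the metric
// name tag with the quoted ID scheme.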
var configYAML = `
clusters:
  - namespaces:
      - namespace: prometheus_metrics
        type: unaggregated
        retention: 48h
      - namespace: prometheus_metrics_1m_aggregated
        type: aggregated
        retention: 120h
        resolution: 1m
        downsample:
          all: false

ingest:
  ingester:
    workerPoolSize: 100
    opPool:
      size: 100
    retry:
      maxRetries: 3
      jitter: true
    logSampleRate: 0.01
  m3msg:
    server:
      listenAddress: "0.0.0.0:0"
      retry:
        maxBackoff: 10s
        jitter: true

tagOptions:
  metricName: "_new"
  idScheme: quoted

readWorkerPoolPolicy:
  grow: true
  size: 100
  shards: 100
  killProbability: 0.3

writeWorkerPoolPolicy:
  grow: true
  size: 100
  shards: 100
  killProbability: 0.3
`

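// TestMultiProcessSetsProcessLabel runs the coordinator in multi-process mode
// and verifies, from within the child process, that exported metrics carry the
// multiprocess_id label. The parent process only checks that it spawns the
// child and exits cleanly.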
func TestMultiProcessSetsProcessLabel(t *testing.T) {
	ctrl := gomock.NewController(xtest.Reporter{T: t})
	defer ctrl.Finish()

	cfg := configFromYAML(t, configYAML)

	metricsPort := 8765
	cfg.Metrics.PrometheusReporter.ListenAddress = fmt.Sprintf("127.0.0.1:%d", metricsPort)
	cfg.MultiProcess = config.MultiProcessConfiguration{
		Enabled: true,
		Count:   2,
	}
	detailedMetrics := instrument.DetailedExtendedMetrics
	cfg.Metrics.ExtendedMetrics = &detailedMetrics

	multiProcessInstance := multiProcessProcessID()
	if multiProcessInstance == "" {
		// The parent process only needs to ensure that it spawns the child
		// and that the child exits cleanly.
		// The child process ensures that everything comes up properly
		// and that the metrics have the correct labels on them.
		result := Run(RunOptions{Config: cfg})
		assert.True(t, result.MultiProcessRun)
		assert.True(t, result.MultiProcessIsParentCleanExit)
		return
	}

	// Override the client creation
	require.Equal(t, 1, len(cfg.Clusters))

	session := client.NewMockSession(ctrl)
	session.EXPECT().Close().AnyTimes()

	dbClient := client.NewMockClient(ctrl)
	dbClient.EXPECT().DefaultSession().Return(session, nil).AnyTimes()

	cfg.Clusters[0].NewClientFromConfig = func(
		cfg client.Configuration,
		params client.ConfigurationParameters,
		custom ...client.CustomAdminOption,
	) (client.Client, error) {
		return dbClient, nil
	}

	downsamplerReadyCh := make(chan struct{}, 1)
	resultCh := make(chan RunResult, 1)
	opts := runServerOpts{cfg: cfg, ctrl: ctrl, downsamplerReadyCh: downsamplerReadyCh, runResultCh: resultCh}
	_, stopServer := runServer(t, opts)
	defer func() {
		stopServer()
		result := <-resultCh
		assert.True(t, result.MultiProcessRun)
		assert.False(t, result.MultiProcessIsParentCleanExit)
	}()

	r, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/metrics", metricsPort)) //nolint
	require.NoError(t, err)
	defer r.Body.Close()
	bodyBytes, err := ioutil.ReadAll(r.Body)
	require.NoError(t, err)
	metricsResponse := string(bodyBytes)
	assert.Contains(t, metricsResponse, "coordinator_runtime_memory_allocated{multiprocess_id=\"1\"}")
	assert.Contains(t, metricsResponse, "coordinator_ingest_success{multiprocess_id=\"1\"}")
}

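// TestWriteH1 exercises the Prometheus remote write endpoint over plain
// HTTP/1.1; TestWriteH2C below repeats the same write with H2C enabled.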
func TestWriteH1(t *testing.T) {
	ctrl := gomock.NewController(xtest.Reporter{T: t})
	defer ctrl.Finish()

	cfg := configFromYAML(t, configYAML)

	testWrite(t, cfg, ctrl)
}

func TestWriteH2C(t *testing.T) {
	ctrl := gomock.NewController(xtest.Reporter{T: t})
	defer ctrl.Finish()

	cfg := configFromYAML(t, configYAML)

	cfg.HTTP.EnableH2C = true

	testWrite(t, cfg, ctrl)
}

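// TestIngestH1 and TestIngestH2C exercise M3Msg ingestion with the
// coordinator's HTTP server running in HTTP/1.1 and H2C mode respectively.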
func TestIngestH1(t *testing.T) {
	ctrl := gomock.NewController(xtest.Reporter{T: t})
	defer ctrl.Finish()

	cfg := configFromYAML(t, configYAML)

	testIngest(t, cfg, ctrl)
}

func TestIngestH2C(t *testing.T) {
	ctrl := gomock.NewController(xtest.Reporter{T: t})
	defer ctrl.Finish()

	cfg := configFromYAML(t, configYAML)

	cfg.HTTP.EnableH2C = true

	testIngest(t, cfg, ctrl)
}

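// TestPromRemoteBackend runs the coordinator with the prom-remote backend
// pointed at a fake Prometheus remote write server and verifies that writes
// are forwarded and that backend errors propagate back to the caller.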
func TestPromRemoteBackend(t *testing.T) {
	ctrl := gomock.NewController(xtest.Reporter{T: t})
	defer ctrl.Finish()

	externalFakePromServer := promremotetest.NewServer(t)
	defer externalFakePromServer.Close()

	cfg := configFromYAML(t, fmt.Sprintf(`
prometheusRemoteBackend:
  endpoints:
  - name: defaultEndpointForTests
    address: "%s"

backend: prom-remote

tagOptions:
  allowTagNameDuplicates: true
`, externalFakePromServer.WriteAddr()))

	require.Equal(t, config.PromRemoteStorageType, cfg.Backend)

	addr, stopServer := runServer(t, runServerOpts{cfg: cfg, ctrl: ctrl})
	defer stopServer()

	promReq := test.GeneratePromWriteRequest()
	promReqBody := test.GeneratePromWriteRequestBody(t, promReq)
	requestURL := fmt.Sprintf("http://%s%s", addr, remote.PromWriteURL)
	newRequest := func() *http.Request {
		req, err := http.NewRequestWithContext(
			context.TODO(),
			http.MethodPost,
			requestURL,
			promReqBody,
		)
		require.NoError(t, err)
		return req
	}

	t.Run("write request", func(t *testing.T) {
		defer externalFakePromServer.Reset()
		resp, err := http.DefaultClient.Do(newRequest())
		require.NoError(t, err)

		assert.NotNil(t, externalFakePromServer.GetLastWriteRequest())
		require.NoError(t, resp.Body.Close())
	})

	t.Run("bad request propagates", func(t *testing.T) {
		defer externalFakePromServer.Reset()
		externalFakePromServer.SetError("badRequest", http.StatusBadRequest)

		resp, err := http.DefaultClient.Do(newRequest())
		require.NoError(t, err)

		assert.Equal(t, 400, resp.StatusCode)
		require.NoError(t, resp.Body.Close())
	})
}

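// TestGRPCBackend runs the coordinator with the grpc backend against an
// in-process rpc.QueryServer stub and verifies that a Prometheus remote read
// request reaches the remote gRPC storage as a Fetch call.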
func TestGRPCBackend(t *testing.T) {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	grpcAddr := lis.Addr().String()
	cfg := configFromYAML(t, fmt.Sprintf(`
rpc:
  remoteListenAddresses: ["%s"]

backend: grpc

tagOptions:
  metricName: "bar"
  idScheme: prepend_meta

readWorkerPoolPolicy:
  grow: true
  size: 100
  shards: 1000
  killProbability: 0.3

writeWorkerPoolPolicy:
  grow: true
  size: 100
  shards: 1000
  killProbability: 0.3
`, grpcAddr))

	ctrl := gomock.NewController(xtest.Reporter{T: t})
	defer ctrl.Finish()

	s := grpc.NewServer()
	defer s.GracefulStop()
	qs := newQueryServer()
	rpc.RegisterQueryServer(s, qs)
	go func() {
		_ = s.Serve(lis)
	}()

	// No clusters
	require.Equal(t, 0, len(cfg.Clusters))
	require.Equal(t, config.GRPCStorageType, cfg.Backend)

	addr, stopServer := runServer(t, runServerOpts{cfg: cfg, ctrl: ctrl})
	defer stopServer()

	// Send Prometheus read request
	promReq := test.GeneratePromReadRequest()
	promReqBody := test.GeneratePromReadRequestBody(t, promReq)
	req, err := http.NewRequestWithContext(
		context.TODO(),
		http.MethodPost,
		fmt.Sprintf("http://%s%s", addr, remote.PromReadURL),
		promReqBody,
	)
	require.NoError(t, err)

	resp, err := http.DefaultClient.Do(req)
	require.NoError(t, err)
	require.NoError(t, resp.Body.Close())
	assert.Equal(t, 1, qs.reads)
}

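// testWrite issues a Prometheus remote write against a running coordinator and
// verifies that the samples are written to the unaggregated namespace with IDs
// formed from the configured tag options.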
func testWrite(t *testing.T, cfg config.Configuration, ctrl *gomock.Controller) {
	// Override the client creation
	require.Equal(t, 1, len(cfg.Clusters))

	session := client.NewMockSession(ctrl)
	for _, value := range []float64{1, 2} {
		session.EXPECT().WriteTagged(ident.NewIDMatcher("prometheus_metrics"),
			ident.NewIDMatcher(`{_new="first",biz="baz",foo="bar"}`),
			gomock.Any(),
			gomock.Any(),
			value,
			gomock.Any(),
			nil)
	}
	for _, value := range []float64{3, 4} {
		session.EXPECT().WriteTagged(ident.NewIDMatcher("prometheus_metrics"),
			ident.NewIDMatcher(`{_new="second",bar="baz",foo="qux"}`),
			gomock.Any(),
			gomock.Any(),
			value,
			gomock.Any(),
			nil)
	}
	session.EXPECT().Close().AnyTimes()

	dbClient := client.NewMockClient(ctrl)
	dbClient.EXPECT().DefaultSession().Return(session, nil).AnyTimes()

	cfg.Clusters[0].NewClientFromConfig = func(
		cfg client.Configuration,
		params client.ConfigurationParameters,
		custom ...client.CustomAdminOption,
	) (client.Client, error) {
		return dbClient, nil
	}

	downsamplerReadyCh := make(chan struct{}, 1)
	addr, stopServer := runServer(t, runServerOpts{cfg: cfg, ctrl: ctrl, downsamplerReadyCh: downsamplerReadyCh})
	defer stopServer()

	// Send Prometheus write request
	promReq := test.GeneratePromWriteRequest()
	promReqBody := test.GeneratePromWriteRequestBody(t, promReq)
	req, err := http.NewRequest(http.MethodPost,
		fmt.Sprintf("http://%s%s", addr, remote.PromWriteURL), promReqBody)
	require.NoError(t, err)

	res, err := http.DefaultClient.Do(req)
	require.NoError(t, err)
	require.Equal(t, http.StatusOK, res.StatusCode)
	require.NoError(t, res.Body.Close())
}

// testIngest sends an M3Msg metric to the coordinator's M3Msg server and
// verifies that the tag options are correctly propagated from the config all
// the way to the M3Msg ingester, so that the write to the DB carries a
// correctly formed ID.
func testIngest(t *testing.T, cfg config.Configuration, ctrl *gomock.Controller) {
	// Override the client creation
	require.Equal(t, 1, len(cfg.Clusters))

	numWrites := atomic.NewInt32(0)

	session := client.NewMockSession(ctrl)
	session.EXPECT().
		WriteTagged(ident.NewIDMatcher("prometheus_metrics_1m_aggregated"),
			ident.NewIDMatcher(`{_new="first",biz="baz",foo="bar"}`),
			gomock.Any(),
			gomock.Any(),
			42.0,
			gomock.Any(),
			nil).
		Do(func(_, _, _, _, _, _, _ interface{}) {
			numWrites.Add(1)
		})
	session.EXPECT().Close().AnyTimes()

	dbClient := client.NewMockClient(ctrl)
	dbClient.EXPECT().DefaultSession().Return(session, nil).AnyTimes()

	cfg.Clusters[0].NewClientFromConfig = m3.NewClientFromConfig(
		func(
			cfg client.Configuration,
			params client.ConfigurationParameters,
			custom ...client.CustomAdminOption,
		) (client.Client, error) {
			return dbClient, nil
		})

	var (
		m3msgListenerCh    = make(chan net.Listener, 1)
		downsamplerReadyCh = make(chan struct{}, 1)
		runOpts            = runServerOpts{
			cfg:                cfg,
			ctrl:               ctrl,
			downsamplerReadyCh: downsamplerReadyCh,
			m3msgListenerCh:    m3msgListenerCh,
		}
	)

	_, stopServer := runServer(t, runOpts)
	defer stopServer()

	// Send ingest message.
	tagEncoderPool := serialize.NewTagEncoderPool(serialize.NewTagEncoderOptions(), nil)
	tagEncoderPool.Init()
	tagEncoder := tagEncoderPool.Get()
	err := tagEncoder.Encode(ident.MustNewTagStringsIterator(
		"_new", "first",
		"biz", "baz",
		"foo", "bar"))
	require.NoError(t, err)
	id, ok := tagEncoder.Data()
	require.True(t, ok)
	sp, err := policy.MustParseStoragePolicy("1m:120h").Proto()
	require.NoError(t, err)

	// Marshal the aggregated metric.
	message, err := proto.Marshal(&metricpb.AggregatedMetric{
		Metric: metricpb.TimedMetricWithStoragePolicy{
			TimedMetric: metricpb.TimedMetric{
				Type:      metricpb.MetricType_GAUGE,
				Id:        id.Bytes(),
				TimeNanos: time.Now().UnixNano(),
				Value:     42,
			},
			StoragePolicy: *sp,
		},
	})
	require.NoError(t, err)

	// Encode as m3msg protobuf message.
	encoder := m3msgproto.NewEncoder(m3msgproto.NewOptions())
	err = encoder.Encode(&msgpb.Message{
		Value: message,
	})
	require.NoError(t, err)
	m3msgListener := <-m3msgListenerCh
	conn, err := net.Dial("tcp", m3msgListener.Addr().String())
	require.NoError(t, err)
	_, err = conn.Write(encoder.Bytes())
	require.NoError(t, err)

	// Now wait for write.
	require.True(t, xclock.WaitUntil(func() bool {
		return numWrites.Load() == 1
	}, 30*time.Second))
}

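// TestCreateEnginesWithResolutionBasedLookbacks verifies that an engine is
// created per resolution-derived lookback: with a multiplier of 2, the 5m, 10m
// and 15m namespace resolutions map to 10m (the default lookback), 20m and 30m.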
func TestCreateEnginesWithResolutionBasedLookbacks(t *testing.T) {
	var (
		defaultLookback      = 10 * time.Minute
		resolutionMultiplier = 2
		clusters             = m3.ClustersStaticConfiguration{
			{
				Namespaces: []m3.ClusterStaticNamespaceConfiguration{
					{Resolution: 5 * time.Minute},
					{Resolution: 10 * time.Minute},
				},
			},
			{
				Namespaces: []m3.ClusterStaticNamespaceConfiguration{
					{Resolution: 5 * time.Minute},
					{Resolution: 15 * time.Minute},
				},
			},
		}
		newEngineFn = func(lookback time.Duration) (*promql.Engine, error) {
			return promql.NewEngine(promql.EngineOpts{}), nil
		}

		expecteds = []time.Duration{defaultLookback, 20 * time.Minute, 30 * time.Minute}
	)
	defaultEngine, err := newEngineFn(defaultLookback)
	require.NoError(t, err)

	enginesByLookback, err := createEnginesWithResolutionBasedLookbacks(
		defaultLookback,
		defaultEngine,
		clusters,
		resolutionMultiplier,
		newEngineFn,
	)
	require.NoError(t, err)

	engine, ok := enginesByLookback[defaultLookback]
	require.True(t, ok)
	assert.Equal(t, defaultEngine, engine)

	for _, expected := range expecteds {
		engine, ok = enginesByLookback[expected]
		require.True(t, ok)
		assert.NotNil(t, engine)
	}
}

type closeFn func()

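// newTestFile writes contents to a temporary file and returns the file along
// with a cleanup func that closes and removes it.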
func newTestFile(t *testing.T, fileName, contents string) (*os.File, closeFn) {
	tmpFile, err := ioutil.TempFile("", fileName)
	require.NoError(t, err)

	_, err = tmpFile.Write([]byte(contents))
	require.NoError(t, err)

	return tmpFile, func() {
		assert.NoError(t, tmpFile.Close())
		assert.NoError(t, os.Remove(tmpFile.Name()))
	}
}

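// configFromYAML renders partYAML beneath a base configuration (ephemeral
// listen addresses and a Prometheus metrics reporter) and loads it with
// xconfig.LoadFile.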
func configFromYAML(t *testing.T, partYAML string) config.Configuration {
	cfgYAML := fmt.Sprintf(`
listenAddress: 127.0.0.1:0

logging:
  level: info

metrics:
  scope:
    prefix: "coordinator"
  prometheus:
    handlerPath: /metrics
    listenAddress: "127.0.0.1:0"
    onError: stderr
  sanitization: prometheus
  samplingRate: 1.0

%s
`, partYAML)

	configFile, closeFile := newTestFile(t, "config_backend.yaml", cfgYAML)
	defer closeFile()
	var cfg config.Configuration
	err := xconfig.LoadFile(&cfg, configFile.Name(), xconfig.Options{})
	require.NoError(t, err)
	return cfg
}

type runServerOpts struct {
	cfg                config.Configuration
	ctrl               *gomock.Controller
	downsamplerReadyCh chan struct{}
	m3msgListenerCh    chan net.Listener
	runResultCh        chan RunResult
}

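// runServer starts the coordinator via Run in a background goroutine, waits
// for the downsampler (when requested), the HTTP listener and the health
// endpoint, and returns the listen address together with a closeFn that
// interrupts the server and waits for Run to return. Typical usage:
//
//	addr, stopServer := runServer(t, runServerOpts{cfg: cfg, ctrl: ctrl})
//	defer stopServer()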
func runServer(t *testing.T, opts runServerOpts) (string, closeFn) {
	var (
		interruptCh     = make(chan error)
		doneCh          = make(chan struct{})
		listenerCh      = make(chan net.Listener, 1)
		clusterClient   = clusterclient.NewMockClient(opts.ctrl)
		clusterClientCh chan clusterclient.Client
	)

	if len(opts.cfg.Clusters) > 0 || opts.cfg.ClusterManagement.Etcd != nil {
		clusterClientCh = make(chan clusterclient.Client, 1)
		store := mem.NewStore()
		clusterClient.EXPECT().KV().Return(store, nil).MaxTimes(2)
		clusterClientCh <- clusterClient
	}

	go func() {
		r := Run(RunOptions{
			Config:             opts.cfg,
			InterruptCh:        interruptCh,
			ListenerCh:         listenerCh,
			ClusterClient:      clusterClientCh,
			DownsamplerReadyCh: opts.downsamplerReadyCh,
			M3MsgListenerCh:    opts.m3msgListenerCh,
		})
		doneCh <- struct{}{}
		if opts.runResultCh != nil {
			opts.runResultCh <- r
		}
	}()

	if opts.downsamplerReadyCh != nil {
		// Wait for downsampler to be ready.
		<-opts.downsamplerReadyCh
	}

	// Wait for listener
	listener := <-listenerCh
	addr := listener.Addr().String()

	// Wait for server to come up
	waitForServerHealthy(t, addr)

	return addr, func() {
		// Ensure closing the server performs as expected.
		interruptCh <- fmt.Errorf("interrupt")
		<-doneCh
	}
}

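// waitForServerHealthy polls the /health endpoint until it returns 200 OK,
// failing the test if the server is not healthy within 10 seconds.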
func waitForServerHealthy(t *testing.T, addr string) {
	maxWait := 10 * time.Second
	startAt := time.Now()
	for time.Since(startAt) < maxWait {
		req, err := http.NewRequestWithContext(context.TODO(), "GET", fmt.Sprintf("http://%s/health", addr), nil)
		require.NoError(t, err)
		res, err := http.DefaultClient.Do(req)
		if res != nil {
			require.NoError(t, res.Body.Close())
		}
		if err != nil || res.StatusCode != http.StatusOK {
			time.Sleep(100 * time.Millisecond)
			continue
		}
		return
	}
	require.FailNow(t, "waited for server healthy longer than limit: "+
		maxWait.String())
}

var _ rpc.QueryServer = &queryServer{}

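// queryServer is a minimal in-memory rpc.QueryServer used by TestGRPCBackend;
// it records how many fetch, search and complete-tags calls it has served.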
type queryServer struct {
	up                            time.Time
	reads, searches, tagCompletes int
	mu                            sync.Mutex
}

func newQueryServer() *queryServer {
	return &queryServer{up: time.Now()}
}

func (s *queryServer) Health(
	ctx context.Context,
	req *rpc.HealthRequest,
) (*rpc.HealthResponse, error) {
	up := time.Since(s.up)
	return &rpc.HealthResponse{
		UptimeDuration:    up.String(),
		UptimeNanoseconds: int64(up),
	}, nil
}

func (s *queryServer) Fetch(
	*rpc.FetchRequest,
	rpc.Query_FetchServer,
) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.reads++
	return nil
}

func (s *queryServer) Search(
	*rpc.SearchRequest,
	rpc.Query_SearchServer,
) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.searches++
	return nil
}

func (s *queryServer) CompleteTags(
	*rpc.CompleteTagsRequest,
	rpc.Query_CompleteTagsServer,
) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.tagCompletes++
	return nil
}