github.com/m3db/m3@v1.5.0/src/integration/prometheus/prometheus.go

// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

// Package prometheus contains resources for starting a docker-backed
// Prometheus.
package prometheus

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"regexp"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"

	"github.com/m3db/m3/src/cluster/generated/proto/kvpb"
	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
	"github.com/m3db/m3/src/dbnode/kvconfig"
	"github.com/m3db/m3/src/integration/resources"
	"github.com/m3db/m3/src/integration/resources/docker"
	"github.com/m3db/m3/src/query/api/v1/handler/database"
	"github.com/m3db/m3/src/query/api/v1/options"
	"github.com/m3db/m3/src/query/generated/proto/prompb"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/x/headers"
	"github.com/m3db/m3/src/x/ident"
	"github.com/m3db/m3/src/x/pool"
	"github.com/m3db/m3/src/x/serialize"
	xtime "github.com/m3db/m3/src/x/time"
)

const (
	// TestPrometheusDBNodeConfig is the test config for the dbnode.
	TestPrometheusDBNodeConfig = `
db: {}
`

	// TestPrometheusCoordinatorConfig is the test config for the coordinator.
	TestPrometheusCoordinatorConfig = `
limits:
  perQuery:
    maxFetchedSeries: 100

query:
  restrictTags:
    match:
      - name: restricted_metrics_type
        type: NOTEQUAL
        value: hidden
    strip:
    - restricted_metrics_type

lookbackDuration: 10m
`
)
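
// The coordinator config above drives several of the tests below: each query
// may fetch at most 100 series, series tagged restricted_metrics_type="hidden"
// are hidden by default (with the tag itself stripped from results), and the
// lookback window is extended to 10m. For example, under the default
// restriction an instant query such as
//
//	{restricted_metrics_type="hidden"}
//
// returns nothing unless the restriction is lifted via the
// headers.RestrictByTagsJSONHeader override (see testQueryRestrictTags below).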

// TODO:
// - Refactor RunTest to be a method on a struct and provide resources to constructor
// - Extract query limit and timeout status code as params to RunTest

// RunTest contains the logic for running the prometheus test.
func RunTest(t *testing.T, m3 resources.M3Resources, prom resources.ExternalResources) {
	logger, err := zap.NewDevelopment()
	require.NoError(t, err)

	logger.Info("running prometheus tests")

	p := prom.(*docker.Prometheus)

	testPrometheusRemoteRead(t, p, logger)
	testPrometheusRemoteWriteMultiNamespaces(t, p, logger)
	testPrometheusRemoteWriteEmptyLabelNameReturns400(t, m3.Coordinator(), logger)
	testPrometheusRemoteWriteEmptyLabelValueReturns400(t, m3.Coordinator(), logger)
	testPrometheusRemoteWriteDuplicateLabelReturns400(t, m3.Coordinator(), logger)
	testPrometheusRemoteWriteTooOldReturns400(t, m3.Coordinator(), logger)
	testPrometheusRemoteWriteRestrictMetricsType(t, m3.Coordinator(), logger)
	testQueryLookbackApplied(t, m3.Coordinator(), logger)
	testQueryLimitsApplied(t, m3.Coordinator(), logger)
	testQueryRestrictMetricsType(t, m3.Coordinator(), logger)
	testQueryTimeouts(t, m3.Coordinator(), logger)
	testPrometheusQueryNativeTimeout(t, m3.Coordinator(), logger)
	testQueryRestrictTags(t, m3.Coordinator(), logger)
	testPrometheusRemoteWriteMapTags(t, m3.Coordinator(), logger)
	testSeries(t, m3.Coordinator(), logger)
	testLabelQueryLimitsApplied(t, m3.Coordinator(), logger)
	testLabels(t, m3.Coordinator(), logger)
	testQueryLimitsGlobalApplied(t, m3.Coordinator(), logger)
	testGlobalAggregateLimits(t, m3.Coordinator(), logger)

	// Correctness tests
	testParseThreshold(t, m3.Coordinator(), logger)
	testReplace(t, m3.Coordinator(), logger)
	testEmptyMatcher(t, m3.Coordinator(), logger)
	testDebugPromReturnsDuplicates(t, m3, logger)
}

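// testPrometheusRemoteRead checks that the docker-backed Prometheus can
// answer a query for one of its own remote-storage metrics, i.e. that the
// remote read/write path against M3 is wired up.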
func testPrometheusRemoteRead(t *testing.T, p *docker.Prometheus, logger *zap.Logger) {
	// Ensure Prometheus can proxy a Prometheus query
	logger.Info("testing prometheus remote read")
	verifyPrometheusQuery(t, p, "prometheus_remote_storage_samples_total", 100)
}

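// testPrometheusRemoteWriteMultiNamespaces confirms that remote writes are
// proxied into both the unaggregated and the aggregated M3 namespaces, using
// the coordinator's own database_write_tagged_success metric per namespace.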
func testPrometheusRemoteWriteMultiNamespaces(
	t *testing.T,
	p *docker.Prometheus,
	logger *zap.Logger,
) {
	logger.Info("testing remote write to multiple namespaces")

	// Make sure we're proxying writes to the unaggregated namespace
	query := fmt.Sprintf(
		"database_write_tagged_success{namespace=\"%v\"}", resources.UnaggName,
	)
	verifyPrometheusQuery(t, p, query, 0)

	// Make sure we're proxying writes to the aggregated namespace
	query = fmt.Sprintf(
		"database_write_tagged_success{namespace=\"%v\"}", resources.AggName,
	)
	verifyPrometheusQuery(t, p, query, 0)
}

func testPrometheusRemoteWriteEmptyLabelNameReturns400(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test write empty name for a label returns HTTP 400")
	err := coordinator.WriteProm("foo_metric", map[string]string{
		"non_empty_name": "foo",
		"":               "bar",
	}, []prompb.Sample{
		{
			Value:     42,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
		},
	}, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "400")
}

func testPrometheusRemoteWriteEmptyLabelValueReturns400(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test write empty value for a label returns HTTP 400")
	err := coordinator.WriteProm("foo_metric", map[string]string{
		"foo":            "bar",
		"non_empty_name": "",
	}, []prompb.Sample{
		{
			Value:     42,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
		},
	}, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "400")
}

func testPrometheusRemoteWriteDuplicateLabelReturns400(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test write with duplicate labels returns HTTP 400")
	writeRequest := prompb.WriteRequest{
		Timeseries: []prompb.TimeSeries{
			{
				Labels: []prompb.Label{
					{
						Name:  []byte(model.MetricNameLabel),
						Value: []byte("foo_metric"),
					},
					{
						Name:  []byte("dupe_name"),
						Value: []byte("foo"),
					},
					{
						Name:  []byte("non_dupe_name"),
						Value: []byte("bar"),
					},
					{
						Name:  []byte("dupe_name"),
						Value: []byte("baz"),
					},
				},
				Samples: []prompb.Sample{
					{
						Value:     42,
						Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
					},
				},
			},
		},
	}
	err := coordinator.WritePromWithRequest(writeRequest, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "400")
}

func testPrometheusRemoteWriteTooOldReturns400(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test write into the past returns HTTP 400")
	err := coordinator.WriteProm("foo_metric", nil, []prompb.Sample{
		{
			Value:     3.142,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now().Add(-1 * time.Hour)),
		},
	}, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "400")
}

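// testPrometheusRemoteWriteRestrictMetricsType exercises the metrics-type
// write headers: unaggregated writes need only the type header, while
// aggregated writes must also carry a storage policy such as "5s:6h"
// (5s resolution, 6h retention).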
func testPrometheusRemoteWriteRestrictMetricsType(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test write with unaggregated metrics type works as expected")
	err := coordinator.WriteProm("bar_metric", nil, []prompb.Sample{
		{
			Value:     42.42,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
		},
	}, resources.Headers{
		headers.MetricsTypeHeader: []string{"unaggregated"},
	})
	require.NoError(t, err)

	logger.Info("test write with aggregated metrics type works as expected")
	err = coordinator.WriteProm("bar_metric", nil, []prompb.Sample{
		{
			Value:     84.84,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
		},
	}, resources.Headers{
		headers.MetricsTypeHeader:          []string{"aggregated"},
		headers.MetricsStoragePolicyHeader: []string{"5s:6h"},
	})
	require.NoError(t, err)
}

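// testQueryLookbackApplied writes a sample nine minutes in the past and
// range-queries it; with the configured 10m lookbackDuration the value
// should be carried forward to within two steps of now.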
func testQueryLookbackApplied(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	// NB: this test depends on the coordinator config for this test suite
	// (see TestPrometheusCoordinatorConfig) setting "lookbackDuration: 10m".
	logger.Info("test lookback config respected")

	err := coordinator.WriteProm("lookback_test", nil, []prompb.Sample{
		{
			Value:     42.42,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now().Add(-9 * time.Minute)),
		},
	}, resources.Headers{
		headers.MetricsTypeHeader: []string{"unaggregated"},
	})
	require.NoError(t, err)

	requireRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "lookback_test",
			Start: time.Now().Add(-10 * time.Minute),
			End:   time.Now(),
			Step:  15 * time.Second,
		},
		nil,
		func(res model.Matrix) error {
			if len(res) == 0 || len(res[0].Values) == 0 {
				return errors.New("no samples found")
			}

			latestTS := res[0].Values[len(res[0].Values)-1].Timestamp.Time()
			nowMinusTwoSteps := time.Now().Add(-30 * time.Second)
			if latestTS.After(nowMinusTwoSteps) {
				return nil
			}

			return errors.New("latest timestamp is not within two steps from now")
		},
	)
}

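// testQueryLimitsApplied walks the per-query limit headers (max series, max
// docs, max returned datapoints/series/metadata, and max time range),
// contrasting the lenient behavior when require-exhaustive is false with the
// "query exceeded limit" / HTTP 400 behavior when it is true.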
func testQueryLimitsApplied(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test query series limit with coordinator limit header " +
		"(default requires exhaustive so error)")
	requireError(t, func() error {
		_, err := coordinator.InstantQuery(resources.QueryRequest{
			Query: "{metrics_storage=\"m3db_remote\"}",
		}, resources.Headers{
			headers.LimitMaxSeriesHeader: []string{"10"},
		})
		return err
	}, "query exceeded limit")

	logger.Info("test query series limit with require-exhaustive headers false")
	requireInstantQuerySuccess(t,
		coordinator,
		resources.QueryRequest{
			Query: "database_write_tagged_success",
		},
		resources.Headers{
			headers.LimitMaxSeriesHeader:         []string{"2"},
			headers.LimitRequireExhaustiveHeader: []string{"false"},
		},
		func(res model.Vector) error {
			if len(res) != 2 {
				return fmt.Errorf("expected two results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query series limit with require-exhaustive headers true " +
		"(below limit therefore no error)")
	requireInstantQuerySuccess(t,
		coordinator,
		resources.QueryRequest{
			Query: "database_write_tagged_success",
		},
		resources.Headers{
			headers.LimitMaxSeriesHeader:         []string{"4"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		},
		func(res model.Vector) error {
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query series limit with require-exhaustive headers " +
		"true (above limit therefore error)")
	requireError(t, func() error {
		_, err := coordinator.InstantQuery(resources.QueryRequest{
			Query: "database_write_tagged_success",
		}, resources.Headers{
			headers.LimitMaxSeriesHeader:         []string{"3"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "query exceeded limit")

	requireError(t, func() error {
		_, err := coordinator.InstantQuery(resources.QueryRequest{
			Query: "database_write_tagged_success",
		}, resources.Headers{
			headers.LimitMaxSeriesHeader:         []string{"3"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "400")

	logger.Info("test query docs limit with require-exhaustive headers false")
	requireInstantQuerySuccess(t,
		coordinator,
		resources.QueryRequest{
			Query: "database_write_tagged_success",
		},
		resources.Headers{
			headers.LimitMaxDocsHeader:           []string{"1"},
			headers.LimitRequireExhaustiveHeader: []string{"false"},
		},
		func(res model.Vector) error {
			// NB(nate): docs limit is imprecise so will not match exact number of series
			// returned
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query docs limit with require-exhaustive headers true " +
		"(below limit therefore no error)")
	requireInstantQuerySuccess(t,
		coordinator,
		resources.QueryRequest{
			Query: "database_write_tagged_success",
		},
		resources.Headers{
			headers.LimitMaxDocsHeader:           []string{"4"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		},
		func(res model.Vector) error {
			// NB(nate): docs limit is imprecise so will not match exact number of series
			// returned
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query docs limit with require-exhaustive headers " +
		"true (above limit therefore error)")
	requireError(t, func() error {
		_, err := coordinator.InstantQuery(resources.QueryRequest{
			Query: "database_write_tagged_success",
		}, resources.Headers{
			headers.LimitMaxDocsHeader:           []string{"1"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "query exceeded limit")

	requireError(t, func() error {
		_, err := coordinator.InstantQuery(resources.QueryRequest{
			Query: "database_write_tagged_success",
		}, resources.Headers{
			headers.LimitMaxDocsHeader:           []string{"1"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "400")

	logger.Info("test query returned datapoints limit - zero limit disabled")
	requireRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Now().Add(-100 * time.Minute),
			End:   time.Now(),
			Step:  15 * time.Second,
		},
		resources.Headers{
			headers.LimitMaxReturnedDatapointsHeader: []string{"0"},
		},
		func(res model.Matrix) error {
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query returned series limit - zero limit disabled")
	requireRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Now().Add(-100 * time.Minute),
			End:   time.Now(),
			Step:  15 * time.Second,
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesHeader: []string{"0"},
		},
		func(res model.Matrix) error {
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query returned series limit - above limit disabled")
	requireRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Now().Add(-100 * time.Minute),
			End:   time.Now(),
			Step:  15 * time.Second,
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesHeader: []string{"4"},
		},
		func(res model.Matrix) error {
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query returned series limit - at limit")
	requireRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Now().Add(-100 * time.Minute),
			End:   time.Now(),
			Step:  15 * time.Second,
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesHeader: []string{"3"},
		},
		func(res model.Matrix) error {
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query returned series limit - below limit")
	requireRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Now().Add(-100 * time.Minute),
			End:   time.Now(),
			Step:  15 * time.Second,
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesHeader: []string{"2"},
		},
		func(res model.Matrix) error {
			if len(res) != 2 {
				return fmt.Errorf("expected two results. received %d", len(res))
			}

			return nil
		})

	// Test writes to prep for testing returned series metadata limits
	for i := 0; i < 3; i++ {
		err := coordinator.WriteProm("metadata_test_series", map[string]string{
			"metadata_test_label": fmt.Sprintf("series_label_%d", i),
		}, []prompb.Sample{
			{
				Value:     42.42,
				Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
			},
		}, nil)
		require.NoError(t, err)
	}

	logger.Info("test query returned series metadata limit - zero limit disabled")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: "metadata_test_series",
			},
			LabelName: "metadata_test_label",
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesMetadataHeader: []string{"0"},
		},
		func(res model.LabelValues) error {
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query returned series metadata limit - above limit disabled")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: "metadata_test_series",
			},
			LabelName: "metadata_test_label",
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesMetadataHeader: []string{"4"},
		},
		func(res model.LabelValues) error {
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query returned series metadata limit - at limit")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: "metadata_test_series",
			},
			LabelName: "metadata_test_label",
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesMetadataHeader: []string{"3"},
		},
		func(res model.LabelValues) error {
			if len(res) != 3 {
				return fmt.Errorf("expected three results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query returned series metadata limit - below limit")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: "metadata_test_series",
			},
			LabelName: "metadata_test_label",
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesMetadataHeader: []string{"2"},
		},
		func(res model.LabelValues) error {
			if len(res) != 2 {
				return fmt.Errorf("expected two results. received %d", len(res))
			}

			return nil
		})

	logger.Info("test query time range limit with coordinator defaults")
	requireRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Time{},
			End:   time.Now(),
			Step:  15 * time.Second,
		},
		nil,
		func(res model.Matrix) error {
			if len(res) == 0 {
				return errors.New("expected results to be greater than 0")
			}

			return nil
		})

	logger.Info("test query time range limit with require-exhaustive headers false")
	requireRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Unix(0, 0),
			End:   time.Now(),
			Step:  15 * time.Second,
		},
		resources.Headers{
			headers.LimitMaxRangeHeader:          []string{"4h"},
			headers.LimitRequireExhaustiveHeader: []string{"false"},
		},
		func(res model.Matrix) error {
			if len(res) == 0 {
				return errors.New("expected results to be greater than 0")
			}

			return nil
		})

	logger.Info("test query time range limit with require-exhaustive headers true " +
		"(above limit therefore error)")
	requireError(t, func() error {
		_, err := coordinator.RangeQuery(resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Unix(0, 0),
			End:   time.Now(),
			Step:  15 * time.Second,
		}, resources.Headers{
			headers.LimitMaxRangeHeader:          []string{"4h"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "query exceeded limit")
	requireError(t, func() error {
		_, err := coordinator.RangeQuery(resources.RangeQueryRequest{
			Query: "database_write_tagged_success",
			Start: time.Unix(0, 0),
			End:   time.Now(),
			Step:  15 * time.Second,
		}, resources.Headers{
			headers.LimitMaxRangeHeader:          []string{"4h"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "400")

	logger.Info("test label values time range limit with coordinator defaults")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: "metadata_test_series",
			},
			LabelName: "metadata_test_label",
		},
		resources.Headers{
			headers.LimitMaxReturnedSeriesMetadataHeader: []string{"2"},
		},
		func(res model.LabelValues) error {
			if len(res) == 0 {
				return errors.New("expected results to be greater than 0")
			}

			return nil
		})

	logger.Info("test label values time range limit with require-exhaustive headers false")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: "metadata_test_series",
			},
			LabelName: "metadata_test_label",
		},
		resources.Headers{
			headers.LimitMaxRangeHeader:          []string{"4h"},
			headers.LimitRequireExhaustiveHeader: []string{"false"},
		},
		func(res model.LabelValues) error {
			if len(res) == 0 {
				return errors.New("expected results to be greater than 0")
			}

			return nil
		})

	logger.Info("test label values time range limit with require-exhaustive headers true " +
		"(above limit therefore error)")
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: "metadata_test_series",
			},
			LabelName: "metadata_test_label",
		}, resources.Headers{
			headers.LimitMaxRangeHeader:          []string{"4h"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "query exceeded limit")
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: "metadata_test_series",
			},
			LabelName: "metadata_test_label",
		}, resources.Headers{
			headers.LimitMaxRangeHeader:          []string{"4h"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "400")
}

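// testQueryRestrictMetricsType reads back the values written by
// testPrometheusRemoteWriteRestrictMetricsType, pinning each instant and
// range query to a single metrics type so the unaggregated (42.42) and
// aggregated (84.84) samples can be told apart.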
func testQueryRestrictMetricsType(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test query restrict to unaggregated metrics type (instant)")
	requireNativeInstantQuerySuccess(t,
		coordinator,
		resources.QueryRequest{
			Query: "bar_metric",
		},
		resources.Headers{
			headers.MetricsTypeHeader: []string{"unaggregated"},
		},
		func(res model.Vector) error {
			if len(res) == 0 {
				return errors.New("expected results. received none")
			}

			if res[0].Value != 42.42 {
				return fmt.Errorf("expected 42.42. received %v", res[0].Value)
			}

			return nil
		})

	logger.Info("test query restrict to unaggregated metrics type (range)")
	requireNativeRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "bar_metric",
			Start: time.Now().Add(-1 * time.Hour),
			End:   time.Now(),
			Step:  30 * time.Second,
		},
		resources.Headers{
			headers.MetricsTypeHeader: []string{"unaggregated"},
		},
		func(res model.Matrix) error {
			if len(res) == 0 {
				return errors.New("expected results. received none")
			}

			if len(res[0].Values) == 0 {
				return errors.New("expected values for initial result. received none")
			}

			if res[0].Values[0].Value != 42.42 {
				return fmt.Errorf("expected 42.42. received %v", res[0].Values[0].Value)
			}

			return nil
		})

	logger.Info("test query restrict to aggregated metrics type (instant)")
	requireNativeInstantQuerySuccess(t,
		coordinator,
		resources.QueryRequest{
			Query: "bar_metric",
		},
		resources.Headers{
			headers.MetricsTypeHeader:          []string{"aggregated"},
			headers.MetricsStoragePolicyHeader: []string{"5s:6h"},
		},
		func(res model.Vector) error {
			if len(res) == 0 {
				return errors.New("expected results. received none")
			}

			if res[0].Value != 84.84 {
				return fmt.Errorf("expected 84.84. received %v", res[0].Value)
			}

			return nil
		})

	logger.Info("test query restrict to aggregated metrics type (range)")
	requireNativeRangeQuerySuccess(t,
		coordinator,
		resources.RangeQueryRequest{
			Query: "bar_metric",
			Start: time.Now().Add(-1 * time.Hour),
			End:   time.Now(),
			Step:  30 * time.Second,
		},
		resources.Headers{
			headers.MetricsTypeHeader:          []string{"aggregated"},
			headers.MetricsStoragePolicyHeader: []string{"5s:6h"},
		},
		func(res model.Matrix) error {
			if len(res) == 0 {
				return errors.New("expected results. received none")
			}

			if len(res[0].Values) == 0 {
				return errors.New("expected values for initial result. received none")
			}

			if res[0].Values[0].Value != 84.84 {
				return fmt.Errorf("expected 84.84. received %v", res[0].Values[0].Value)
			}

			return nil
		})
}

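// testQueryTimeouts drives the instant-query, range-query, label-names and
// label-values endpoints with timeouts too short to satisfy and expects an
// HTTP 504 from each: "1ns" trips the coordinator itself, "1ms" the
// coordinator -> m3db hop.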
func testQueryTimeouts(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	tests := func(timeout, message string) {
		logger.Info(message)
		requireError(t, func() error {
			_, err := coordinator.InstantQuery(resources.QueryRequest{
				Query: "database_write_tagged_success",
			}, resources.Headers{
				headers.TimeoutHeader: []string{timeout},
			})
			return err
		}, "504")

		requireError(t, func() error {
			_, err := coordinator.RangeQuery(resources.RangeQueryRequest{
				Query: "database_write_tagged_success",
				Start: time.Unix(0, 0),
				End:   time.Now(),
			}, resources.Headers{
				headers.TimeoutHeader: []string{timeout},
			})
			return err
		}, "504")

		requireError(t, func() error {
			_, err := coordinator.LabelNames(resources.LabelNamesRequest{},
				resources.Headers{
					headers.TimeoutHeader: []string{timeout},
				})
			return err
		}, "504")

		requireError(t, func() error {
			_, err := coordinator.LabelValues(resources.LabelValuesRequest{
				LabelName: "__name__",
			}, resources.Headers{
				headers.TimeoutHeader: []string{timeout},
			})
			return err
		}, "504")
	}

	tests("1ns", "test timeouts at the coordinator layer")
	tests("1ms", "test timeouts at the coordinator -> m3db layer")
}

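// testPrometheusQueryNativeTimeout repeats the 504 timeout checks against
// the native M3 query engine rather than the Prometheus engine.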
func testPrometheusQueryNativeTimeout(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test query gateway timeout (instant)")
	requireError(t, func() error {
		_, err := coordinator.InstantQueryWithEngine(resources.QueryRequest{
			Query: "bar_metric",
		}, options.M3QueryEngine, resources.Headers{
			headers.TimeoutHeader:     []string{"1ms"},
			headers.MetricsTypeHeader: []string{"unaggregated"},
		})
		return err
	}, "504")

	logger.Info("test query gateway timeout (range)")
	requireError(t, func() error {
		_, err := coordinator.RangeQueryWithEngine(resources.RangeQueryRequest{
			Query: "bar_metric",
			Start: time.Now().Add(-1 * time.Hour),
			End:   time.Now(),
			Step:  30 * time.Second,
		}, options.M3QueryEngine, resources.Headers{
			headers.TimeoutHeader:     []string{"1ms"},
			headers.MetricsTypeHeader: []string{"unaggregated"},
		})
		return err
	}, "504")
}

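// testQueryRestrictTags verifies the coordinator's default restrictTags
// behavior: series tagged restricted_metrics_type="hidden" are invisible by
// default, but become visible when the restriction is overridden with an
// empty restrict-by-tags JSON header ("{}").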
func testQueryRestrictTags(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	// Test that the default tag restriction is applied when directly querying
	// the coordinator (restrict tags are set to hide any
	// restricted_metrics_type="hidden" in m3coordinator.yml)

	// First write some hidden metrics.
	logger.Info("test write with unaggregated metrics type works as expected")
	require.NoError(t, coordinator.WriteProm("some_hidden_metric", map[string]string{
		"restricted_metrics_type": "hidden",
		"foo_tag":                 "foo_tag_value",
	}, []prompb.Sample{
		{
			Value:     42.42,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
		},
	}, nil))

	// Check that we can see them with zero restrictions applied as an
	// override (we do this check first so that when we test that they
	// don't appear by default we know that the metrics are already visible).
	logger.Info("test restrict by tags with header override to remove restrict works")
	requireInstantQuerySuccess(t, coordinator, resources.QueryRequest{
		Query: "{restricted_metrics_type=\"hidden\"}",
	}, resources.Headers{
		headers.RestrictByTagsJSONHeader: []string{"{}"},
	}, func(res model.Vector) error {
		if len(res) != 1 {
			return fmt.Errorf("expected 1 result, got %v", len(res))
		}
		return nil
	})

	// Now test that the defaults will hide the metrics altogether.
	logger.Info("test restrict by tags with coordinator defaults")
	requireInstantQuerySuccess(t, coordinator, resources.QueryRequest{
		Query: "{restricted_metrics_type=\"hidden\"}",
	}, nil, func(res model.Vector) error {
		if len(res) != 0 {
			return fmt.Errorf("expected no results, got %v", len(res))
		}
		return nil
	})
}

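// testPrometheusRemoteWriteMapTags writes with a map-tags-by-JSON header
// that injects globaltag=somevalue at write time, then queries the metric
// back to confirm the tag was attached.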
func testPrometheusRemoteWriteMapTags(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test map tags header works as expected")
	require.NoError(t, coordinator.WriteProm("bar_metric", nil, []prompb.Sample{
		{
			Value:     42.42,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
		},
	}, resources.Headers{
		headers.MetricsTypeHeader:   []string{"unaggregated"},
		headers.MapTagsByJSONHeader: []string{`{"tagMappers":[{"write":{"tag":"globaltag","value":"somevalue"}}]}`},
	}))

	requireNativeInstantQuerySuccess(t, coordinator, resources.QueryRequest{
		Query: "bar_metric",
	}, resources.Headers{
		headers.MetricsTypeHeader: []string{"unaggregated"},
	}, func(res model.Vector) error {
		if len(res) == 0 {
			return errors.New("expecting results, got none")
		}

		if val, ok := res[0].Metric["globaltag"]; !ok || val != "somevalue" {
			return fmt.Errorf("expected metric with globaltag=somevalue, got=%+v", res[0].Metric)
		}

		return nil
	})
}

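// testSeries hits the /api/v1/series match endpoint, including once with
// start and end values at the extremes of the representable time range.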
func testSeries(t *testing.T, coordinator resources.Coordinator, logger *zap.Logger) {
	logger.Info("test series match endpoint")
	requireSeriesSuccess(t, coordinator, resources.SeriesRequest{
		MetadataRequest: resources.MetadataRequest{
			Match: "prometheus_remote_storage_samples_total",
			Start: time.Unix(0, 0),
			End:   time.Now().Add(1 * time.Hour),
		},
	}, nil, func(res []model.Metric) error {
		if len(res) != 1 {
			return fmt.Errorf("expected 1 result, got %v", len(res))
		}
		return nil
	})

	requireSeriesSuccess(t, coordinator, resources.SeriesRequest{
		MetadataRequest: resources.MetadataRequest{
			Match: "prometheus_remote_storage_samples_total",
		},
	}, nil, func(res []model.Metric) error {
		if len(res) != 1 {
			return fmt.Errorf("expected 1 result, got %v", len(res))
		}
		return nil
	})

	// NB(nate): Use raw RunQuery method here since we want to use a custom format for start
	// and end
	queryAndParams := "api/v1/series?match[]=prometheus_remote_storage_samples_total&start=" +
		"-292273086-05-16T16:47:06Z&end=292277025-08-18T07:12:54.999999999Z"
	require.NoError(t, coordinator.RunQuery(
		func(status int, headers map[string][]string, resp string, err error) error {
			if status != http.StatusOK {
				return fmt.Errorf("expected 200, got %d. body=%v", status, resp)
			}
			var parsedResp seriesResponse
			if err := json.Unmarshal([]byte(resp), &parsedResp); err != nil {
				return err
			}

			if len(parsedResp.Data) != 1 {
				return fmt.Errorf("expected 1 result, got %d", len(parsedResp.Data))
			}

			return nil
		}, queryAndParams, nil))
}

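// testLabelQueryLimitsApplied repeats the series and docs limit checks for
// the label-values endpoint, again contrasting require-exhaustive true
// (error and 400) with false (partial results).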
func testLabelQueryLimitsApplied(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test label limits with require-exhaustive headers true " +
		"(below limit therefore no error)")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			LabelName: "__name__",
		},
		resources.Headers{
			headers.LimitMaxSeriesHeader:         []string{"10000"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		},
		func(res model.LabelValues) error {
			// NB(nate): just checking for a 200 and this method only gets called in that case
			return nil
		})

	logger.Info("test label series limit with coordinator limit header (default " +
		"requires exhaustive so error)")
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			LabelName: "__name__",
		}, resources.Headers{
			headers.LimitMaxSeriesHeader: []string{"1"},
		})
		return err
	}, "query exceeded limit")

	logger.Info("test label series limit with require-exhaustive headers false")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			LabelName: "__name__",
		},
		resources.Headers{
			headers.LimitMaxSeriesHeader:         []string{"2"},
			headers.LimitRequireExhaustiveHeader: []string{"false"},
		},
		func(res model.LabelValues) error {
			if len(res) != 1 {
				return fmt.Errorf("expected 1 result, got %d", len(res))
			}
			return nil
		})

	logger.Info("test label series limit with require-exhaustive headers " +
		"true (above limit therefore error)")
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			LabelName: "__name__",
		}, resources.Headers{
			headers.LimitMaxSeriesHeader:         []string{"2"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "query exceeded limit")
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			LabelName: "__name__",
		}, resources.Headers{
			headers.LimitMaxSeriesHeader:         []string{"2"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "400")

	logger.Info("test label docs limit with coordinator limit header " +
		"(default requires exhaustive so error)")
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			LabelName: "__name__",
		}, resources.Headers{
			headers.LimitMaxDocsHeader: []string{"1"},
		})
		return err
	}, "query exceeded limit")

	logger.Info("test label docs limit with require-exhaustive headers false")
	requireLabelValuesSuccess(t,
		coordinator,
		resources.LabelValuesRequest{
			LabelName: "__name__",
		},
		resources.Headers{
			headers.LimitMaxDocsHeader:           []string{"2"},
			headers.LimitRequireExhaustiveHeader: []string{"false"},
		},
		func(res model.LabelValues) error {
			if len(res) != 1 {
				return fmt.Errorf("expected 1 result, got %d", len(res))
			}
			return nil
		})

	logger.Info("test label docs limit with require-exhaustive headers " +
		"true (above limit therefore error)")
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			LabelName: "__name__",
		}, resources.Headers{
			headers.LimitMaxDocsHeader:           []string{"1"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "query exceeded limit")
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			LabelName: "__name__",
		}, resources.Headers{
			headers.LimitMaxDocsHeader:           []string{"1"},
			headers.LimitRequireExhaustiveHeader: []string{"true"},
		})
		return err
	}, "400")
}

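// testLabels writes two metrics with overlapping label names and checks the
// label-names and label-values endpoints, both bare and scoped by a series
// matcher.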
func testLabels(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test label APIs")
	require.NoError(t, coordinator.WriteProm("label_metric", map[string]string{
		"name_0": "value_0_1",
		"name_1": "value_1_1",
		"name_2": "value_2_1",
	}, []prompb.Sample{
		{
			Value:     42.42,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
		},
	}, nil))

	require.NoError(t, coordinator.WriteProm("label_metric_2", map[string]string{
		"name_0": "value_0_2",
		"name_1": "value_1_2",
	}, []prompb.Sample{
		{
			Value:     42.42,
			Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
		},
	}, nil))

	requireLabelNamesSuccess(t, coordinator, resources.LabelNamesRequest{}, nil,
		func(res model.LabelNames) error {
			var nameLabels model.LabelNames
			for _, label := range res {
				// nolint:staticcheck
				matched, err := regexp.MatchString("name_[012]", string(label))
				if err != nil {
					return err
				}
				if matched {
					nameLabels = append(nameLabels, label)
				}
			}
			if len(nameLabels) != 3 {
				return fmt.Errorf("expected 3 results, got %d", len(nameLabels))
			}
			return nil
		})

	requireLabelNamesSuccess(t, coordinator, resources.LabelNamesRequest{
		MetadataRequest: resources.MetadataRequest{
			Match: "label_metric",
		},
	}, nil, func(res model.LabelNames) error {
		if len(res) != 4 {
			return fmt.Errorf("expected 4 results, got %d", len(res))
		}
		return nil
	})

	requireLabelNamesSuccess(t, coordinator, resources.LabelNamesRequest{
		MetadataRequest: resources.MetadataRequest{
			Match: "label_metric_2",
		},
	}, nil, func(res model.LabelNames) error {
		if len(res) != 3 {
			return fmt.Errorf("expected 3 results, got %d", len(res))
		}
		return nil
	})

	requireLabelValuesSuccess(t, coordinator, resources.LabelValuesRequest{
		LabelName: "name_1",
	}, nil, func(res model.LabelValues) error {
		if len(res) != 2 {
			return fmt.Errorf("expected 2 results, got %d", len(res))
		}
		return nil
	})

	tests := func(match string, length int, val string) {
		requireLabelValuesSuccess(t, coordinator, resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: match,
			},
			LabelName: "name_1",
		}, nil, func(res model.LabelValues) error {
			if len(res) != length {
				return fmt.Errorf("expected %d results, got %d", length, len(res))
			}
			return nil
		})

		requireLabelValuesSuccess(t, coordinator, resources.LabelValuesRequest{
			MetadataRequest: resources.MetadataRequest{
				Match: match,
			},
			LabelName: "name_1",
		}, nil, func(res model.LabelValues) error {
			if string(res[0]) != val {
				return fmt.Errorf("expected %s, got %s", val, res[0])
			}
			return nil
		})
	}
	tests("label_metric", 1, "value_1_1")
	tests("label_metric_2", 1, "value_1_2")
}

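// testQueryLimitsGlobalApplied sets cluster-wide (KV-backed) disk-read
// limits via setSeriesDiskReadLimits, confirms queries trip them, then uses
// ForceWaited to check that the waited header is surfaced and that
// require-no-wait turns the forced wait into a 400.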
func testQueryLimitsGlobalApplied(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test global query limits")
	labelVals := []string{"series_label_0", "series_label_1", "series_label_2"}
	for _, val := range labelVals {
		require.NoError(t, coordinator.WriteProm("metadata_test_series", map[string]string{
			"query_global_limit_test": val,
		}, []prompb.Sample{
			{
				Value:     42.42,
				Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
			},
		}, nil))
	}

	// Set global limits
	setSeriesDiskReadLimits(t, coordinator, kvpb.QueryLimits{
		MaxRecentlyQueriedSeriesDiskRead: &kvpb.QueryLimit{Limit: 1, LookbackSeconds: 5},
	})

	// Test that global limits are tripped
	requireError(t, func() error {
		_, err := coordinator.InstantQuery(resources.QueryRequest{
			Query: `{query_global_limit_test!=""}`,
		}, nil)
		return err
	}, "")

	// Force wait for permit
	setSeriesDiskReadLimits(t, coordinator, kvpb.QueryLimits{
		MaxRecentlyQueriedSeriesDiskRead: &kvpb.QueryLimit{
			Limit:           10000,
			LookbackSeconds: 5,
			ForceWaited:     true,
		},
	})

	// Check that waited header is returned. Use RunQuery to get access to headers.
	queryAndParams := `api/v1/query?query={query_global_limit_test!=""}`
	require.NoError(t, coordinator.RunQuery(
		func(status int, h map[string][]string, resp string, err error) error {
			if status != http.StatusOK {
				return fmt.Errorf("expected 200, got %d. body=%v", status, resp)
			}

			if _, ok := h[headers.WaitedHeader]; !ok {
				return fmt.Errorf("expected header %v to be present on response",
					headers.WaitedHeader)
			}

			return nil
		}, queryAndParams, nil))

	// Test that requiring no wait fails when a wait is forced
	requireError(t, func() error {
		_, err := coordinator.InstantQuery(resources.QueryRequest{
			Query: `{query_global_limit_test!=""}`,
		}, resources.Headers{
			headers.LimitRequireNoWaitHeader: []string{"true"},
		})
		return err
	}, "400")

	// Restore limits
	setSeriesDiskReadLimits(t, coordinator, kvpb.QueryLimits{})
}

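// testGlobalAggregateLimits sets a global metadata-read limit sized so that
// repeated label-values calls eventually trip it, while an aggregating
// query over the same series remains unaffected.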
func testGlobalAggregateLimits(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test global aggregate limits")
	metricName := fmt.Sprintf("aggregate_limits_%v", xtime.Now().Seconds())
	numMetrics := 60
	for i := 0; i < numMetrics; i++ {
		require.NoError(t, coordinator.WriteProm(metricName, map[string]string{
			metricName: strconv.Itoa(i),
		}, []prompb.Sample{
			{
				Value:     float64(i),
				Timestamp: storage.TimeToPromTimestamp(xtime.Now()),
			},
		}, nil))
	}

	// Set global limits
	setSeriesDiskReadLimits(t, coordinator, kvpb.QueryLimits{
		MaxRecentlyQueriedMetadataRead: &kvpb.QueryLimit{Limit: 150, LookbackSeconds: 5},
	})

	checkLabels := func() {
		requireLabelValuesSuccess(t, coordinator, resources.LabelValuesRequest{
			LabelName: metricName,
		}, nil, func(res model.LabelValues) error {
			if len(res) != numMetrics {
				return fmt.Errorf("expected %d results, got %v", numMetrics, len(res))
			}
			return nil
		})
	}

	// Make sure any existing limit has expired before continuing.
	checkLabels()
	checkLabels()
	requireError(t, func() error {
		_, err := coordinator.LabelValues(resources.LabelValuesRequest{
			LabelName: metricName,
		}, nil)
		return err
	}, "error")

	// Make sure that a query is unaffected by the metadata limits.
	ts := time.Now()
	requireInstantQuerySuccess(t, coordinator, resources.QueryRequest{
		Query: fmt.Sprintf("sum(%v)", metricName),
		Time:  &ts,
	}, nil, func(res model.Vector) error {
		// NB(nate): just want a 200 here which a call to this method indicates
		return nil
	})

	checkLabels()

	// Restore limits
	setSeriesDiskReadLimits(t, coordinator, kvpb.QueryLimits{})
}

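// testParseThreshold covers the /api/v1/parse and /api/v1/threshold
// endpoints, asserting that the parsed expression's root is a fetch and
// that the comparator and threshold value are extracted from expressions
// such as "up>1".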
func testParseThreshold(
	t *testing.T,
	coordinator resources.Coordinator,
	logger *zap.Logger,
) {
	logger.Info("test parse and threshold API endpoints")
	require.NoError(t, coordinator.RunQuery(
		func(status int, h map[string][]string, resp string, err error) error {
			if status != http.StatusOK {
				return fmt.Errorf("expected 200, got %d. body=%v", status, resp)
			}

			var parsedResp parseResponse
			if err := json.Unmarshal([]byte(resp), &parsedResp); err != nil {
				return err
			}

			if parsedResp.Name != "fetch" {
				return fmt.Errorf("expected 'fetch', got '%s'", parsedResp.Name)
			}

			return nil
		}, "api/v1/parse?query=up", nil))

	thresholdTest := func(path string, comparator string) {
		require.NoError(t, coordinator.RunQuery(
			func(status int, h map[string][]string, resp string, err error) error {
				if status != http.StatusOK {
					return fmt.Errorf("expected 200, got %d. body=%v", status, resp)
				}

				var parsedResp thresholdResponse
				if err := json.Unmarshal([]byte(resp), &parsedResp); err != nil {
					return err
				}

				if parsedResp.Query.Name != "fetch" {
					return fmt.Errorf("expected 'fetch', got '%s'", parsedResp.Query.Name)
				}

				if parsedResp.Threshold.Comparator != comparator {
					return fmt.Errorf("expected '%s', got '%s'", comparator, parsedResp.Threshold.Comparator)
				}

				if parsedResp.Threshold.Value != 1 {
					return fmt.Errorf("expected 1, got %d", parsedResp.Threshold.Value)
				}

				return nil
			}, path, nil))
	}
	thresholdTest("api/v1/threshold?query=up>1", ">")
	thresholdTest("api/v1/threshold?query=up<1", "<")
}

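// testReplace checks PromQL label_replace, both bare and wrapped in an
// arithmetic expression, over metrics written by the
// writeCorrectnessTestMetrics helper.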
  1508  func testReplace(
  1509  	t *testing.T,
  1510  	coordinator resources.Coordinator,
  1511  	logger *zap.Logger,
  1512  ) {
  1513  	logger.Info("test label replace")
  1514  	var (
  1515  		numMetrics = 5
  1516  		metricName = fmt.Sprintf("quail_%v", xtime.Now().Seconds())
  1517  	)
  1518  	writeCorrectnessTestMetrics(t, coordinator, metricName, numMetrics, "extra")
  1519  
  1520  	replaceTest := func(query string) {
  1521  		requireInstantQuerySuccess(t, coordinator, resources.QueryRequest{
  1522  			Query: query,
  1523  		}, nil, func(res model.Vector) error {
  1524  			if len(res) != numMetrics {
  1525  				return fmt.Errorf("expected %d results, got %d", numMetrics, len(res))
  1526  			}
  1527  
  1528  			for i := 0; i < numMetrics; i++ {
  1529  				expectedVal := fmt.Sprintf("bar_%d", i)
  1530  				if val, ok := res[i].Metric["foo"]; !ok || string(val) != expectedVal {
  1531  					return fmt.Errorf("expected %s, got %s", expectedVal, val)
  1532  				}
  1533  			}
  1534  
  1535  			return nil
  1536  		})
  1537  	}
  1538  	replaceTest(`label_replace(` + metricName + `,"foo","bar_$1","val","(.*)")`)
  1539  	replaceTest(`label_replace(` + metricName + `,"foo","bar_$1","val","(.*)")-0`)
  1540  }
  1541  
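        // testEmptyMatcher verifies empty-string matcher semantics: {tag=""}
        // must select only series missing the tag, while {tag!=""} must select
        // only series that have it.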
  1542  func testEmptyMatcher(
  1543  	t *testing.T,
  1544  	coordinator resources.Coordinator,
  1545  	logger *zap.Logger,
  1546  ) {
  1547  	logger.Info("test empty matcher")
  1548  	var (
  1549  		numMetrics = 5
  1550  		metricName = fmt.Sprintf("foo_%v", xtime.Now().Seconds())
  1551  		tagName1   = "exists"
  1552  		tagName2   = "not_exists"
  1553  	)
  1554  	writeCorrectnessTestMetrics(t, coordinator, metricName, numMetrics, tagName1)
  1555  	writeCorrectnessTestMetrics(t, coordinator, metricName, numMetrics, tagName2)
  1556  
  1557  	emptyMatcherTests := func(query, matchedTag, notMatchedTag string) {
  1558  		requireInstantQuerySuccess(t, coordinator, resources.QueryRequest{
  1559  			Query: query,
  1560  		}, nil, func(res model.Vector) error {
  1561  			if len(res) != numMetrics {
  1562  				return fmt.Errorf("expected %d results, got %d", numMetrics, len(res))
  1563  			}
  1564  
  1565  			metricsCount := make(map[string]int)
  1566  			for i := 0; i < numMetrics; i++ {
  1567  				if _, ok := res[i].Metric[model.LabelName(matchedTag)]; ok {
  1568  					metricsCount[matchedTag]++
  1569  				}
  1570  				if _, ok := res[i].Metric[model.LabelName(notMatchedTag)]; ok {
  1571  					metricsCount[notMatchedTag]++
  1572  				}
  1573  			}
  1574  
  1575  			if metricsCount[matchedTag] != numMetrics {
  1576  				return fmt.Errorf("expected %d results, got %d", numMetrics, metricsCount[matchedTag])
  1577  			}
  1578  
  1579  			if metricsCount[notMatchedTag] != 0 {
  1580  				return fmt.Errorf("expected 0 results, got %d", metricsCount[notMatchedTag])
  1581  			}
  1582  
  1583  			return nil
  1584  		})
  1585  	}
  1586  	emptyMatcherTests(fmt.Sprintf(`%s{%s=""}`, metricName, tagName1), tagName2, tagName1)
  1587  	emptyMatcherTests(fmt.Sprintf(`%s{%s!=""}`, metricName, tagName1), tagName1, tagName2)
  1588  	emptyMatcherTests(fmt.Sprintf(`%s{%s=""}`, metricName, tagName2), tagName1, tagName2)
  1589  	emptyMatcherTests(fmt.Sprintf(`%s{%s!=""}`, metricName, tagName2), tagName2, tagName1)
  1590  }
  1591  
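        // testDebugPromReturnsDuplicates writes a series with a duplicated
        // "val" tag directly to the dbnode and verifies the remote read debug
        // endpoint returns both copies of the tag rather than deduplicating.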
  1592  func testDebugPromReturnsDuplicates(
  1593  	t *testing.T,
  1594  	m3 resources.M3Resources,
  1595  	logger *zap.Logger,
  1596  ) {
  1597  	logger.Info("test debug prom returns duplicates")
  1598  	var (
  1599  		metricName  = fmt.Sprintf("duplicate_%v", xtime.Now().Seconds())
  1600  		coordinator = m3.Coordinator()
  1601  		dbnode      = m3.Nodes()[0]
  1602  		encoderPool = newTagEncoderPool()
  1603  		encodedTags = encodeTags(t, encoderPool,
  1604  			ident.StringTag("__name__", metricName),
  1605  			ident.StringTag("val", "extra"),
  1606  			ident.StringTag("val", "0"))
  1607  	)
  1608  
  1609  	require.NoError(t, dbnode.WriteTaggedBatchRaw(&rpc.WriteTaggedBatchRawRequest{
  1610  		NameSpace: []byte(resources.UnaggName),
  1611  		Elements: []*rpc.WriteTaggedBatchRawRequestElement{
  1612  			{
  1613  				ID:          []byte(metricName),
  1614  				EncodedTags: encodedTags,
  1615  				Datapoint: &rpc.Datapoint{
  1616  					Timestamp:         time.Now().UnixNano(),
  1617  					TimestampTimeType: rpc.TimeType_UNIX_NANOSECONDS,
  1618  					Value:             1,
  1619  				},
  1620  			},
  1621  		},
  1622  	}))
  1623  
  1624  	queryAndParams := fmt.Sprintf("api/v1/prom/remote/read?query=%s&start=%v&end=%v"+
  1625  		"&format=json", metricName, time.Now().Add(-100*time.Second).Unix(),
  1626  		time.Now().Add(100*time.Second).Unix())
  1627  	expectedTags := `tags":[["__name__","` + metricName + `"],["val","extra"],["val","0"]]`
  1628  	require.NoError(t, coordinator.RunQuery(
  1629  		func(status int, h map[string][]string, resp string, err error) error {
  1630  			if status != http.StatusOK {
  1631  				return fmt.Errorf("expected 200, got %d. body=%v", status, resp)
  1632  			}
  1633  
  1634  			if !strings.Contains(resp, expectedTags) {
  1635  				return fmt.Errorf("expected %v in response but not found. response=%v",
  1636  					expectedTags, resp)
  1637  			}
  1638  
  1639  			return nil
  1640  		}, queryAndParams, nil))
  1641  }
  1642  
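        // writeCorrectnessTestMetrics writes numMetrics samples of metricName
        // via the coordinator, each with a distinct "val" tag (0..numMetrics-1)
        // and an extraTag label (name defaulting to "default") set to "extra".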
  1643  func writeCorrectnessTestMetrics(
  1644  	t *testing.T,
  1645  	coordinator resources.Coordinator,
  1646  	metricName string,
  1647  	numMetrics int,
  1648  	extraTag string,
  1649  ) {
  1650  	ts := xtime.Now()
  1651  	if extraTag == "" {
  1652  		extraTag = "default"
  1653  	}
  1654  	for i := 0; i < numMetrics; i++ {
  1655  		err := coordinator.WriteProm(metricName, map[string]string{
  1656  			extraTag: "extra",
  1657  			"val":    strconv.Itoa(i),
  1658  		}, []prompb.Sample{
  1659  			{
  1660  				Timestamp: storage.TimeToPromTimestamp(ts),
  1661  				Value:     float64(i),
  1662  			},
  1663  		}, nil)
  1664  		require.NoError(t, err)
  1665  	}
  1666  }
  1667  
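        // parseResponse mirrors the field of interest in an /api/v1/parse
        // response body.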
  1668  type parseResponse struct {
  1669  	Name string
  1670  }
  1671  
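        // thresholdResponse mirrors an /api/v1/threshold response body.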
  1672  type thresholdResponse struct {
  1673  	Query     parseResponse
  1674  	Threshold threshold
  1675  }
  1676  
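        // threshold holds the comparator and value parsed from a threshold
        // query.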
  1677  type threshold struct {
  1678  	Comparator string
  1679  	Value      int
  1680  }
  1681  
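        // setSeriesDiskReadLimits serializes the given limits and applies them
        // under the kvconfig.QueryLimits key via the coordinator's KV update
        // endpoint; passing an empty kvpb.QueryLimits restores the defaults.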
  1682  func setSeriesDiskReadLimits(
  1683  	t *testing.T,
  1684  	coordinator resources.Coordinator,
  1685  	limits kvpb.QueryLimits,
  1686  ) {
  1687  	limitBytes, err := json.Marshal(limits)
  1688  	require.NoError(t, err)
  1689  
  1690  	update := &database.KeyValueUpdate{
  1691  		Key:    kvconfig.QueryLimits,
  1692  		Commit: true,
  1693  		Value:  limitBytes,
  1694  	}
  1695  	updateBytes, err := json.Marshal(update)
  1696  	require.NoError(t, err)
  1697  
  1698  	require.NoError(t, coordinator.ApplyKVUpdate(string(updateBytes)))
  1699  }
  1700  
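        // seriesResponse mirrors the body of an /api/v1/series response.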
  1701  type seriesResponse struct {
  1702  	Status string
  1703  	Data   []map[string]string
  1704  }
  1705  
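        // requireError retries query until it fails with an error containing
        // errorMsg; an empty errorMsg accepts any error.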
  1706  func requireError(t *testing.T, query func() error, errorMsg string) {
  1707  	require.NoError(t, resources.Retry(func() error {
  1708  		if err := query(); err != nil {
  1709  			if errorMsg == "" || strings.Contains(err.Error(), errorMsg) {
  1710  				return nil
  1711  			}
  1712  		}
  1713  
  1714  		err := errors.New("expected read request to fail with error")
  1715  		if errorMsg != "" {
  1716  			err = fmt.Errorf("expected read request to fail with error containing: %s", errorMsg)
  1717  		}
  1718  
  1719  		return err
  1720  	}))
  1721  }
  1722  
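        // requireInstantQuerySuccess retries an instant query against the
        // coordinator until it succeeds and successCond accepts the result.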
  1723  func requireInstantQuerySuccess(
  1724  	t *testing.T,
  1725  	coordinator resources.Coordinator,
  1726  	request resources.QueryRequest,
  1727  	headers resources.Headers,
  1728  	successCond func(res model.Vector) error,
  1729  ) {
  1730  	require.NoError(t, resources.Retry(func() error {
  1731  		res, err := coordinator.InstantQuery(request, headers)
  1732  		if err != nil {
  1733  			return err
  1734  		}
  1735  
  1736  		return successCond(res)
  1737  	}))
  1738  }
  1739  
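        // requireNativeInstantQuerySuccess is requireInstantQuerySuccess
        // forced through the M3 query engine.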
  1740  func requireNativeInstantQuerySuccess(
  1741  	t *testing.T,
  1742  	coordinator resources.Coordinator,
  1743  	request resources.QueryRequest,
  1744  	headers resources.Headers,
  1745  	successCond func(res model.Vector) error,
  1746  ) {
  1747  	require.NoError(t, resources.Retry(func() error {
  1748  		res, err := coordinator.InstantQueryWithEngine(request, options.M3QueryEngine, headers)
  1749  		if err != nil {
  1750  			return err
  1751  		}
  1752  
  1753  		return successCond(res)
  1754  	}))
  1755  }
  1756  
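        // requireRangeQuerySuccess retries a range query against the
        // coordinator until it succeeds and successCond accepts the matrix.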
  1757  func requireRangeQuerySuccess(
  1758  	t *testing.T,
  1759  	coordinator resources.Coordinator,
  1760  	request resources.RangeQueryRequest,
  1761  	headers resources.Headers,
  1762  	successCond func(res model.Matrix) error,
  1763  ) {
  1764  	require.NoError(t, resources.Retry(func() error {
  1765  		res, err := coordinator.RangeQuery(request, headers)
  1766  		if err != nil {
  1767  			return err
  1768  		}
  1769  
  1770  		return successCond(res)
  1771  	}))
  1772  }
  1773  
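        // requireNativeRangeQuerySuccess is requireRangeQuerySuccess forced
        // through the M3 query engine.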
  1774  func requireNativeRangeQuerySuccess(
  1775  	t *testing.T,
  1776  	coordinator resources.Coordinator,
  1777  	request resources.RangeQueryRequest,
  1778  	headers resources.Headers,
  1779  	successCond func(res model.Matrix) error,
  1780  ) {
  1781  	require.NoError(t, resources.Retry(func() error {
  1782  		res, err := coordinator.RangeQueryWithEngine(request, options.M3QueryEngine, headers)
  1783  		if err != nil {
  1784  			return err
  1785  		}
  1786  
  1787  		return successCond(res)
  1788  	}))
  1789  }
  1790  
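        // requireLabelValuesSuccess retries a label values request until it
        // succeeds and successCond accepts the result.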
  1791  func requireLabelValuesSuccess(
  1792  	t *testing.T,
  1793  	coordinator resources.Coordinator,
  1794  	request resources.LabelValuesRequest,
  1795  	headers resources.Headers,
  1796  	successCond func(res model.LabelValues) error,
  1797  ) {
  1798  	require.NoError(t, resources.Retry(func() error {
  1799  		res, err := coordinator.LabelValues(request, headers)
  1800  		if err != nil {
  1801  			return err
  1802  		}
  1803  
  1804  		return successCond(res)
  1805  	}))
  1806  }
  1807  
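        // requireLabelNamesSuccess retries a label names request until it
        // succeeds and successCond accepts the result.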
  1808  func requireLabelNamesSuccess(
  1809  	t *testing.T,
  1810  	coordinator resources.Coordinator,
  1811  	request resources.LabelNamesRequest,
  1812  	headers resources.Headers,
  1813  	successCond func(res model.LabelNames) error,
  1814  ) {
  1815  	require.NoError(t, resources.Retry(func() error {
  1816  		res, err := coordinator.LabelNames(request, headers)
  1817  		if err != nil {
  1818  			return err
  1819  		}
  1820  
  1821  		return successCond(res)
  1822  	}))
  1823  }
  1824  
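        // requireSeriesSuccess retries a series metadata request until it
        // succeeds and successCond accepts the result.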
  1825  func requireSeriesSuccess(
  1826  	t *testing.T,
  1827  	coordinator resources.Coordinator,
  1828  	request resources.SeriesRequest,
  1829  	headers resources.Headers,
  1830  	successCond func(res []model.Metric) error,
  1831  ) {
  1832  	require.NoError(t, resources.Retry(func() error {
  1833  		res, err := coordinator.Series(request, headers)
  1834  		if err != nil {
  1835  			return err
  1836  		}
  1837  
  1838  		return successCond(res)
  1839  	}))
  1840  }
  1841  
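        // verifyPrometheusQuery retries query against the external Prometheus
        // until it returns at least one sample and the first sample's value
        // exceeds threshold.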
  1842  func verifyPrometheusQuery(t *testing.T, p *docker.Prometheus, query string, threshold float64) {
  1843  	require.NoError(t, resources.Retry(func() error {
  1844  		res, err := p.Query(docker.PrometheusQueryRequest{
  1845  			Query: query,
  1846  		})
  1847  		if err != nil {
  1848  			return err
  1849  		}
  1850  		if len(res) == 0 {
  1851  			return errors.New("no samples returned for query")
  1852  		}
  1853  		if res[0].Value > model.SampleValue(threshold) {
  1854  			return nil
  1855  		}
  1856  
  1857  		return errors.New("value not greater than threshold")
  1858  	}))
  1859  }
  1860  
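        // newTagEncoderPool returns an initialized, single-entry tag encoder
        // pool for serializing tags into the dbnode wire format.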
  1861  func newTagEncoderPool() serialize.TagEncoderPool {
  1862  	encoderPool := serialize.
  1863  		NewTagEncoderPool(serialize.NewTagEncoderOptions(),
  1864  			pool.NewObjectPoolOptions().SetSize(1))
  1865  	encoderPool.Init()
  1866  	return encoderPool
  1867  }
  1868  
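        // encodeTags encodes tags with an encoder from pool and returns the
        // serialized bytes, suitable for WriteTaggedBatchRawRequest elements.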
  1869  func encodeTags(
  1870  	t *testing.T,
  1871  	pool serialize.TagEncoderPool,
  1872  	tags ...ident.Tag,
  1873  ) []byte {
  1874  	encoder := pool.Get()
  1875  
  1876  	seriesTags := ident.NewTags(tags...)
  1877  	err := encoder.Encode(ident.NewTagsIterator(seriesTags))
  1878  	require.NoError(t, err)
  1879  
  1880  	data, ok := encoder.Data()
  1881  	require.True(t, ok)
  1882  
  1883  	return data.Bytes()
  1884  }