github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/querier/queryrange/downstreamer_test.go

package queryrange

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql"
	"github.com/stretchr/testify/require"
	"github.com/weaveworks/common/user"
	"go.uber.org/atomic"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logql/syntax"
	"github.com/grafana/loki/pkg/logqlmodel"
	"github.com/grafana/loki/pkg/logqlmodel/stats"
	"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
)

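// testSampleStreams returns two fixture sample streams with distinct label sets,
// shared by the matrix-conversion and response-mapping tests below.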
func testSampleStreams() []queryrangebase.SampleStream {
	return []queryrangebase.SampleStream{
		{
			Labels: []logproto.LabelAdapter{{Name: "foo", Value: "bar"}},
			Samples: []logproto.LegacySample{
				{
					Value:       0,
					TimestampMs: 0,
				},
				{
					Value:       1,
					TimestampMs: 1,
				},
				{
					Value:       2,
					TimestampMs: 2,
				},
			},
		},
		{
			Labels: []logproto.LabelAdapter{{Name: "bazz", Value: "buzz"}},
			Samples: []logproto.LegacySample{
				{
					Value:       4,
					TimestampMs: 4,
				},
				{
					Value:       5,
					TimestampMs: 5,
				},
				{
					Value:       6,
					TimestampMs: 6,
				},
			},
		},
	}
}

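// TestSampleStreamToMatrix verifies that sampleStreamToMatrix maps each stream's
// labels and samples onto the matching promql.Matrix series and points.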
func TestSampleStreamToMatrix(t *testing.T) {
	input := testSampleStreams()
	expected := promql.Matrix{
		{
			Metric: labels.FromMap(map[string]string{
				"foo": "bar",
			}),
			Points: []promql.Point{
				{
					V: 0,
					T: 0,
				},
				{
					V: 1,
					T: 1,
				},
				{
					V: 2,
					T: 2,
				},
			},
		},
		{
			Metric: labels.FromMap(map[string]string{
				"bazz": "buzz",
			}),
			Points: []promql.Point{
				{
					V: 4,
					T: 4,
				},
				{
					V: 5,
					T: 5,
				},
				{
					V: 6,
					T: 6,
				},
			},
		},
	}
	require.Equal(t, expected, sampleStreamToMatrix(input))
}

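// TestResponseToResult covers the supported response types (LokiResponse and
// LokiPromResponse), their error variants, and the unexpected-type case.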
func TestResponseToResult(t *testing.T) {
	for _, tc := range []struct {
		desc     string
		input    queryrangebase.Response
		err      bool
		expected logqlmodel.Result
	}{
		{
			desc: "LokiResponse",
			input: &LokiResponse{
				Data: LokiData{
					Result: []logproto.Stream{{
						Labels: `{foo="bar"}`,
					}},
				},
				Statistics: stats.Result{
					Summary: stats.Summary{QueueTime: 1, ExecTime: 2},
				},
			},
			expected: logqlmodel.Result{
				Statistics: stats.Result{
					Summary: stats.Summary{QueueTime: 1, ExecTime: 2},
				},
				Data: logqlmodel.Streams{{
					Labels: `{foo="bar"}`,
				}},
			},
		},
		{
			desc: "LokiResponseError",
			input: &LokiResponse{
				Error:     "foo",
				ErrorType: "bar",
			},
			err: true,
		},
		{
			desc: "LokiPromResponse",
			input: &LokiPromResponse{
				Statistics: stats.Result{
					Summary: stats.Summary{QueueTime: 1, ExecTime: 2},
				},
				Response: &queryrangebase.PrometheusResponse{
					Data: queryrangebase.PrometheusData{
						Result: testSampleStreams(),
					},
				},
			},
			expected: logqlmodel.Result{
				Statistics: stats.Result{
					Summary: stats.Summary{QueueTime: 1, ExecTime: 2},
				},
				Data: sampleStreamToMatrix(testSampleStreams()),
			},
		},
		{
			desc: "LokiPromResponseError",
			input: &LokiPromResponse{
				Response: &queryrangebase.PrometheusResponse{
					Error:     "foo",
					ErrorType: "bar",
				},
			},
			err: true,
		},
		{
			desc:  "UnexpectedTypeError",
			input: nil,
			err:   true,
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			out, err := ResponseToResult(tc.input)
			if tc.err {
				require.NotNil(t, err)
			}
			require.Equal(t, tc.expected, out)
		})
	}
}

func TestDownstreamHandler(t *testing.T) {
	// Pretty poor test, but this is just a passthrough struct, so ensure we create locks
	// and can consume them
	h := DownstreamHandler{limits: fakeLimits{}, next: nil}
	in := h.Downstreamer(context.Background()).(*instance)
	require.Equal(t, DefaultDownstreamConcurrency, in.parallelism)
	require.NotNil(t, in.locks)
	ensureParallelism(t, in, in.parallelism)
}

// ensureParallelism consumes the locks in an instance, making sure all of them are
// available. It does not replace them, so the instance is unusable afterwards; this is
// a cleanup check that the internal state matches the expected parallelism.
func ensureParallelism(t *testing.T, in *instance, n int) {
	for i := 0; i < n; i++ {
		select {
		case <-in.locks:
		case <-time.After(time.Millisecond):
			require.FailNow(t, "lock couldn't be acquired")
		}
	}
	// ensure no more locks are available
	select {
	case <-in.locks:
		require.FailNow(t, "unexpected lock acquisition")
	default:
	}
}

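// TestInstanceFor exercises instance.For: it runs more queries than the configured
// parallelism, checks that an early error abandons the remaining queued queries, and
// verifies per-shard results come back in the order their queries were submitted.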
func TestInstanceFor(t *testing.T) {
	mkIn := func() *instance {
		return DownstreamHandler{
			limits: fakeLimits{},
			next:   nil,
		}.Downstreamer(context.Background()).(*instance)
	}
	in := mkIn()

	queries := make([]logql.DownstreamQuery, in.parallelism+1)
	var mtx sync.Mutex
	var ct int

	// ensure we can execute more queries than the parallelism parameter allows
	_, err := in.For(context.TODO(), queries, func(_ logql.DownstreamQuery) (logqlmodel.Result, error) {
		mtx.Lock()
		defer mtx.Unlock()
		ct++
		return logqlmodel.Result{}, nil
	})
	require.Nil(t, err)
	require.Equal(t, len(queries), ct)
	ensureParallelism(t, in, in.parallelism)

	// ensure an early error abandons the other queued queries
	in = mkIn()
	ct = 0
	_, err = in.For(context.TODO(), queries, func(_ logql.DownstreamQuery) (logqlmodel.Result, error) {
		mtx.Lock()
		defer mtx.Unlock()
		ct++
		return logqlmodel.Result{}, errors.New("testerr")
	})
	require.NotNil(t, err)
	mtx.Lock()
	ctRes := ct
	mtx.Unlock()

	// Ensure no more than the initial batch was parallelized
	// (one extra query may still have been started, though).
	require.LessOrEqual(t, ctRes, in.parallelism+1)
	ensureParallelism(t, in, in.parallelism)

	in = mkIn()
	results, err := in.For(
		context.TODO(),
		[]logql.DownstreamQuery{
			{
				Shards: logql.Shards{
					{Shard: 0, Of: 2},
				},
			},
			{
				Shards: logql.Shards{
					{Shard: 1, Of: 2},
				},
			},
		},
		func(qry logql.DownstreamQuery) (logqlmodel.Result, error) {
			return logqlmodel.Result{
				Data: logqlmodel.Streams{{
					Labels: qry.Shards[0].String(),
				}},
			}, nil
		},
	)
	require.Nil(t, err)
	require.Equal(
		t,
		[]logqlmodel.Result{
			{
				Data: logqlmodel.Streams{{Labels: "0_of_2"}},
			},
			{
				Data: logqlmodel.Streams{{Labels: "1_of_2"}},
			},
		},
		results,
	)
	ensureParallelism(t, in, in.parallelism)
}

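// TestInstanceDownstream verifies that Downstream builds the expected downstream
// request from a DownstreamQuery and converts the handler's response into the
// corresponding logqlmodel.Result.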
func TestInstanceDownstream(t *testing.T) {
	params := logql.NewLiteralParams(
		"",
		time.Now(),
		time.Now(),
		0,
		0,
		logproto.BACKWARD,
		1000,
		nil,
	)
	expr, err := syntax.ParseExpr(`{foo="bar"}`)
	require.Nil(t, err)

	expectedResp := func() *LokiResponse {
		return &LokiResponse{
			Data: LokiData{
				Result: []logproto.Stream{{
					Labels: `{foo="bar"}`,
				}},
			},
			Statistics: stats.Result{
				Summary: stats.Summary{QueueTime: 1, ExecTime: 2},
			},
		}
	}

	queries := []logql.DownstreamQuery{
		{
			Expr:   expr,
			Params: params,
			Shards: logql.Shards{{Shard: 0, Of: 2}},
		},
	}

	var got queryrangebase.Request
	var want queryrangebase.Request
	handler := queryrangebase.HandlerFunc(
		func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
			// These can't reliably be asserted on inside the handler's goroutine,
			// so assign them to variables in the enclosing scope and compare
			// after the Downstream call returns.
			got = req
			want = ParamsToLokiRequest(params, queries[0].Shards).WithQuery(expr.String())

			return expectedResp(), nil
		},
	)

	expected, err := ResponseToResult(expectedResp())
	require.Nil(t, err)

	results, err := DownstreamHandler{
		limits: fakeLimits{},
		next:   handler,
	}.Downstreamer(context.Background()).Downstream(context.Background(), queries)

	require.Equal(t, want, got)

	require.Nil(t, err)
	require.Equal(t, []logqlmodel.Result{expected}, results)
}

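// TestCancelWhileWaitingResponse ensures For returns promptly once its context is
// canceled, even while downstream queries are still in flight.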
func TestCancelWhileWaitingResponse(t *testing.T) {
	mkIn := func() *instance {
		return DownstreamHandler{
			limits: fakeLimits{},
			next:   nil,
		}.Downstreamer(context.Background()).(*instance)
	}
	in := mkIn()

	queries := make([]logql.DownstreamQuery, in.parallelism+1)

	ctx, cancel := context.WithCancel(context.Background())

	// Launch the For call in a goroutine because it blocks and we need to be able to cancel the context
	// to prove it will exit when the context is canceled.
	b := atomic.NewBool(false)
	go func() {
		_, _ = in.For(ctx, queries, func(_ logql.DownstreamQuery) (logqlmodel.Result, error) {
			// Intended to keep the For method from returning unless the context is canceled.
			time.Sleep(100 * time.Second)
			return logqlmodel.Result{}, nil
		})
		// Should only reach here if the For method returns after the context is canceled.
		b.Store(true)
	}()

	// Cancel the parent call
	cancel()
	require.Eventually(t, func() bool {
		return b.Load()
	}, 5*time.Second, 10*time.Millisecond,
		"The parent context calling the Downstreamer For method was canceled "+
			"but the For method did not return as expected.")
}

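// TestDownstreamerUsesCorrectParallelism checks that the tenant's max query parallelism
// limit determines how many locks the instance is created with.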
func TestDownstreamerUsesCorrectParallelism(t *testing.T) {
	ctx := user.InjectOrgID(context.Background(), "fake")
	l := fakeLimits{maxQueryParallelism: 4}
	d := DownstreamHandler{
		limits: l,
		next:   nil,
	}.Downstreamer(ctx)

	i := d.(*instance)
	close(i.locks)
	var ct int
	for range i.locks {
		ct++
	}
	require.Equal(t, l.maxQueryParallelism, ct)
}